mirror of https://github.com/mongodb/mongo.git
synced 2024-12-01 01:21:03 +01:00
SERVER-366 clean up a bit in pack()

parent 6232703489
commit 1e934132ad

db/btree.cpp (49 changed lines)
@@ -84,7 +84,7 @@ namespace mongo {
         bt_dmp=0;
     }
 
-    int BucketBasics::fullValidate(const DiskLoc& thisLoc, const BSONObj &order) {
+    int BucketBasics::fullValidate(const DiskLoc& thisLoc, const BSONObj &order, int *unusedCount) {
         {
             bool f = false;
             assert( f = true );
@@ -107,18 +107,24 @@ namespace mongo {
         for ( int i = 0; i < n; i++ ) {
             _KeyNode& kn = k(i);
 
-            if ( kn.isUsed() ) kc++;
+            if ( kn.isUsed() ) {
+                kc++;
+            } else {
+                if ( unusedCount ) {
+                    ++( *unusedCount );
+                }
+            }
             if ( !kn.prevChildBucket.isNull() ) {
                 DiskLoc left = kn.prevChildBucket;
                 BtreeBucket *b = left.btree();
                 wassert( b->parent == thisLoc );
-                kc += b->fullValidate(kn.prevChildBucket, order);
+                kc += b->fullValidate(kn.prevChildBucket, order, unusedCount);
             }
         }
         if ( !nextChild.isNull() ) {
             BtreeBucket *b = nextChild.btree();
             wassert( b->parent == thisLoc );
-            kc += b->fullValidate(nextChild, order);
+            kc += b->fullValidate(nextChild, order, unusedCount);
         }
 
         return kc;
@@ -269,12 +275,12 @@ namespace mongo {
     }*/
 
     /* insert a key in a bucket with no complexity -- no splits required */
-    bool BucketBasics::basicInsert(const DiskLoc& thisLoc, int keypos, const DiskLoc& recordLoc, const BSONObj& key, const Ordering &order) {
+    bool BucketBasics::basicInsert(const DiskLoc& thisLoc, int &keypos, const DiskLoc& recordLoc, const BSONObj& key, const Ordering &order) {
         modified(thisLoc);
         assert( keypos >= 0 && keypos <= n );
         int bytesNeeded = key.objsize() + sizeof(_KeyNode);
         if ( bytesNeeded > emptySize ) {
-            pack( order );
+            pack( order, keypos );
             if ( bytesNeeded > emptySize )
                 return false;
         }
@@ -294,7 +300,7 @@ namespace mongo {
     /* when we delete things we just leave empty space until the node is
        full and then we repack it.
     */
-    void BucketBasics::pack( const Ordering &order ) {
+    void BucketBasics::pack( const Ordering &order, int &refPos ) {
         if ( flags & Packed )
             return;
 
@@ -302,14 +308,26 @@ namespace mongo {
         char temp[BucketSize];
         int ofs = tdz;
         topSize = 0;
+        int i = 0;
         for ( int j = 0; j < n; j++ ) {
-            short ofsold = k(j).keyDataOfs();
-            int sz = keyNode(j).key.objsize();
+            if( j > 0 && k( j ).isUnused() && k( j ).prevChildBucket.isNull() ) {
+                if ( i < refPos ) {
+                    --refPos;
+                }
+                continue; // key is unused and has no children - drop it
+            }
+            if( i != j ) {
+                k( i ) = k( j );
+            }
+            short ofsold = k(i).keyDataOfs();
+            int sz = keyNode(i).key.objsize();
             ofs -= sz;
             topSize += sz;
             memcpy(temp+ofs, dataAt(ofsold), sz);
-            k(j).setKeyDataOfsSavingUse( ofs );
+            k(i).setKeyDataOfsSavingUse( ofs );
+            ++i;
         }
+        n = i;
         int dataUsed = tdz - ofs;
         memcpy(data + ofs, temp + ofs, dataUsed);
         emptySize = tdz - dataUsed - n * sizeof(_KeyNode);
@@ -319,10 +337,10 @@ namespace mongo {
         assertValid( order );
     }
 
-    inline void BucketBasics::truncateTo(int N, const Ordering &order) {
+    inline void BucketBasics::truncateTo(int N, const Ordering &order, int &refPos) {
        n = N;
        setNotPacked();
-       pack( order );
+       pack( order, refPos );
    }
 
    /* - BtreeBucket --------------------------------------------------- */
@@ -596,6 +614,8 @@ found:
 
    /* insert a key in this bucket, splitting if necessary.
       keypos - where to insert the key i3n range 0..n. 0=make leftmost, n=make rightmost.
+      NOTE this function may free some data, and as a result the value passed for keypos may
+      be invalid after calling insertHere()
    */
    void BtreeBucket::insertHere(DiskLoc thisLoc, int keypos,
                                 DiskLoc recordLoc, const BSONObj& key, const Ordering& order,
@@ -716,7 +736,8 @@ found:
            }
        }
 
-       truncateTo(split, order); // note this may trash splitkey.key. thus we had to promote it before finishing up here.
+       int newpos = keypos;
+       truncateTo(split, order, newpos); // note this may trash splitkey.key. thus we had to promote it before finishing up here.
 
        // add our new key, there is room now
        {
@@ -724,7 +745,7 @@ found:
           if ( keypos <= split ) {
               if ( split_debug )
                   out() << " keypos<split, insertHere() the new key" << endl;
-              insertHere(thisLoc, keypos, recordLoc, key, order, lchild, rchild, idx);
+              insertHere(thisLoc, newpos, recordLoc, key, order, lchild, rchild, idx);
           } else {
               int kp = keypos-split-1;
              assert(kp>=0);
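The pack() rewrite above is the core of this cleanup: while compacting a bucket, keys that are unused and have no child bucket are now dropped outright, and refPos is decremented whenever a dropped slot lies before the caller's position, which is why basicInsert() and truncateTo() now take their position argument by reference. Below is a minimal standalone sketch of that bookkeeping, assuming a hypothetical Entry record and a plain vector rather than the real _KeyNode/BucketBasics layout; the real pack() additionally moves the key bytes with memcpy and rewrites the _KeyNode offsets.

    #include <cassert>
    #include <vector>

    // Illustrative stand-in for a bucket slot; not the real btree types.
    struct Entry {
        bool used;
        bool hasChild;
        int  payload;
    };

    // Compact 'entries' in place, dropping unused childless slots, while
    // keeping refPos pointing at the same logical position the caller held.
    void packSketch(std::vector<Entry>& entries, int& refPos) {
        std::size_t i = 0;
        for (std::size_t j = 0; j < entries.size(); ++j) {
            if (j > 0 && !entries[j].used && !entries[j].hasChild) {
                if (static_cast<int>(i) < refPos)
                    --refPos;   // a slot before the caller's position was dropped
                continue;       // drop it, as the new pack() does
            }
            if (i != j)
                entries[i] = entries[j];
            ++i;
        }
        entries.resize(i);
    }

    int main() {
        std::vector<Entry> e = { {true, false, 1}, {false, false, 2}, {true, false, 3} };
        int keypos = 2;                        // caller meant "insert before entry 3"
        packSketch(e, keypos);
        assert(e.size() == 2 && keypos == 1);  // position still names the same slot
        return 0;
    }

With this contract, a caller that computed keypos before the bucket filled up can still use the adjusted value after pack() has run.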
db/btree.h (11 changed lines)
@@ -84,7 +84,7 @@ namespace mongo {
         void assertValid(const BSONObj &orderObj, bool force = false) {
             return assertValid(Ordering::make(orderObj),force);
         }
-        int fullValidate(const DiskLoc& thisLoc, const BSONObj &order); /* traverses everything */
+        int fullValidate(const DiskLoc& thisLoc, const BSONObj &order, int *unusedCount = 0); /* traverses everything */
 
         KeyNode keyNode(int i) const {
             if ( i >= n ){
@@ -106,7 +106,7 @@ namespace mongo {
         /* returns false if node is full and must be split
            keypos is where to insert -- inserted after that key #. so keypos=0 is the leftmost one.
         */
-        bool basicInsert(const DiskLoc& thisLoc, int keypos, const DiskLoc& recordLoc, const BSONObj& key, const Ordering &order);
+        bool basicInsert(const DiskLoc& thisLoc, int &keypos, const DiskLoc& recordLoc, const BSONObj& key, const Ordering &order);
 
         /**
          * @return true if works, false if not enough space
@@ -130,12 +130,12 @@ namespace mongo {
         }
 
         int totalDataSize() const;
-        void pack( const Ordering &order );
+        void pack( const Ordering &order, int &refPos);
         void setNotPacked();
         void setPacked();
         int _alloc(int bytes);
         void _unalloc(int bytes);
-        void truncateTo(int N, const Ordering &order);
+        void truncateTo(int N, const Ordering &order, int &refPos);
         void markUnused(int keypos);
 
         /* BtreeBuilder uses the parent var as a temp place to maintain a linked list chain.
@@ -361,6 +361,9 @@ namespace mongo {
         virtual void setMatcher( shared_ptr< CoveredIndexMatcher > matcher ) {
             _matcher = matcher;
         }
 
+        // for debugging only
+        DiskLoc getBucket() const { return bucket; }
+
     private:
         /* Our btrees may (rarely) have "unused" keys when items are deleted.
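The fullValidate() declaration above follows the optional out-parameter pattern: existing callers keep passing two arguments (unusedCount defaults to 0), while the new tests pass an int* to receive a tally of unused keys alongside the returned count of used keys. A rough sketch of the same pattern on a toy tree, with a made-up Node type standing in for BtreeBucket:

    #include <iostream>

    // Toy binary node standing in for a btree bucket; illustration only.
    struct Node {
        bool used;
        Node* left;
        Node* right;
    };

    // Count used nodes; if unusedCount is supplied, also tally unused ones.
    // Mirrors the int* out-parameter with a default of 0 used by fullValidate().
    int countUsed(const Node* n, int* unusedCount = 0) {
        if (!n)
            return 0;
        int kc = 0;
        if (n->used)
            kc++;
        else if (unusedCount)
            ++(*unusedCount);
        kc += countUsed(n->left, unusedCount);
        kc += countUsed(n->right, unusedCount);
        return kc;
    }

    int main() {
        Node c = { false, 0, 0 };
        Node b = { true, &c, 0 };
        Node a = { true, &b, 0 };
        int unused = 0;
        std::cout << countUsed(&a) << "\n";           // 2, unused keys ignored
        std::cout << countUsed(&a, &unused) << "\n";  // 2 again
        std::cout << unused << "\n";                  // 1 unused node reported
        return 0;
    }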
dbtests/btreetests.cpp

@@ -26,47 +26,47 @@
 namespace BtreeTests {
 
-    class Base {
+    const char* ns() {
+        return "unittests.btreetests";
+    }
+
+    class Ensure {
     public:
+        Ensure() {
+            _c.ensureIndex( ns(), BSON( "a" << 1 ), false, "testIndex" );
+        }
+        ~Ensure() {
+            _c.dropIndexes( ns() );
+        }
+    private:
+        DBDirectClient _c;
+    };
+
+    class Base : public Ensure {
+    public:
         Base() :
-            _context( ns() ) {
-
+            _context( ns() ) {
             {
                 bool f = false;
                 assert( f = true );
                 massert( 10402 , "assert is misdefined", f);
             }
-            BSONObjBuilder builder;
-            builder.append( "ns", ns() );
-            builder.append( "name", "testIndex" );
-            BSONObj bobj = builder.done();
-            idx_.info =
-                theDataFileMgr.insert( ns(), bobj.objdata(), bobj.objsize() );
-            idx_.head = BtreeBucket::addBucket( idx_ );
         }
-        ~Base() {
-            // FIXME cleanup all btree buckets.
-            theDataFileMgr.deleteRecord( ns(), idx_.info.rec(), idx_.info );
-            ASSERT( theDataFileMgr.findAll( ns() )->eof() );
-        }
     protected:
-        BtreeBucket* bt() const {
-            return idx_.head.btree();
+        BtreeBucket* bt() {
+            return id().head.btree();
         }
-        DiskLoc dl() const {
-            return idx_.head;
+        DiskLoc dl() {
+            return id().head;
         }
         IndexDetails& id() {
-            return idx_;
-        }
-        static const char* ns() {
-            return "unittests.btreetests";
+            return nsdetails( ns() )->idx( 1 );
         }
         // dummy, valid record loc
         static DiskLoc recordLoc() {
             return DiskLoc( 0, 2 );
         }
-        void checkValid( int nKeys ) const {
+        void checkValid( int nKeys ) {
             ASSERT( bt() );
             ASSERT( bt()->isHead() );
             bt()->assertValid( order(), true );
@@ -98,13 +98,12 @@ namespace BtreeTests {
             ASSERT( location == expectedLocation );
             ASSERT_EQUALS( expectedPos, pos );
         }
-        BSONObj order() const {
-            return idx_.keyPattern();
+        BSONObj order() {
+            return id().keyPattern();
         }
     private:
         dblock lk_;
         Client::Context _context;
-        IndexDetails idx_;
     };
 
     class Create : public Base {
@@ -251,7 +250,93 @@ namespace BtreeTests {
             Base::insert( k );
         }
     };
 
+    class ReuseUnused : public Base {
+    public:
+        void run() {
+            for ( int i = 0; i < 10; ++i ) {
+                insert( i );
+            }
+            BSONObj root = key( 'p' );
+            unindex( root );
+            Base::insert( root );
+            locate( root, 0, true, dl(), 1 );
+        }
+    private:
+        BSONObj key( char c ) {
+            return simpleKey( c, 800 );
+        }
+        void insert( int i ) {
+            BSONObj k = key( 'b' + 2 * i );
+            Base::insert( k );
+        }
+    };
+
+    class PackUnused : public Base {
+    public:
+        void run() {
+            for ( long long i = 0; i < 1000000; i += 1000 ) {
+                insert( i );
+            }
+            string orig, after;
+            {
+                stringstream ss;
+                bt()->shape( ss );
+                orig = ss.str();
+            }
+            vector< string > toDel;
+            vector< string > other;
+            BSONObjBuilder start;
+            start.appendMinKey( "a" );
+            BSONObjBuilder end;
+            end.appendMaxKey( "a" );
+            auto_ptr< BtreeCursor > c( new BtreeCursor( nsdetails( ns() ), 1, id(), start.done(), end.done(), false, 1 ) );
+            while( c->ok() ) {
+                if ( !c->currKeyNode().prevChildBucket.isNull() ) {
+                    toDel.push_back( c->currKey().firstElement().valuestr() );
+                } else {
+                    other.push_back( c->currKey().firstElement().valuestr() );
+                }
+                c->advance();
+            }
+            ASSERT( toDel.size() > 0 );
+            for( vector< string >::const_iterator i = toDel.begin(); i != toDel.end(); ++i ) {
+                BSONObj o = BSON( "a" << *i );
+                unindex( o );
+            }
+            ASSERT( other.size() > 0 );
+            for( vector< string >::const_iterator i = other.begin(); i != other.end(); ++i ) {
+                BSONObj o = BSON( "a" << *i );
+                unindex( o );
+            }
+
+            int unused = 0;
+            ASSERT_EQUALS( 0, bt()->fullValidate( dl(), order(), &unused ) );
+
+            for ( long long i = 50000; i < 50100; ++i ) {
+                insert( i );
+            }
+
+            int unused2 = 0;
+            ASSERT_EQUALS( 100, bt()->fullValidate( dl(), order(), &unused2 ) );
+
+            ASSERT( unused2 < unused );
+        }
+    private:
+        void insert( long long n ) {
+            string val( 800, ' ' );
+            for( int i = 0; i < 800; i += 8 ) {
+                for( int j = 0; j < 8; ++j ) {
+                    // probably we won't get > 56 bits
+                    unsigned char v = 0x80 | ( n >> ( ( 8 - j - 1 ) * 7 ) & 0x000000000000007f );
+                    val[ i + j ] = v;
+                }
+            }
+            BSONObj k = BSON( "a" << val );
+            Base::insert( k );
+        }
+    };
+
     class All : public Suite {
     public:
         All() : Suite( "btree" ){
@@ -265,6 +350,8 @@ namespace BtreeTests {
             add< MissingLocate >();
             add< MissingLocateMultiBucket >();
             add< SERVER983 >();
+            add< ReuseUnused >();
+            add< PackUnused >();
         }
     } myall;
 }
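The PackUnused test added above fills the index with large keys, deletes all of them, and then uses the new unusedCount out-parameter of fullValidate() to check that a later burst of inserts forces repacking and leaves fewer unused slots behind (ASSERT( unused2 < unused )). Its insert() helper packs the loop counter into the key text 7 bits per byte with the high bit set, presumably so that no byte is ever zero and the value survives as C-string data read back through valuestr(). A self-contained sketch of that encoding, with an added decoder to show it round-trips (encodeKey/decodeKey are invented names, not part of the test):

    #include <cassert>
    #include <cstddef>
    #include <string>

    // Pack a 64-bit value into the key text 7 bits per byte, high bit set,
    // repeating the 8-byte pattern until 'len' bytes are filled (len must be
    // a multiple of 8). Mirrors PackUnused::insert()'s key construction.
    std::string encodeKey(long long n, std::size_t len = 800) {
        std::string val(len, ' ');
        for (std::size_t i = 0; i < len; i += 8) {
            for (int j = 0; j < 8; ++j) {
                // only ~56 bits of n fit into 8 groups of 7 bits
                unsigned char v = 0x80 | ((n >> ((8 - j - 1) * 7)) & 0x7f);
                val[i + j] = static_cast<char>(v);
            }
        }
        return val;
    }

    // Recover the value from the first 8 bytes of an encoded key.
    long long decodeKey(const std::string& val) {
        long long n = 0;
        for (int j = 0; j < 8; ++j)
            n = (n << 7) | (static_cast<unsigned char>(val[j]) & 0x7f);
        return n;
    }

    int main() {
        long long n = 50000;
        std::string k = encodeKey(n);
        assert(k.size() == 800);
        assert(decodeKey(k) == n);   // the first 8 bytes round-trip the value
        return 0;
    }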