mirror of
https://github.com/mongodb/mongo.git
synced 2024-12-01 09:32:32 +01:00
Revert "SERVER-643 eliminate race conditions in clonecollection test"
This reverts commit 1ec372ec3b.
This commit is contained in:
parent
1ec372ec3b
commit
9e0c0ec480
@ -23,45 +23,6 @@ waitParallel = function() {
|
||||
assert.soon( function() { return doneParallel(); }, "parallel did not finish in time", 300000, 1000 );
|
||||
}
|
||||
|
||||
// Counter tagging each startCloneCollection attempt; incremented by
// startstartclone() and used by finishstartclone() to locate the matching
// command result in the program log.  Starts at -1 so the first clone is 0.
cloneNo = -1;
// Kick off startCloneCollection in a parallel shell.  'spec' is an optional
// string of extra command fields (e.g. ", query:{...}") spliced into the
// command object.  The parallel shell prints the command result tagged with
// the current cloneNo so finishstartclone() can recover it from the log.
startstartclone = function( spec ) {
    if ( !spec ) {
        spec = "";
    }
    cloneNo++;
    // Build the command to run in the parallel shell, then the log line that
    // tags its result with this clone's number.
    var cmd = "z = db.runCommand( {startCloneCollection:\"jstests_clonecollection.a\", from:\"localhost:" + ports[ 0 ] + "\"" + spec + " } );";
    var logLine = " print( \"clone_clone_clone_commandResult::" + cloneNo + "::\" + tojson( z , '' , true ) + \":::::\" );";
    doParallel( cmd + logLine );
}
// Wait for the parallel shell started by startstartclone() to exit, then
// scrape the startCloneCollection command result (tagged with the current
// cloneNo) out of the program log, rebuild it as an object, assert it
// succeeded, and return it.  The returned object carries the finishToken
// consumed by dofinishclonecmd()/finishclone().
finishstartclone = function() {
    waitParallel();
    // even after parallel shell finished, must wait for finishToken line to appear in log
    assert.soon( function() {
        // Flatten the log to one line so the marker regex can match across
        // line breaks.  NOTE: 'raw' and 'ret' are deliberately implicit
        // globals -- 'ret' is consumed below, after assert.soon() returns.
        raw = rawMongoProgramOutput().replace( /[\r\n]/gm , " " )
        // Match only the result tagged with this clone's number, so output
        // from earlier clone attempts in the same log is ignored.
        ret = raw.match( new RegExp( "clone_clone_clone_commandResult::" + cloneNo + "::(.*):::::" ) );
        if ( ret == null ) {
            return false;
        }
        // Keep just the captured tojson() text of the command result.
        ret = ret[ 1 ];
        return true;
    } );
    // Rebuild the command-result object from its one-line tojson() form.
    // (eval of shell-generated tojson output is the established idiom in
    // these tests; the input comes from our own mongod log, not a user.)
    eval( "ret = " + ret );
    assert.commandWorked( ret );
    return ret;
}
// Issue finishCloneCollection against 't' using the finishToken carried in
// 'ret' (the result returned by finishstartclone()).  Returns the raw
// command result so callers can assert either success or failure.
// NOTE: 'finishToken' is intentionally left as an implicit global.
dofinishclonecmd = function( ret ) {
    finishToken = ret.finishToken;
    // Round-tripping through JS can corrupt the cursor ids we store as BSON
    // Date elements. Date( 0 ) will correspond to a cursorId value of 0, which
    // makes the db start scanning from the beginning of the collection.
    finishToken.cursorId = new Date( 0 );
    var cmd = {finishCloneCollection:finishToken};
    return t.runCommand( cmd );
}
// Complete the clone and require that the finishCloneCollection command
// succeeds (use dofinishclonecmd() directly when failure is expected).
finishclone = function( ret ) {
    var res = dofinishclonecmd( ret );
    assert.commandWorked( res );
}
// Allocate two ports for this test's mongod instances.  ports[ 0 ] is used
// below as the clone source; presumably ports[ 1 ] hosts the destination --
// not visible in this chunk, confirm against the full test file.
ports = allocatePorts( 2 );

// Source database: the collection to be cloned ("jstests_clonecollection.a")
// lives in this mongod, reachable at localhost:ports[ 0 ].
f = startMongod( "--port", ports[ 0 ], "--dbpath", "/data/db/" + baseName + "_from", "--nohttpinterface", "--bind_ip", "127.0.0.1" ).getDB( baseName );
@ -112,15 +73,15 @@ for( i = 0; i < 100000; ++i ) {
|
||||
}
|
||||
assert.eq( 100000, f.a.count() );
|
||||
|
||||
startstartclone( ", query:{i:{$gte:0}}" );
|
||||
doParallel( "assert.commandWorked( db.cloneCollection( \"localhost:" + ports[ 0 ] + "\", \"a\", {i:{$gte:0}} ) );" );
|
||||
|
||||
sleep( 200 );
|
||||
f.a.save( { i: 200000 } );
|
||||
f.a.save( { i: -1 } );
|
||||
f.a.remove( { i: 0 } );
|
||||
f.a.update( { i: 99998 }, { i: 99998, x: "y" } );
|
||||
ret = finishstartclone();
|
||||
finishclone( ret );
|
||||
assert( !doneParallel(), "test run invalid" );
|
||||
waitParallel();
|
||||
|
||||
assert.eq( 100000, t.a.find().count() );
|
||||
assert.eq( 1, t.a.find( { i: 200000 } ).count() );
|
||||
@ -138,14 +99,14 @@ for( i = 0; i < 200000; ++i ) {
|
||||
}
|
||||
assert.eq( 200000, f.a.count() );
|
||||
|
||||
startstartclone( ", logSizeMb:1" );
|
||||
ret = finishstartclone();
|
||||
doParallel( "assert.commandFailed( db.runCommand( { cloneCollection: \"jstests_clonecollection.a\", from: \"localhost:" + ports[ 0 ] + "\", logSizeMb:1 } ) );" );
|
||||
|
||||
sleep( 100 );
|
||||
for( i = 200000; i < 210000; ++i ) {
|
||||
f.a.save( { i: i } );
|
||||
}
|
||||
|
||||
assert.commandFailed( dofinishclonecmd( ret ) );
|
||||
waitParallel();
|
||||
|
||||
// Make sure the same works with standard size op log.
|
||||
f.a.drop();
|
||||
@ -156,16 +117,15 @@ for( i = 0; i < 200000; ++i ) {
|
||||
}
|
||||
assert.eq( 200000, f.a.count() );
|
||||
|
||||
startstartclone();
|
||||
ret = finishstartclone();
|
||||
doParallel( "assert.commandWorked( db.cloneCollection( \"localhost:" + ports[ 0 ] + "\", \"a\" ) );" );
|
||||
|
||||
sleep( 100 );
|
||||
for( i = 200000; i < 210000; ++i ) {
|
||||
f.a.save( { i: i } );
|
||||
}
|
||||
assert.eq( 210000, f.a.count() );
|
||||
|
||||
finishclone( ret );
|
||||
assert.eq( 210000, t.a.find().count() );
|
||||
waitParallel();
|
||||
assert( 200000 < t.a.find().count() );
|
||||
|
||||
// Test startCloneCollection and finishCloneCollection commands.
|
||||
f.a.drop();
|
||||
@ -176,15 +136,34 @@ for( i = 0; i < 100000; ++i ) {
|
||||
}
|
||||
assert.eq( 100000, f.a.count() );
|
||||
|
||||
startstartclone();
|
||||
doParallel( "z = db.runCommand( {startCloneCollection:\"jstests_clonecollection.a\", from:\"localhost:" + ports[ 0 ] + "\" } ); print( \"clone_clone_clone_commandResult:::::\" + tojson( z , '' , true ) + \":::::\" );" );
|
||||
|
||||
sleep( 200 );
|
||||
f.a.save( { i: -1 } );
|
||||
|
||||
ret = finishstartclone();
|
||||
waitParallel();
|
||||
// even after parallel shell finished, must wait for finishToken line to appear in log
|
||||
assert.soon( function() {
|
||||
raw = rawMongoProgramOutput().replace( /[\r\n]/gm , " " )
|
||||
ret = raw.match( /clone_clone_clone_commandResult:::::(.*):::::/ );
|
||||
if ( ret == null ) {
|
||||
return false;
|
||||
}
|
||||
ret = ret[ 1 ];
|
||||
return true;
|
||||
} );
|
||||
|
||||
eval( "ret = " + ret );
|
||||
|
||||
assert.commandWorked( ret );
|
||||
assert.eq( 100001, t.a.find().count() );
|
||||
|
||||
f.a.save( { i: -2 } );
|
||||
assert.eq( 100002, f.a.find().count() );
|
||||
finishclone( ret );
|
||||
finishToken = ret.finishToken;
|
||||
// Round-tripping through JS can corrupt the cursor ids we store as BSON
|
||||
// Date elements. Date( 0 ) will correspond to a cursorId value of 0, which
|
||||
// makes the db start scanning from the beginning of the collection.
|
||||
finishToken.cursorId = new Date( 0 );
|
||||
assert.commandWorked( t.runCommand( {finishCloneCollection:finishToken} ) );
|
||||
assert.eq( 100002, t.a.find().count() );
|
||||
|
Loading…
Reference in New Issue
Block a user