From 1ec372ec3b03657c1ceabdc21aeb453f092c747b Mon Sep 17 00:00:00 2001
From: Aaron
Date: Wed, 17 Feb 2010 23:47:15 -0800
Subject: [PATCH] SERVER-643 eliminate race conditions in clonecollection test

---
 jstests/clone/clonecollection.js | 85 ++++++++++++++++++++------------
 1 file changed, 53 insertions(+), 32 deletions(-)

diff --git a/jstests/clone/clonecollection.js b/jstests/clone/clonecollection.js
index 74d4d872af7..7ceb1b0807c 100644
--- a/jstests/clone/clonecollection.js
+++ b/jstests/clone/clonecollection.js
@@ -23,6 +23,45 @@ waitParallel = function() {
     assert.soon( function() { return doneParallel(); }, "parallel did not finish in time", 300000, 1000 );
 }
 
+cloneNo = -1;
+startstartclone = function( spec ) {
+    spec = spec || "";
+    cloneNo++;
+    doParallel( "z = db.runCommand( {startCloneCollection:\"jstests_clonecollection.a\", from:\"localhost:" + ports[ 0 ] + "\"" + spec + " } ); print( \"clone_clone_clone_commandResult::" + cloneNo + "::\" + tojson( z , '' , true ) + \":::::\" );" );
+}
+
+finishstartclone = function() {
+    waitParallel();
+    // even after parallel shell finished, must wait for finishToken line to appear in log
+    assert.soon( function() {
+        raw = rawMongoProgramOutput().replace( /[\r\n]/gm , " " )
+        ret = raw.match( new RegExp( "clone_clone_clone_commandResult::" + cloneNo + "::(.*):::::" ) );
+        if ( ret == null ) {
+            return false;
+        }
+        ret = ret[ 1 ];
+        return true;
+    } );
+
+    eval( "ret = " + ret );
+
+    assert.commandWorked( ret );
+    return ret;
+}
+
+dofinishclonecmd = function( ret ) {
+    finishToken = ret.finishToken;
+    // Round-tripping through JS can corrupt the cursor ids we store as BSON
+    // Date elements. Date( 0 ) will correspond to a cursorId value of 0, which
+    // makes the db start scanning from the beginning of the collection.
+    finishToken.cursorId = new Date( 0 );
+    return t.runCommand( {finishCloneCollection:finishToken} );
+}
+
+finishclone = function( ret ) {
+    assert.commandWorked( dofinishclonecmd( ret ) );
+}
+
 ports = allocatePorts( 2 );
 
 f = startMongod( "--port", ports[ 0 ], "--dbpath", "/data/db/" + baseName + "_from", "--nohttpinterface", "--bind_ip", "127.0.0.1" ).getDB( baseName );
@@ -73,15 +112,15 @@ for( i = 0; i < 100000; ++i ) {
 }
 assert.eq( 100000, f.a.count() );
 
-doParallel( "assert.commandWorked( db.cloneCollection( \"localhost:" + ports[ 0 ] + "\", \"a\", {i:{$gte:0}} ) );" );
+startstartclone( ", query:{i:{$gte:0}}" );
 
 sleep( 200 );
 f.a.save( { i: 200000 } );
 f.a.save( { i: -1 } );
 f.a.remove( { i: 0 } );
 f.a.update( { i: 99998 }, { i: 99998, x: "y" } );
-assert( !doneParallel(), "test run invalid" );
-waitParallel();
+ret = finishstartclone();
+finishclone( ret );
 
 assert.eq( 100000, t.a.find().count() );
 assert.eq( 1, t.a.find( { i: 200000 } ).count() );
@@ -99,14 +138,14 @@ for( i = 0; i < 200000; ++i ) {
 }
 assert.eq( 200000, f.a.count() );
 
-doParallel( "assert.commandFailed( db.runCommand( { cloneCollection: \"jstests_clonecollection.a\", from: \"localhost:" + ports[ 0 ] + "\", logSizeMb:1 } ) );" );
+startstartclone( ", logSizeMb:1" );
+ret = finishstartclone();
 
-sleep( 100 );
 for( i = 200000; i < 210000; ++i ) {
     f.a.save( { i: i } );
 }
 
-waitParallel();
+assert.commandFailed( dofinishclonecmd( ret ) );
 
 // Make sure the same works with standard size op log.
 f.a.drop();
@@ -117,15 +156,16 @@ for( i = 0; i < 200000; ++i ) {
 }
 assert.eq( 200000, f.a.count() );
 
-doParallel( "assert.commandWorked( db.cloneCollection( \"localhost:" + ports[ 0 ] + "\", \"a\" ) );" );
+startstartclone();
+ret = finishstartclone();
 
-sleep( 100 );
 for( i = 200000; i < 210000; ++i ) {
     f.a.save( { i: i } );
 }
+assert.eq( 210000, f.a.count() );
 
-waitParallel();
-assert( 200000 < t.a.find().count() );
+finishclone( ret );
+assert.eq( 210000, t.a.find().count() );
 
 // Test startCloneCollection and finishCloneCollection commands.
 f.a.drop();
@@ -136,34 +176,15 @@ for( i = 0; i < 100000; ++i ) {
 }
 assert.eq( 100000, f.a.count() );
 
-doParallel( "z = db.runCommand( {startCloneCollection:\"jstests_clonecollection.a\", from:\"localhost:" + ports[ 0 ] + "\" } ); print( \"clone_clone_clone_commandResult:::::\" + tojson( z , '' , true ) + \":::::\" );" );
+startstartclone();
 
 sleep( 200 );
 f.a.save( { i: -1 } );
 
-waitParallel();
-// even after parallel shell finished, must wait for finishToken line to appear in log
-assert.soon( function() {
-    raw = rawMongoProgramOutput().replace( /[\r\n]/gm , " " )
-    ret = raw.match( /clone_clone_clone_commandResult:::::(.*):::::/ );
-    if ( ret == null ) {
-        return false;
-    }
-    ret = ret[ 1 ];
-    return true;
-    } );
-
-eval( "ret = " + ret );
-
-assert.commandWorked( ret );
+ret = finishstartclone();
 
 assert.eq( 100001, t.a.find().count() );
 f.a.save( { i: -2 } );
 assert.eq( 100002, f.a.find().count() );
-finishToken = ret.finishToken;
-// Round-tripping through JS can corrupt the cursor ids we store as BSON
-// Date elements. Date( 0 ) will correspond to a cursorId value of 0, which
-// makes the db start scanning from the beginning of the collection.
-finishToken.cursorId = new Date( 0 );
-assert.commandWorked( t.runCommand( {finishCloneCollection:finishToken} ) );
+finishclone( ret );
 assert.eq( 100002, t.a.find().count() );
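
For reference, a minimal sketch of the call sequence the refactored test relies on follows; it only restates the pattern already visible in the hunks above. The helpers (startstartclone, finishstartclone, dofinishclonecmd, finishclone) and the source/destination handles f and t are the ones defined in jstests/clone/clonecollection.js, and the shell-test harness functions (doParallel, rawMongoProgramOutput, etc.) are assumed to be available.

// Sketch only, not part of the patch: how the new helpers are sequenced in each test case.
startstartclone( ", query:{i:{$gte:0}}" );  // start startCloneCollection in a parallel shell;
                                            // the optional string is spliced into the command document
f.a.save( { i: -1 } );                      // writes racing with the initial clone are expected
                                            // to be replayed from the clone's operation log
ret = finishstartclone();                   // wait for the parallel shell, then recover its command
                                            // result (including finishToken) from the program output
finishclone( ret );                         // run finishCloneCollection on the destination and assert
                                            // success; call dofinishclonecmd( ret ) directly when the
                                            // command is expected to fail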