0
0
mirror of https://github.com/mongodb/mongo.git synced 2024-12-01 09:32:32 +01:00

Merge branch 'master' of github.com:mongodb/mongo

This commit is contained in:
dwight 2010-07-04 09:28:16 -04:00
commit 2585c42a88
38 changed files with 591 additions and 184 deletions

View File

@ -388,7 +388,7 @@ commonFiles = Split( "pch.cpp buildinfo.cpp db/common.cpp db/jsobj.cpp db/json.c
commonFiles += [ "util/background.cpp" , "util/mmap.cpp" , "util/ramstore.cpp", "util/sock.cpp" , "util/util.cpp" , "util/message.cpp" ,
"util/assert_util.cpp" , "util/httpclient.cpp" , "util/md5main.cpp" , "util/base64.cpp", "util/concurrency/vars.cpp", "util/concurrency/task.cpp", "util/debug_util.cpp",
"util/concurrency/thread_pool.cpp", "util/password.cpp", "util/version.cpp",
"util/histogram.cpp", "util/concurrency/spin_lock.cpp", "util/text.cpp" ]
"util/histogram.cpp", "util/concurrency/spin_lock.cpp", "util/text.cpp" , "util/stringutils.cpp" ]
commonFiles += Glob( "util/*.c" )
commonFiles += Split( "client/connpool.cpp client/dbclient.cpp client/dbclientcursor.cpp client/model.cpp client/syncclusterconnection.cpp s/shardconnection.cpp" )
@ -435,7 +435,7 @@ coreServerFiles += scriptingFiles
coreShardFiles = [ "s/config.cpp" , "s/chunk.cpp" , "s/shard.cpp" , "s/shardkey.cpp" ]
shardServerFiles = coreShardFiles + Glob( "s/strategy*.cpp" ) + [ "s/commands_admin.cpp" , "s/commands_public.cpp" , "s/request.cpp" , "s/cursors.cpp" , "s/server.cpp" , "s/config_migrate.cpp" , "s/s_only.cpp" , "s/stats.cpp" , "s/balance.cpp" , "s/balancer_policy.cpp" , "db/cmdline.cpp" ]
serverOnlyFiles += coreShardFiles + [ "s/d_logic.cpp" , "s/d_writeback.cpp" , "s/d_migrate.cpp" , "s/d_state.cpp" ]
serverOnlyFiles += coreShardFiles + [ "s/d_logic.cpp" , "s/d_writeback.cpp" , "s/d_migrate.cpp" , "s/d_state.cpp" , "s/d_split.cpp" ]
serverOnlyFiles += [ "db/module.cpp" ] + Glob( "db/modules/*.cpp" )
@ -611,7 +611,8 @@ elif "win32" == os.sys.platform:
env.Append( CPPDEFINES=[ "UNICODE" ] )
winSDKHome = findVersion( [ "C:/Program Files/Microsoft SDKs/Windows/", "C:/Program Files (x86)/Microsoft SDKs/Windows/" ] ,
[ "v6.0" , "v6.0a" , "v6.1", "v7.0A" ] )
[ "v7.0A", "v7.0", "v6.1", "v6.0a", "v6.0" ] )
print( "Windows SDK Root '" + winSDKHome + "'" )
env.Append( CPPPATH=[ boostDir , "pcre-7.4" , winSDKHome + "/Include" ] )
@ -645,7 +646,7 @@ elif "win32" == os.sys.platform:
# /ZI debug info w/edit & continue
# /TP it's a c++ file
# RTC1 /GZ (Enable Stack Frame Run-Time Error Checking)
env.Append( CPPFLAGS=" /Od /RTC1 /MDd /Zi /TP /errorReport:none " )
env.Append( CPPFLAGS=" /Od /RTC1 /MDd /Z7 /TP /errorReport:none " )
env.Append( CPPFLAGS=' /Fd"mongod.pdb" ' )
env.Append( LINKFLAGS=" /debug " )
@ -666,7 +667,8 @@ elif "win32" == os.sys.platform:
env.Append( LIBPATH=[ winSDKHome + "/Lib" ] )
if release:
env.Append( LINKFLAGS=" /NODEFAULTLIB:MSVCPRT /NODEFAULTLIB:MSVCRTD " )
#env.Append( LINKFLAGS=" /NODEFAULTLIB:MSVCPRT /NODEFAULTLIB:MSVCRTD " )
env.Append( LINKFLAGS=" /NODEFAULTLIB:MSVCPRT " )
else:
env.Append( LINKFLAGS=" /NODEFAULTLIB:MSVCPRT /NODEFAULTLIB:MSVCRT " )

View File

@ -417,7 +417,7 @@ namespace mongo {
/** Get error result from the last operation on this connection.
@return full error object.
*/
BSONObj getLastErrorDetailed();
virtual BSONObj getLastErrorDetailed();
static string getLastErrorString( const BSONObj& res );

View File

@ -27,11 +27,14 @@
namespace mongo {
extern string ourHostname;
class DistributedLock {
public:
DistributedLock( const ConnectionString& conn , const string& ns , const BSONObj& key , const string& field )
: _conn(conn), _ns(ns), _key(key.getOwned()), _field(field){
DistributedLock( const ConnectionString& conn , const string& name )
: _conn(conn),_name(name),_myid(""){
_id = BSON( "_id" << name );
_ns = "config.locks";
}
int getState(){
@ -42,7 +45,7 @@ namespace mongo {
return _state.get() != 0;
}
bool lock_try(){
bool lock_try( string why , BSONObj * other = 0 ){
// recursive
if ( getState() > 0 )
return true;
@ -50,28 +53,46 @@ namespace mongo {
ScopedDbConnection conn( _conn );
{ // make sure its there so we can use simple update logic below
BSONObj o = conn->findOne( _ns , _key );
BSONObj o = conn->findOne( _ns , _id );
if ( o.isEmpty() ){
conn->update( _ns , _key , BSON( "$set" << BSON( _field << BSON( "state" << 0 ) ) ) , 1 );
try {
conn->insert( _ns , BSON( "_id" << _name << "state" << 0 << "who" << "" ) );
}
catch ( UserException& e ){
}
}
}
BSONObjBuilder b;
b.appendElements( _key );
b.append( _field + ".state" , 0 );
BSONObjBuilder b;
b.appendElements( _id );
b.append( "state" , 0 );
conn->update( _ns , b.obj() , BSON( "$set" << BSON( _field + ".state" << 1 << "who" << myid() ) ) );
assert(0);
conn->update( _ns , b.obj() , BSON( "$set" << BSON( "state" << 1 << "who" << myid() << "when" << DATENOW << "why" << why ) ) );
BSONObj o = conn->getLastErrorDetailed();
BSONObj now = conn->findOne( _ns , _id );
conn.done();
log() << "dist_lock lock getLastErrorDetailed: " << o << " now: " << now << endl;
if ( o["n"].numberInt() == 0 ){
if ( other )
*other = now;
return false;
}
_state.set( 1 );
return true;
}
void unlock(){
ScopedDbConnection conn( _conn );
conn->update( _ns , _key, BSON( "$set" << BSON( _field + ".state" << 0 ) ) );
conn->update( _ns , _id, BSON( "$set" << BSON( "state" << 0 ) ) );
log() << "dist_lock unlock unlock: " << conn->findOne( _ns , _id ) << endl;
conn.done();
_state.set( 0 );
}
@ -86,12 +107,13 @@ namespace mongo {
return s;
}
private:
ConnectionString _conn;
string _ns;
BSONObj _key;
string _field;
string _name;
BSONObj _id;
ThreadLocalValue<int> _state;
ThreadLocalValue<string> _myid;
};
@ -99,9 +121,9 @@ namespace mongo {
class dist_lock_try {
public:
dist_lock_try( DistributedLock * lock )
dist_lock_try( DistributedLock * lock , string why )
: _lock(lock){
_got = _lock->lock_try();
_got = _lock->lock_try( why , &_other );
}
~dist_lock_try(){
@ -114,9 +136,14 @@ namespace mongo {
return _got;
}
BSONObj other() const {
return _other;
}
private:
DistributedLock * _lock;
bool _got;
BSONObj _other;
};

View File

@ -69,6 +69,7 @@ namespace mongo {
}
bool SyncClusterConnection::prepare( string& errmsg ){
_lastError = BSONObj();
return fsync( errmsg );
}
@ -111,6 +112,7 @@ namespace mongo {
}
all.push_back( res );
errors.push_back( err );
_lastError = res.getOwned();
}
assert( all.size() == errors.size() && all.size() == _conns.size() );
@ -124,6 +126,7 @@ namespace mongo {
continue;
ok = false;
err << _conns[i]->toString() << ": " << res << " " << errors[i];
_lastError = res.getOwned();
}
if ( ok )
@ -131,6 +134,12 @@ namespace mongo {
throw UserException( 8001 , (string)"SyncClusterConnection write op failed: " + err.str() );
}
BSONObj SyncClusterConnection::getLastErrorDetailed(){
if ( _lastError.isEmpty() )
return DBClientBase::getLastErrorDetailed();
return _lastError;
}
void SyncClusterConnection::_connect( string host ){
log() << "SyncClusterConnection connecting to [" << host << "]" << endl;
DBClientConnection * c = new DBClientConnection( true );
@ -179,7 +188,7 @@ namespace mongo {
auto_ptr<DBClientCursor> SyncClusterConnection::query(const string &ns, Query query, int nToReturn, int nToSkip,
const BSONObj *fieldsToReturn, int queryOptions, int batchSize ){
_lastError = BSONObj();
if ( ns.find( ".$cmd" ) != string::npos ){
string cmdName = query.obj.firstElement().fieldName();
int lockType = _lockType( cmdName );

View File

@ -83,7 +83,8 @@ namespace mongo {
virtual string getServerAddress() const { return _address; }
virtual bool isFailed() const { return false; }
virtual string toString() { return _toString(); }
virtual BSONObj getLastErrorDetailed();
private:
SyncClusterConnection( SyncClusterConnection& prev );
@ -99,6 +100,8 @@ namespace mongo {
vector<DBClientConnection*> _conns;
map<string,int> _lockTypes;
mongo::mutex _mutex;
BSONObj _lastError;
};

View File

@ -164,7 +164,10 @@ namespace mongo {
_unlock.reset(new dbtempreleasecond());
}
~YieldLock(){
assert( ! _unlock );
if ( _unlock ){
log( LL_WARNING ) << "ClientCursor::YieldLock not closed properly" << endl;
relock();
}
}
bool stillOk(){

View File

@ -152,7 +152,6 @@ namespace mongo {
anObjBuilder.append( "help" , help.str() );
}
else {
c->_timer.reset();
ok = c->run( nsToDatabase( ns ) , jsobj, errmsg, anObjBuilder, false);
}
@ -188,8 +187,8 @@ namespace mongo {
return c->locktype();
}
void Command::logIfSlow( const string& msg ) const {
int ms = _timer.millis();
void Command::logIfSlow( const Timer& timer, const string& msg ) {
int ms = timer.millis();
if ( ms > cmdLine.slowMS ){
out() << msg << " took " << ms << " ms." << endl;
}

View File

@ -112,14 +112,12 @@ namespace mongo {
return BSONObj();
}
void logIfSlow( const string& msg) const;
static void logIfSlow( const Timer& cmdTimer, const string& msg);
static map<string,Command*> * _commands;
static map<string,Command*> * _commandsByBestName;
static map<string,Command*> * _webCommands;
Timer _timer; // started right before calling run
public:
static const map<string,Command*>* commandsByBestName() { return _commandsByBestName; }
static const map<string,Command*>* webCommands() { return _webCommands; }

View File

@ -957,7 +957,7 @@ namespace mongo {
}
} cmdFileMD5;
IndexDetails *cmdIndexDetailsForRange( const char *ns, string &errmsg, BSONObj &min, BSONObj &max, BSONObj &keyPattern ) {
static IndexDetails *cmdIndexDetailsForRange( const char *ns, string &errmsg, BSONObj &min, BSONObj &max, BSONObj &keyPattern ) {
if ( ns[ 0 ] == '\0' || min.isEmpty() || max.isEmpty() ) {
errmsg = "invalid command syntax (note: min and max are required)";
return 0;
@ -965,49 +965,6 @@ namespace mongo {
return indexDetailsForRange( ns, errmsg, min, max, keyPattern );
}
class CmdMedianKey : public Command {
public:
CmdMedianKey() : Command( "medianKey" ) {}
virtual bool slaveOk() const { return true; }
virtual LockType locktype() const { return READ; }
virtual void help( stringstream &help ) const {
help << "internal.\nexample: { medianKey:\"blog.posts\", keyPattern:{x:1}, min:{x:10}, max:{x:55} }\n"
"NOTE: This command may take awhile to run";
}
bool run(const string& dbname, BSONObj& jsobj, string& errmsg, BSONObjBuilder& result, bool fromRepl ){
const char *ns = jsobj.getStringField( "medianKey" );
BSONObj min = jsobj.getObjectField( "min" );
BSONObj max = jsobj.getObjectField( "max" );
BSONObj keyPattern = jsobj.getObjectField( "keyPattern" );
Client::Context ctx( ns );
IndexDetails *id = cmdIndexDetailsForRange( ns, errmsg, min, max, keyPattern );
if ( id == 0 )
return false;
int num = 0;
NamespaceDetails *d = nsdetails(ns);
int idxNo = d->idxNo(*id);
for( BtreeCursor c( d, idxNo, *id, min, max, false, 1 ); c.ok(); c.advance(), ++num );
num /= 2;
BtreeCursor c( d, idxNo, *id, min, max, false, 1 );
for( ; num; c.advance(), --num );
ostringstream os;
os << "Finding median for index: " << keyPattern << " between " << min << " and " << max;
logIfSlow( os.str() );
if ( !c.ok() ) {
errmsg = "no index entries in the specified range";
return false;
}
result.append( "median", c.prettyKey( c.currKey() ) );
return true;
}
} cmdMedianKey;
class CmdDatasize : public Command {
public:
CmdDatasize() : Command( "dataSize", false, "datasize" ) {}
@ -1047,6 +1004,7 @@ namespace mongo {
long long maxSize = jsobj["maxSize"].numberLong();
long long maxObjects = jsobj["maxObjects"].numberLong();
Timer timer;
long long size = 0;
long long numObjects = 0;
while( c->ok() ) {
@ -1067,11 +1025,11 @@ namespace mongo {
if ( ! min.isEmpty() ){
os << " between " << min << " and " << max;
}
logIfSlow( os.str() );
logIfSlow( timer , os.str() );
result.append( "size", (double)size );
result.append( "numObjects" , (double)numObjects );
result.append( "millis" , _timer.millis() );
result.append( "millis" , timer.millis() );
return true;
}
} cmdDatasize;
@ -1595,9 +1553,11 @@ namespace mongo {
db.update(ns, origQuery, update.embeddedObjectUserCheck(), true);
if (cmdObj["new"].trueValue()){
BSONElement _id = origQuery["_id"];
BSONObj gle = db.getLastErrorDetailed();
BSONElement _id = gle["upserted"];
if (_id.eoo())
_id = db.getLastErrorDetailed()["upserted"];
_id = origQuery["_id"];
out = db.findOne(ns, QUERY("_id" << _id), fields);
}

View File

@ -552,13 +552,7 @@ namespace mongo {
}
ClientCursor::YieldLock yield (cursor.get());
try {
state.finalReduce( all );
}
catch ( ... ){
yield.relock();
throw;
}
state.finalReduce( all );
all.clear();
prev = o;

View File

@ -370,11 +370,12 @@ namespace mongo {
class CountOp : public QueryOp {
public:
CountOp( const BSONObj &spec ) :
count_(),
skip_( spec["skip"].numberLong() ),
limit_( spec["limit"].numberLong() ),
bc_() {}
CountOp( const string& ns , const BSONObj &spec ) :
_ns(ns), count_(),
skip_( spec["skip"].numberLong() ),
limit_( spec["limit"].numberLong() ),
bc_(),_yieldTracker(256,20){
}
virtual void _init() {
c_ = qp().newCursor();
@ -391,6 +392,19 @@ namespace mongo {
setComplete();
return;
}
if ( _cc || _yieldTracker.ping() ){
if ( ! _cc )
_cc.reset( new ClientCursor( QueryOption_NoCursorTimeout , c_ , _ns.c_str() ) );
if ( ! _cc->yieldSometimes() ){
c_.reset();
_cc.reset();
setComplete();
return;
}
}
if ( bc_ ) {
if ( firstMatch_.isEmpty() ) {
firstMatch_ = bc_->currKeyNode().key;
@ -407,7 +421,8 @@ namespace mongo {
}
_gotOne();
}
} else {
}
else {
if ( !matcher()->matches(c_->currKey(), c_->currLoc() ) ) {
}
else if( !c_->getsetdup(c_->currLoc()) ) {
@ -417,7 +432,7 @@ namespace mongo {
c_->advance();
}
virtual QueryOp *_createChild() const {
CountOp *ret = new CountOp( BSONObj() );
CountOp *ret = new CountOp( _ns , BSONObj() );
ret->count_ = count_;
ret->skip_ = skip_;
ret->limit_ = limit_;
@ -441,6 +456,8 @@ namespace mongo {
count_++;
}
string _ns;
long long count_;
long long skip_;
long long limit_;
@ -448,6 +465,9 @@ namespace mongo {
BSONObj query_;
BtreeCursor *bc_;
BSONObj firstMatch_;
ElapsedTracker _yieldTracker;
shared_ptr<ClientCursor> _cc;
};
/* { count: "collectionname"[, query: <query>] }
@ -477,7 +497,7 @@ namespace mongo {
return num;
}
MultiPlanScanner mps( ns, query, BSONObj() );
CountOp original( cmd );
CountOp original( ns , cmd );
shared_ptr< CountOp > res = mps.runOp( original );
if ( !res->complete() ) {
log() << "Count with ns: " << ns << " and query: " << query

View File

@ -77,12 +77,12 @@ namespace mongo {
struct GetMoreWaitException { };
QueryResult* processGetMore(const char *ns, int ntoreturn, long long cursorid , CurOp& op, int pass, bool& exhaust);
struct UpdateResult {
bool existing;
bool mod;
long long num;
OID upserted;
bool existing; // if existing objects were modified
bool mod; // was this a $ mod
long long num; // how many objects touched
OID upserted; // if something was upserted, the new _id of the object
UpdateResult( bool e, bool m, unsigned long long n , const BSONObj& upsertedObject = BSONObj() )
: existing(e) , mod(m), num(n){

View File

@ -107,7 +107,11 @@ namespace mongo {
void update( const BSONObj& rid , const string& host , const string& ns , OpTime last ){
scoped_lock mylk(_mutex);
#ifdef _DEBUG
MongoFileAllowWrites allowWrites;
#endif
Ident ident(rid,host,ns);
Info& i = _slaves[ ident ];
if ( i.loc ){
@ -116,6 +120,7 @@ namespace mongo {
}
dbMutex.assertAtLeastReadLocked();
BSONObj res;
if ( Helpers::findOne( NS , ident.obj , res ) ){
assert( res["syncedTo"].type() );
@ -133,8 +138,9 @@ namespace mongo {
_started = true;
go();
}
}
}
bool opReplicatedEnough( OpTime op , int w ){
if ( w <= 1 || ! replSettings.master )
return true;

6
debian/changelog vendored
View File

@ -1,3 +1,9 @@
mongodb (1.5.4) unstable; urgency=low
* sharding lots of changes, see http://jira.mongodb.org/browse/SERVER/fixforversion/10157
-- Richard Kreuter <richard@10gen.com> Fri, 2 Jul 2010 16:56:28 -0500
mongodb (1.5.3) unstable; urgency=low
* sharding lots of changes, see http://jira.mongodb.org/browse/SERVER/fixforversion/10157

View File

@ -3,7 +3,7 @@
#---------------------------------------------------------------------------
DOXYFILE_ENCODING = UTF-8
PROJECT_NAME = MongoDB
PROJECT_NUMBER = 1.5.3
PROJECT_NUMBER = 1.5.4
OUTPUT_DIRECTORY = docs
CREATE_SUBDIRS = NO
OUTPUT_LANGUAGE = English

View File

@ -49,7 +49,6 @@ var out = t.findAndModify({
upsert: true,
'new': true
});
printjson(out);
assert.neq(out._id, undefined);
assert.eq(out.a, 1);
assert.eq(out.b, 2);

View File

@ -56,7 +56,26 @@ for ( i=0; i<3; i++ ){
printjson( hashes );
for ( i=1; i<hashes.length; i++ ){
assert.eq( hashes[0].md5 , hashes[i].md5 , "hashes different" );
if ( hashes[0].md5 == hashes[i].md5 )
continue;
assert.eq( hashes[0].numCollections , hashes[i].numCollections , "num collections" );
for ( var k in hashes[0].collections ){
if ( hashes[0].collections[k] ==
hashes[i].collections[k] )
continue;
print( "collection " + k + " is differnet" );
print( "----" );
s._connections[0].getDB( "config" ).getCollection( k ).find().sort( { _id : 1 } ).forEach( printjsononeline );
print( "----" );
s._connections[i].getDB( "config" ).getCollection( k ).find().sort( { _id : 1 } ).forEach( printjsononeline );
print( "----" );
}
throw "hashes different";
}
s.stop();

81
jstests/splitvector.js Normal file
View File

@ -0,0 +1,81 @@
// -------------------------
// SPLITVECTOR TEST UTILS
// -------------------------
// -------------------------
// assertChunkSizes verifies that a given 'splitVec' divides the 'test.jstest_splitvector'
// collection in 'maxChunkSize' approximately-sized chunks. Its asserts fail otherwise.
// @param splitVec: an array with keys for field 'x'
// e.g. [ { x : 1927 }, { x : 3855 }, ...
// @param numDocs: domain of 'x' field
// e.g. 20000
// @param maxChunkSize is in MBs.
//
// -------------------------
// assertChunkSizes verifies that a given 'splitVec' divides the 'test.jstests_splitvector'
// collection into approximately 'maxChunkSize'-sized chunks; its asserts fail otherwise.
// Only the final chunk is allowed to be smaller.
// @param splitVec: an array with boundary keys for field 'x'
//                  e.g. [ { x : 1927 }, { x : 3855 }, ...
// @param numDocs: domain of the 'x' field, e.g. 20000
// @param maxChunkSize is in bytes here (caller passes MBs already scaled).
//
assertChunkSizes = function ( splitVec , numDocs , maxChunkSize ){
    // Bracket the split points with sentinels below and above the data range.
    var bounds = [ { x: -1 } ].concat( splitVec );
    bounds.push( { x: numDocs + 1 } );

    for ( var j = 0; j < bounds.length - 1; j++ ) {
        var lo = bounds[ j ];
        var hi = bounds[ j + 1 ];
        var chunkSize = db.runCommand( { datasize: "test.jstests_splitvector" , min: lo , max: hi } ).size;

        // It is okay for the last chunk to be smaller. A collection's size does not
        // need to be exactly a multiple of maxChunkSize.
        if ( j < bounds.length - 2 )
            assert.close( maxChunkSize , chunkSize , "A" + j , -3 );
        else
            assert.gt( maxChunkSize , chunkSize , "A" + j );
    }
}
// -------------------------
// TESTS START HERE
// -------------------------
f = db.jstests_splitvector;
f.drop();
// -------------------------
// Case: missing parameters — command must fail without keyPattern/maxChunkSize
assert.eq( false, db.runCommand( { splitVector: "test.jstests_splitvector" } ).ok );
assert.eq( false, db.runCommand( { splitVector: "test.jstests_splitvector" , maxChunkSize: 1} ).ok );
// -------------------------
// Case: missing index — splitVector requires an index over the key pattern
assert.eq( false, db.runCommand( { splitVector: "test.jstests_splitvector" , keyPattern: {x:1} , maxChunkSize: 1 } ).ok );
// -------------------------
// Case: empty collection — no data means no split points
f.ensureIndex( { x: 1} );
assert.eq( [], db.runCommand( { splitVector: "test.jstests_splitvector" , keyPattern: {x:1} , maxChunkSize: 1 } ).splitKeys );
// -------------------------
// Case: uniform collection — every document is the same size
f.drop();
f.ensureIndex( { x: 1 } );
// Get baseline document size (one ~500-byte document)
filler = "";
while( filler.length < 500 ) filler += "a";
f.save( { x: 0, y: filler } );
docSize = db.runCommand( { datasize: "test.jstests_splitvector" } ).size;
assert.gt( docSize, 500 );
// Fill collection and get split vector for 1MB maxChunkSize
numDocs = 20000;
for( i=1; i<numDocs; i++ ){
f.save( { x: i, y: filler } );
}
// Expected number of split keys is roughly totalBytes / 1MB (tolerance -1 digit)
res = db.runCommand( { splitVector: "test.jstests_splitvector" , keyPattern: {x:1} , maxChunkSize: 1 } );
assert.eq( true , res.ok );
assert.close( numDocs*docSize / (1<<20) , res.splitKeys.length , "num split keys" , -1 );
assertChunkSizes( res.splitKeys , numDocs, (1<<20) * 0.9 ); // splitVector cuts at 90% of maxChunkSize

View File

@ -18,11 +18,7 @@ md5_computed = db.runCommand({filemd5: file_obj._id}).md5;
assert.eq( md5 , md5_stored , "A 1" );
assert.eq( md5 , md5_computed, "A 2" );
try {
listFiles(t.ext);
} catch (e) {
runProgram('mkdir', t.ext);
}
mkdir(t.ext);
t.runTool( "files" , "-d" , t.baseName , "get" , filename , '-l' , t.extFile );
md5 = md5sumFile(t.extFile);

View File

@ -1,5 +1,5 @@
Name: mongo
Version: 1.5.3
Version: 1.5.4
Release: mongodb_1%{?dist}
Summary: mongo client shell and tools
License: AGPL 3.0

View File

@ -71,9 +71,9 @@ namespace mongo {
return NULL;
}
log(4) << "min: " << min.first << "\t" << min.second << endl;
log(4) << "max: " << max.first << "\t" << max.second << endl;
log(4) << "draining: " << ! drainingShards.empty() << "(" << drainingShards.size() << ")" << endl;
log(1) << "min: " << min.first << "\t" << min.second << endl;
log(1) << "max: " << max.first << "\t" << max.second << endl;
log(1) << "draining: " << ! drainingShards.empty() << "(" << drainingShards.size() << ")" << endl;
// Solving imbalances takes a higher priority than draining shards. Many shards can
// be draining at once but we choose only one of them to cater to per round.

View File

@ -54,6 +54,7 @@ namespace mongo {
{}
string Chunk::getns() const {
assert( _manager );
return _manager->getns();
}
@ -156,6 +157,11 @@ namespace mongo {
}
ChunkPtr Chunk::split( const BSONObj& m ){
DistributedLock lockSetup( ConnectionString( modelServer() , ConnectionString::SYNC ) , getns() );
dist_lock_try dlk( &lockSetup , (string)"split-" + toString() );
uassert( 10166 , "locking namespace failed" , dlk.got() );
uassert( 10165 , "can't split as shard that doesn't have a manager" , _manager );
log(1) << " before split on: " << m << '\n'
@ -165,7 +171,6 @@ namespace mongo {
appendShortVersion( "before" , detail );
uassert( 10166 , "locking namespace on server failed" , lockNamespaceOnServer( getShard() , _manager->getns() ) );
uassert( 13003 , "can't split chunk. does it have only one distinct value?" ,
!m.isEmpty() && _min.woCompare(m) && _max.woCompare(m));
@ -716,24 +721,20 @@ namespace mongo {
void ChunkManager::drop( ChunkManagerPtr me ){
rwlock lk( _lock , true );
DistributedLock lockSetup( ConnectionString( configServer.modelServer() , ConnectionString::SYNC ) , getns() );
dist_lock_try dlk( &lockSetup , "drop" );
uassert( 13331 , "locking namespace failed" , dlk.got() );
uassert( 10174 , "config servers not all up" , configServer.allUp() );
map<Shard,ShardChunkVersion> seen;
set<Shard> seen;
log(1) << "ChunkManager::drop : " << _ns << endl;
// lock all shards so no one can do a split/migrate
for ( ChunkMap::const_iterator i=_chunkMap.begin(); i!=_chunkMap.end(); ++i ){
ChunkPtr c = i->second;
ShardChunkVersion& version = seen[ c->getShard() ];
if ( version.isSet() )
continue;
version = lockNamespaceOnServer( c->getShard() , _ns );
if ( version.isSet() )
continue;
// rollback
uassert( 10175 , "don't know how to rollback locks b/c drop can't lock all shards" , 0 );
seen.insert( c->getShard() );
}
log(1) << "ChunkManager::drop : " << _ns << "\t all locked" << endl;
@ -745,8 +746,8 @@ namespace mongo {
// delete data from mongod
for ( map<Shard,ShardChunkVersion>::iterator i=seen.begin(); i!=seen.end(); i++ ){
ScopedDbConnection conn( i->first );
for ( set<Shard>::iterator i=seen.begin(); i!=seen.end(); i++ ){
ScopedDbConnection conn( *i );
conn->dropCollection( _ns );
conn.done();
}
@ -765,8 +766,8 @@ namespace mongo {
conn.done();
log(1) << "ChunkManager::drop : " << _ns << "\t removed chunk data" << endl;
for ( map<Shard,ShardChunkVersion>::iterator i=seen.begin(); i!=seen.end(); i++ ){
ScopedDbConnection conn( i->first );
for ( set<Shard>::iterator i=seen.begin(); i!=seen.end(); i++ ){
ScopedDbConnection conn( *i );
BSONObj res;
if ( ! setShardVersion( conn.conn() , _ns , 0 , true , res ) )
throw UserException( 8071 , (string)"OH KNOW, cleaning up after drop failed: " + res.toString() );
@ -845,6 +846,8 @@ namespace mongo {
BSONObj cmd = cmdBuilder.obj();
log(7) << "ChunkManager::save update: " << cmd << endl;
ScopedDbConnection conn( Chunk(0).modelServer() );
BSONObj res;
bool ok = conn->runCommand( "config" , cmd , res );
@ -1104,19 +1107,5 @@ namespace mongo {
return conn.runCommand( "admin" , cmd , result );
}
bool lockNamespaceOnServer( const Shard& shard, const string& ns ){
ScopedDbConnection conn( shard.getConnString() );
bool res = lockNamespaceOnServer( conn.conn() , ns );
conn.done();
return res;
}
bool lockNamespaceOnServer( DBClientBase& conn , const string& ns ){
// TODO: replace this
//BSONObj lockResult;
//return setShardVersion( conn , ns , grid.getNextOpTime() , true , lockResult );
return true;
}
} // namespace mongo

View File

@ -427,7 +427,15 @@ namespace mongo {
BSONObjCmp _cmp;
};
/*
struct chunk_lock {
chunk_lock( const Chunk* c ){
}
Chunk _c;
};
*/
inline string Chunk::genID(){ return genID(_manager->getns(), _min); }
} // namespace mongo

View File

@ -18,6 +18,7 @@
#include "pch.h"
#include "../util/message.h"
#include "../util/stringutils.h"
#include "../util/unittest.h"
#include "../client/connpool.h"
#include "../client/model.h"
@ -179,14 +180,6 @@ namespace mongo {
}
}
void DBConfig::save( bool check ){
Model::save( check );
scoped_lock lk( _lock );
for ( map<string,ChunkManagerPtr>::iterator i=_shards.begin(); i != _shards.end(); i++)
i->second->save();
}
bool DBConfig::reload(){
// TODO: i don't think is 100% correct
return doload();
@ -373,17 +366,7 @@ namespace mongo {
bool ConfigServer::init( string s ){
vector<string> configdbs;
while ( true ){
size_t idx = s.find( ',' );
if ( idx == string::npos ){
configdbs.push_back( s );
break;
}
configdbs.push_back( s.substr( 0 , idx ) );
s = s.substr( idx + 1 );
}
splitStringDelim( s, configdbs, ',' );
return init( configdbs );
}

View File

@ -117,8 +117,6 @@ namespace mongo {
bool reload();
bool dropDatabase( string& errmsg );
virtual void save( bool check=true);
virtual string modelServer();

View File

@ -32,6 +32,7 @@
#include "../db/query.h"
#include "../client/connpool.h"
#include "../client/distlock.h"
#include "../util/queue.h"
@ -121,16 +122,39 @@ namespace mongo {
// 2. TODO
DistributedLock lockSetup( ConnectionString( shardingState.getConfigServer() , ConnectionString::SYNC ) , ns );
dist_lock_try dlk( &lockSetup , (string)"migrate-" + filter.toString() );
if ( ! dlk.got() ){
errmsg = "someone else has the lock";
result.append( "who" , dlk.other() );
return false;
}
ShardChunkVersion maxVersion;
string myOldShard;
{
ScopedDbConnection conn( shardingState.getConfigServer() );
BSONObj x = conn->findOne( ShardNS::chunk , Query( BSON( "ns" << ns ) ).sort( BSON( "lastmod" << -1 ) ) );
maxVersion = x["lastmod"];
x = conn->findOne( ShardNS::chunk , shardId.wrap( "_id" ) );
myOldShard = x["shard"].String();
if ( myOldShard != fromShard.getName() ){
errmsg = "i'm out of date";
result.append( "from" , fromShard.getName() );
result.append( "official" , myOldShard );
return false;
}
if ( maxVersion < shardingState.getVersion( ns ) ){
errmsg = "official version less than mine?";;
result.appendTimestamp( "officialVersion" , maxVersion );
result.appendTimestamp( "myVersion" , shardingState.getVersion( ns ) );
return false;
}
conn.done();
}
@ -167,6 +191,7 @@ namespace mongo {
{
dblock lk;
assert( myVersion > shardingState.getVersion( ns ) );
shardingState.setVersion( ns , myVersion );
assert( myVersion == shardingState.getVersion( ns ) );
}

177
s/d_split.cpp Normal file
View File

@ -0,0 +1,177 @@
// d_split.cpp
/**
* Copyright (C) 2008 10gen Inc.
*
* This program is free software: you can redistribute it and/or modify
* it under the terms of the GNU Affero General Public License, version 3,
* as published by the Free Software Foundation.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU Affero General Public License for more details.
*
* You should have received a copy of the GNU Affero General Public License
* along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
#include "pch.h"
#include <map>
#include <string>
#include "../db/btree.h"
#include "../db/commands.h"
#include "../db/dbmessage.h"
#include "../db/jsobj.h"
#include "../db/query.h"
#include "../db/queryoptimizer.h"
namespace mongo {
// TODO: Fold these checks into each command.
// Resolve the index to use for a [min,max] range scan over 'ns'.
// On malformed arguments (empty ns, or missing min/max), sets 'errmsg'
// and returns 0; otherwise delegates to indexDetailsForRange.
static IndexDetails *cmdIndexDetailsForRange( const char *ns, string &errmsg, BSONObj &min, BSONObj &max, BSONObj &keyPattern ) {
    const bool badArgs = ( ns[ 0 ] == '\0' ) || min.isEmpty() || max.isEmpty();
    if ( badArgs ) {
        errmsg = "invalid command syntax (note: min and max are required)";
        return 0;
    }
    return indexDetailsForRange( ns, errmsg, min, max, keyPattern );
}
// Command: { medianKey: "<ns>", keyPattern: {...}, min: {...}, max: {...} }
// Finds the key at the midpoint of a btree index range by walking the
// range twice: first to count entries, then to stop at count/2.
class CmdMedianKey : public Command {
public:
CmdMedianKey() : Command( "medianKey" ) {}
virtual bool slaveOk() const { return true; }
virtual LockType locktype() const { return READ; }
virtual void help( stringstream &help ) const {
help <<
"Internal command.\n"
"example: { medianKey:\"blog.posts\", keyPattern:{x:1}, min:{x:10}, max:{x:55} }\n"
"NOTE: This command may take a while to run";
}
bool run(const string& dbname, BSONObj& jsobj, string& errmsg, BSONObjBuilder& result, bool fromRepl ){
const char *ns = jsobj.getStringField( "medianKey" );
BSONObj min = jsobj.getObjectField( "min" );
BSONObj max = jsobj.getObjectField( "max" );
BSONObj keyPattern = jsobj.getObjectField( "keyPattern" );
Client::Context ctx( ns );
// Validates arguments and locates the index covering [min,max].
IndexDetails *id = cmdIndexDetailsForRange( ns, errmsg, min, max, keyPattern );
if ( id == 0 )
return false;
Timer timer;
// First pass: count all index entries in the range.
int num = 0;
NamespaceDetails *d = nsdetails(ns);
int idxNo = d->idxNo(*id);
for( BtreeCursor c( d, idxNo, *id, min, max, false, 1 ); c.ok(); c.advance(), ++num );
num /= 2;
// Second pass: advance a fresh cursor halfway to reach the median entry.
BtreeCursor c( d, idxNo, *id, min, max, false, 1 );
for( ; num; c.advance(), --num );
ostringstream os;
os << "Finding median for index: " << keyPattern << " between " << min << " and " << max;
logIfSlow( timer , os.str() );
// An exhausted cursor here means the range held no entries at all.
if ( !c.ok() ) {
errmsg = "no index entries in the specified range";
return false;
}
result.append( "median", c.prettyKey( c.currKey() ) );
return true;
}
} cmdMedianKey;
// Command: { splitVector: "<db.collection>", keyPattern: {field:1}, maxChunkSize: <MB> }
// Scans the shard-key index and returns "splitKeys": a vector of keys that would
// partition the collection into chunks of roughly maxChunkSize megabytes each.
class SplitVector : public Command {
public:
    SplitVector() : Command( "splitVector" , false ){}
    virtual bool slaveOk() const { return false; }
    virtual LockType locktype() const { return READ; }
    virtual void help( stringstream &help ) const {
        help <<
            "Internal command.\n"
            "example: { splitVector : \"myLargeCollection\" , keyPattern : {x:1} , maxChunkSize : 200 }\n"
            "maxChunkSize unit in MBs\n"
            "NOTE: This command may take a while to run";
    }

    bool run(const string& dbname, BSONObj& jsobj, string& errmsg, BSONObjBuilder& result, bool fromRepl ){
        const char* ns = jsobj.getStringField( "splitVector" );
        BSONObj keyPattern = jsobj.getObjectField( "keyPattern" );

        // maxChunkSize is mandatory; it arrives in MB and is converted to bytes.
        long long maxChunkSize = 0;
        BSONElement maxSizeElem = jsobj[ "maxChunkSize" ];
        if ( ! maxSizeElem.eoo() ){
            maxChunkSize = maxSizeElem.numberLong() << 20;  // MB -> bytes
        } else {
            errmsg = "need to specify the desired max chunk size";
            return false;
        }

        Client::Context ctx( ns );

        // Build a [minKey, maxKey] range over the split field so the cursor walks
        // the entire index.
        const char* field = keyPattern.firstElement().fieldName();
        BSONObjBuilder minBuilder;
        minBuilder.appendMinKey( field );
        BSONObj min = minBuilder.obj();
        BSONObjBuilder maxBuilder;
        maxBuilder.appendMaxKey( field );
        BSONObj max = maxBuilder.obj();

        IndexDetails *idx = cmdIndexDetailsForRange( ns , errmsg , min , max , keyPattern );
        if ( idx == NULL ){
            errmsg = "couldn't find index over splitting key";
            return false;
        }

        NamespaceDetails *d = nsdetails( ns );
        BtreeCursor c( d , d->idxNo(*idx) , *idx , min , max , false , 1 );

        // We'll use the average object size and number of objects to find approximately
        // how many keys each chunk should have. We'll split a little smaller than
        // specified by 'maxChunkSize', assuming a recently sharded collection is still
        // going to grow.
        const long long dataSize = d->datasize;
        const long long recCount = d->nrecords;
        long long keyCount = 0;
        if (( dataSize > 0 ) && ( recCount > 0 )){
            const long long avgRecSize = dataSize / recCount;
            keyCount = 0.9 * maxChunkSize / avgRecSize;
        }

        // We traverse the index and add the keyCount-th key to the result vector. If that
        // key appeared in the vector before, we omit it. The assumption here is that all
        // the instances of a key value live in the same chunk.
        Timer timer;
        long long currCount = 0;
        vector<BSONObj> splitKeys;
        BSONObj currKey;
        while ( c.ok() ){
            currCount++;
            if ( currCount > keyCount ){
                // Never pick the same key value twice; keep scanning until the key
                // changes. BUGFIX: the cursor must be advanced before 'continue' —
                // otherwise a run of identical key values loops forever.
                if ( ! currKey.isEmpty() && (currKey.woCompare( c.currKey() ) == 0 ) ){
                    c.advance();
                    continue;
                }
                currKey = c.currKey();
                splitKeys.push_back( c.prettyKey( currKey ) );
                currCount = 0;
            }
            c.advance();
        }

        ostringstream os;
        os << "Finding the split vector for " << ns << " over "<< keyPattern;
        logIfSlow( timer , os.str() );

        // Warning: we are sending back an array of keys but are currently limited to
        // 4MB worth of 'result' size. This should be okay for now.
        result.append( "splitKeys" , splitKeys );
        return true;
    }
} cmdSplitVector;
} // namespace mongo

View File

@ -45,8 +45,5 @@ namespace mongo {
bool setShardVersion( DBClientBase & conn , const string& ns , ShardChunkVersion version , bool authoritative , BSONObj& result );
bool lockNamespaceOnServer( const Shard& shard , const string& ns );
bool lockNamespaceOnServer( DBClientBase& conn , const string& ns );
}

View File

@ -710,10 +710,12 @@ namespace mongo {
c.setProperty( obj, "floatApprox", argv[ 0 ] );
} else {
string num = c.toString( argv[ 0 ] );
PRINT(num);
const char *numStr = num.c_str();
long long n;
try {
n = parseLL( numStr );
PRINT(n);
} catch ( const AssertionException & ) {
smuassert( cx , "could not convert string to long long" , false );
}

View File

@ -250,6 +250,11 @@ namespace mongo {
return BSON( "" << digestToString( d ) );
}
// Shell native: mkdir(path). Creates the directory and any missing parent
// directories; always reports success as { "" : true }.
BSONObj mkdir(const BSONObj& args){
    const string path = args.firstElement().String();
    boost::filesystem::create_directories( path );
    return BSON( "" << true );
}
BSONObj removeFile(const BSONObj& args){
BSONElement e = oneArg(args);
bool found = false;
@ -847,6 +852,7 @@ namespace mongo {
scope.injectNative( "resetDbpath", ResetDbpath );
scope.injectNative( "copyDbpath", CopyDbpath );
scope.injectNative( "md5sumFile", md5sumFile );
scope.injectNative( "mkdir" , mkdir );
#endif
}

View File

@ -42,7 +42,8 @@ namespace mongo {
// b.lock(); alone is fine too
// only checked on _DEBUG builds.
string a,b;
void aBreakPoint(){} // empty debugger hook: called below when mutex 'a' is entered, so a breakpoint here traps that path
void programEnding();
MutexDebugger();
void entering(mid m) {
@ -54,6 +55,7 @@ namespace mongo {
Preceeding &preceeding = *_preceeding;
if( a == m ) {
aBreakPoint();
if( preceeding[b.c_str()] ) {
cout << "mutex problem " << b << " was locked before " << a << endl;
assert(false);

View File

@ -28,11 +28,17 @@
#include "../db/cmdline.h"
#include "../client/dbclient.h"
#ifndef _WIN32
#include <sys/resource.h>
#endif
namespace mongo {
bool noUnixSocket = false;
bool objcheck = false;
void checkTicketNumbers();
// if you want trace output:
#define mmm(x)
@ -87,6 +93,7 @@ namespace mongo {
/* listener ------------------------------------------------------------------- */
void Listener::initAndListen() {
checkTicketNumbers();
vector<SockAddr> mine = ipToAddrs(_ip.c_str(), _port);
vector<int> socks;
SOCKET maxfd = 0; // needed for select()
@ -645,5 +652,34 @@ namespace mongo {
int getClientId(){
return clientId.get();
}
// Upper bound on simultaneous incoming connections.
// Windows: fixed at 20,000. Elsewhere: 80% of the soft RLIMIT_NOFILE limit,
// capped at 20,000.
int getMaxConnections(){
#ifdef _WIN32
    return 20000;
#else
    struct rlimit limit;
    // Perform the syscall OUTSIDE assert(): a side effect inside assert()
    // disappears if the macro ever compiles to nothing, leaving 'limit'
    // uninitialized.
    int rc = getrlimit(RLIMIT_NOFILE,&limit);
    assert( rc == 0 );

    // leave ~20% of descriptors free for data files, replication, etc.
    int max = (int)(limit.rlim_cur * .8);

    log(1) << "fd limit"
           << " hard:" << limit.rlim_max
           << " soft:" << limit.rlim_cur
           << " max conn: " << max
           << endl;

    if ( max > 20000 )
        max = 20000;

    return max;
#endif
}
// Resize the global connection ticket pool to the fd-derived connection cap
// (see getMaxConnections()). Called from Listener::initAndListen().
void checkTicketNumbers(){
    connTicketHolder.resize( getMaxConnections() );
}
// Global pool of connection tickets; starts at 20000 and is resized at listen
// time by checkTicketNumbers() to respect the process fd limit.
TicketHolder connTicketHolder(20000);
} // namespace mongo

View File

@ -122,7 +122,6 @@ namespace mongo {
return new PortMessageServer( opts , handler );
}
TicketHolder connTicketHolder(20000);
}
#endif

View File

@ -58,8 +58,17 @@ namespace mongo {
// no-ops in production
// (other build configurations presumably supply real implementations in the
//  branch guarded by the #endif below — not visible here; confirm)
inline void MongoFile::lockAll() {}
inline void MongoFile::unlockAll() {}
#endif
/** RAII scope guard: calls MongoFile::lockAll() on construction and
 *  MongoFile::unlockAll() on destruction. In production builds both calls are
 *  no-ops (inline definitions above); the name suggests it permits writes to
 *  mapped files for the guard's lifetime in other builds — TODO confirm.
 */
struct MongoFileAllowWrites {
    MongoFileAllowWrites(){
        MongoFile::lockAll();
    }
    ~MongoFileAllowWrites(){
        MongoFile::unlockAll();
    }
};
/** template for what a new storage engine's class definition must implement
PRELIMINARY - subject to change.

38
util/stringutils.cpp Normal file
View File

@ -0,0 +1,38 @@
// stringutils.cpp
/**
* Copyright (C) 2008 10gen Inc.
*
* This program is free software: you can redistribute it and/or modify
* it under the terms of the GNU Affero General Public License, version 3,
* as published by the Free Software Foundation.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU Affero General Public License for more details.
*
* You should have received a copy of the GNU Affero General Public License
* along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
#include "pch.h"
namespace mongo {
/**
 * Split 'str' on every occurrence of 'delim', appending the pieces to 'vec'.
 * An empty input yields one empty element; a trailing delimiter yields a
 * trailing "" (matching the previous behavior exactly).
 *
 * Improvement: scan with a running offset instead of copying the remaining
 * tail on every iteration — the old substr-and-reassign loop was O(n^2) in
 * the string length.
 */
void splitStringDelim( const string& str, vector<string>& vec, char delim ){
    size_t pos = 0;
    while ( true ){
        size_t idx = str.find( delim , pos );
        if ( idx == string::npos ){
            vec.push_back( str.substr( pos ) );
            break;
        }
        vec.push_back( str.substr( pos , idx - pos ) );
        pos = idx + 1;
    }
}
} // namespace mongo

22
util/stringutils.h Normal file
View File

@ -0,0 +1,22 @@
// stringutils.h
/* Copyright 2010 10gen Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
namespace mongo {
void splitStringDelim( const string& str, vector<string>& vec, char delim );
} // namespace mongo

View File

@ -124,8 +124,7 @@ namespace mongo {
errno = 0;
ret = strtoll( n, &endPtr, 10 );
uassert( 13305, "could not convert string to long long", *endPtr == 0 && errno == 0 );
#else
#if _MSC_VER>=1600 // 1600 is VS2k10 1500 is VS2k8
#elif _MSC_VER>=1600 // 1600 is VS2k10 1500 is VS2k8
size_t endLen = 0;
try {
ret = stoll( n, &endLen, 10 );
@ -134,14 +133,9 @@ namespace mongo {
}
uassert( 13306, "could not convert string to long long", endLen != 0 && n[ endLen ] == 0 );
#else // stoll() wasn't introduced until VS 2010.
char* endPtr = (char *)&n[strlen(n) - 1];
try {
ret = _strtoi64( n, &endPtr, 10 );
} catch ( ... ) {
endPtr = 0;
}
uassert( 13310, "could not convert string to long long", *endPtr == 0 );
#endif // _MSC_VER >= 16
char* endPtr = 0;
ret = _strtoi64( n, &endPtr, 10 );
uassert( 13310, "could not convert string to long long", (*endPtr == 0) && (ret != _I64_MAX) && (ret != _I64_MIN) );
#endif // !defined(_WIN32)
return ret;
}

View File

@ -14,7 +14,7 @@ namespace mongo {
// mongo processes version support
//
const char versionString[] = "1.5.4-pre-";
const char versionString[] = "1.5.5-pre-";
string mongodVersion() {
stringstream ss;