0
0
mirror of https://github.com/mongodb/mongo.git synced 2024-12-01 09:32:32 +01:00
mongodb/s/d_state.cpp

650 lines
21 KiB
C++
Raw Normal View History

// @file d_state.cpp
2010-06-15 16:18:13 +02:00
/**
* Copyright (C) 2008 10gen Inc.
*
* This program is free software: you can redistribute it and/or modify
* it under the terms of the GNU Affero General Public License, version 3,
* as published by the Free Software Foundation.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU Affero General Public License for more details.
*
* You should have received a copy of the GNU Affero General Public License
* along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
/**
these are commands that live in mongod
mostly around shard management and checking
*/
#include "pch.h"
#include <map>
#include <string>
#include "../db/commands.h"
#include "../db/jsobj.h"
#include "../db/dbmessage.h"
#include "../db/query.h"
#include "../client/connpool.h"
#include "../util/queue.h"
#include "shard.h"
#include "d_logic.h"
#include "config.h"
2010-06-15 16:18:13 +02:00
using namespace std;
namespace mongo {
// -----ShardingState START ----
2011-01-04 06:40:41 +01:00
2010-06-15 16:49:47 +02:00
// Starts with sharding disabled; enable() turns it on once a config server is known.
ShardingState::ShardingState()
    : _enabled( false ) ,
      _mutex( "ShardingState" ) {
}
2011-01-04 06:40:41 +01:00
void ShardingState::enable( const string& server ) {
2010-06-15 16:18:13 +02:00
_enabled = true;
assert( server.size() );
if ( _configServer.size() == 0 )
_configServer = server;
else {
assert( server == _configServer );
}
}
2011-01-04 06:40:41 +01:00
void ShardingState::gotShardName( const string& name ) {
if ( _shardName.size() == 0 ) {
2010-12-28 22:11:13 +01:00
// TODO SERVER-2299 verify the name is sound w.r.t IPs
2010-06-15 16:49:47 +02:00
_shardName = name;
return;
}
2011-01-04 06:40:41 +01:00
2010-06-15 16:49:47 +02:00
if ( _shardName == name )
return;
stringstream ss;
2011-01-04 06:40:41 +01:00
ss << "gotShardName different than what i had before "
<< " before [" << _shardName << "] "
<< " got [" << name << "] "
;
2010-06-15 16:49:47 +02:00
uasserted( 13298 , ss.str() );
}
2011-01-04 06:40:41 +01:00
// Record this shard's host string, keeping only what precedes the first '/'
// (presumably stripping a replica-set style suffix — TODO confirm). The first
// host seen sticks; a different host later fails with uassert 13299.
void ShardingState::gotShardHost( string host ) {
    size_t slash = host.find( '/' );
    if ( slash != string::npos )
        host = host.substr( 0 , slash );

    if ( _shardHost.empty() ) {
        _shardHost = host;
        return;
    }

    if ( host == _shardHost )
        return;

    stringstream msg;
    msg << "gotShardHost different than what i had before "
        << " before [" << _shardHost << "] "
        << " got [" << host << "] ";
    uasserted( 13299 , msg.str() );
}
2011-01-04 06:40:41 +01:00
// Drop every piece of sharding state, returning to the pre-enable() condition.
void ShardingState::resetShardingState() {
    scoped_lock sl( _mutex );

    _enabled = false;
    _configServer.clear();
    _shardName.clear();
    _shardHost.clear();
    _chunks.clear();
}
2010-11-26 16:14:49 +01:00
// TODO we shouldn't need three ways for checking the version. Fix this.
2011-01-04 06:40:41 +01:00
bool ShardingState::hasVersion( const string& ns ) {
2010-06-15 16:49:47 +02:00
scoped_lock lk(_mutex);
2010-11-18 18:19:08 +01:00
ChunkManagersMap::const_iterator it = _chunks.find(ns);
return it != _chunks.end();
2010-06-15 16:18:13 +02:00
}
2011-01-04 06:40:41 +01:00
bool ShardingState::hasVersion( const string& ns , ConfigVersion& version ) {
2010-06-15 16:49:47 +02:00
scoped_lock lk(_mutex);
2010-11-18 18:19:08 +01:00
ChunkManagersMap::const_iterator it = _chunks.find(ns);
if ( it == _chunks.end() )
2010-06-15 16:18:13 +02:00
return false;
ShardChunkManagerPtr p = it->second;
version = p->getVersion();
2010-06-15 16:18:13 +02:00
return true;
}
2011-01-04 06:40:41 +01:00
2010-11-18 16:58:31 +01:00
// The version installed for 'ns', or 0 when this shard holds no chunk
// manager for that namespace.
const ConfigVersion ShardingState::getVersion( const string& ns ) const {
    scoped_lock sl( _mutex );

    ChunkManagersMap::const_iterator i = _chunks.find( ns );
    if ( i == _chunks.end() )
        return 0;

    return i->second->getVersion();
}
2011-01-04 06:40:41 +01:00
void ShardingState::donateChunk( const string& ns , const BSONObj& min , const BSONObj& max , ShardChunkVersion version ) {
scoped_lock lk( _mutex );
ChunkManagersMap::const_iterator it = _chunks.find( ns );
assert( it != _chunks.end() ) ;
ShardChunkManagerPtr p = it->second;
// empty shards should have version 0
version = ( p->getNumChunks() > 1 ) ? version : ShardChunkVersion( 0 , 0 );
ShardChunkManagerPtr cloned( p->cloneMinus( min , max , version ) );
_chunks[ns] = cloned;
}
void ShardingState::undoDonateChunk( const string& ns , const BSONObj& min , const BSONObj& max , ShardChunkVersion version ) {
scoped_lock lk( _mutex );
ChunkManagersMap::const_iterator it = _chunks.find( ns );
assert( it != _chunks.end() ) ;
ShardChunkManagerPtr p( it->second->clonePlus( min , max , version ) );
_chunks[ns] = p;
}
void ShardingState::splitChunk( const string& ns , const BSONObj& min , const BSONObj& max , const vector<BSONObj>& splitKeys ,
ShardChunkVersion version ) {
scoped_lock lk( _mutex );
2011-01-04 06:40:41 +01:00
ChunkManagersMap::const_iterator it = _chunks.find( ns );
assert( it != _chunks.end() ) ;
ShardChunkManagerPtr p( it->second->cloneSplit( min , max , splitKeys , version ) );
_chunks[ns] = p;
}
2011-01-04 06:40:41 +01:00
// Forget the chunk manager for 'ns' (e.g. after a drop); getVersion() then
// reports 0 for it.
void ShardingState::resetVersion( const string& ns ) {
    scoped_lock sl( _mutex );
    _chunks.erase( ns );
}
2011-01-04 06:40:41 +01:00
bool ShardingState::trySetVersion( const string& ns , ConfigVersion& version /* IN-OUT */ ) {
// fast path - requested version is at the same version as this chunk manager
//
2011-01-04 06:40:41 +01:00
// cases:
// + this shard updated the version for a migrate's commit (FROM side)
// a client reloaded chunk state from config and picked the newest version
// + two clients reloaded
2011-01-04 06:40:41 +01:00
// one triggered the 'slow path' (below)
// when the second's request gets here, the version is already current
{
scoped_lock lk( _mutex );
ChunkManagersMap::const_iterator it = _chunks.find( ns );
if ( it != _chunks.end() && it->second->getVersion() == version )
return true;
}
2011-01-04 06:40:41 +01:00
// slow path - requested version is different than the current chunk manager's, if one exists, so must check for
// newest version in the config server
//
// cases:
2011-01-04 06:40:41 +01:00
// + a chunk moved TO here
// (we don't bump up the version on the TO side but the commit to config does use higher version)
// a client reloads from config an issued the request
// + there was a take over from a secondary
// the secondary had no state (managers) at all, so every client request will fall here
// + a stale client request a version that's not current anymore
const string c = (_configServer == _shardHost) ? "" /* local */ : _configServer;
ShardChunkManagerPtr p( new ShardChunkManager( c , ns , _shardName ) );
{
scoped_lock lk( _mutex );
// since we loaded the chunk manager unlocked, other thread may have done the same
// make sure we keep the freshest config info only
ChunkManagersMap::const_iterator it = _chunks.find( ns );
if ( it == _chunks.end() || p->getVersion() >= it->second->getVersion() ) {
_chunks[ns] = p;
}
ShardChunkVersion oldVersion = version;
version = p->getVersion();
return oldVersion == version;
}
}
2011-01-04 06:40:41 +01:00
// Dump this shard's state into 'b': the enabled flag, and — when enabled —
// config server, shard identity, and the version of every loaded namespace.
void ShardingState::appendInfo( BSONObjBuilder& b ) {
    b.appendBool( "enabled" , _enabled );
    if ( ! _enabled )
        return;

    b.append( "configServer" , _configServer );
    b.append( "shardName" , _shardName );
    b.append( "shardHost" , _shardHost );

    {
        BSONObjBuilder versions( b.subobjStart( "versions" ) );

        scoped_lock sl( _mutex );
        for ( ChunkManagersMap::iterator i = _chunks.begin(); i != _chunks.end(); ++i ) {
            versions.appendTimestamp( i->first , i->second->getVersion() );
        }

        versions.done();
    }
}
2010-11-19 17:15:35 +01:00
bool ShardingState::needShardChunkManager( const string& ns ) const {
if ( ! _enabled )
return false;
2011-01-04 06:40:41 +01:00
if ( ! ShardedConnectionInfo::get( false ) )
return false;
return true;
}
2011-01-04 06:40:41 +01:00
// The chunk manager installed for 'ns', or an empty pointer when none exists.
ShardChunkManagerPtr ShardingState::getShardChunkManager( const string& ns ) {
    scoped_lock sl( _mutex );

    ChunkManagersMap::const_iterator i = _chunks.find( ns );
    return ( i == _chunks.end() ) ? ShardChunkManagerPtr() : i->second;
}
2010-06-15 16:18:13 +02:00
// the process-wide singleton holding this mongod's sharding state
ShardingState shardingState;
// -----ShardingState END ----
2011-01-04 06:40:41 +01:00
2010-06-15 16:18:13 +02:00
// -----ShardedConnectionInfo START ----
// thread-specific storage: each connection thread owns (at most) one ShardedConnectionInfo
boost::thread_specific_ptr<ShardedConnectionInfo> ShardedConnectionInfo::_tl;
2011-01-04 06:40:41 +01:00
// Fresh per-connection state: version checking enforced, no server id yet.
ShardedConnectionInfo::ShardedConnectionInfo() {
    _forceVersionOk = false;
    _id.clear();
}
2011-01-04 06:40:41 +01:00
// This thread's ShardedConnectionInfo. When none exists and 'create' is set,
// a new one is installed (the connection enters sharded mode); otherwise
// returns NULL.
ShardedConnectionInfo* ShardedConnectionInfo::get( bool create ) {
    ShardedConnectionInfo* info = _tl.get();

    if ( ! info && create ) {
        log(1) << "entering shard mode for connection" << endl;
        info = new ShardedConnectionInfo();
        _tl.reset( info );
    }

    return info;
}
2011-01-04 06:40:41 +01:00
// Take this thread out of sharded mode, destroying its ShardedConnectionInfo.
void ShardedConnectionInfo::reset() {
    _tl.reset();
}
2010-11-18 16:58:31 +01:00
// The version this connection last set for 'ns', or 0 when none was set.
const ConfigVersion ShardedConnectionInfo::getVersion( const string& ns ) const {
    NSVersionMap::const_iterator i = _versions.find( ns );
    if ( i == _versions.end() )
        return 0;

    return i->second;
}
2011-01-04 06:40:41 +01:00
void ShardedConnectionInfo::setVersion( const string& ns , const ConfigVersion& version ) {
2010-06-15 16:18:13 +02:00
_versions[ns] = version;
}
2011-01-04 06:40:41 +01:00
void ShardedConnectionInfo::setID( const OID& id ) {
2010-06-15 16:18:13 +02:00
_id = id;
}
// -----ShardedConnectionInfo END ----
2011-01-04 06:40:41 +01:00
// Pull a version number out of BSON element 'e'. Accepts any numeric type,
// Date, or Timestamp. On a missing or non-numeric element, sets 'errmsg'
// and returns 0.
unsigned long long extractVersion( BSONElement e , string& errmsg ) {
    if ( e.eoo() ) {
        errmsg = "no version";
        return 0;
    }

    if ( e.isNumber() )
        return (unsigned long long)e.number();

    if ( e.type() == Date || e.type() == Timestamp )
        return e._numberLong();

    errmsg = "version is not a numeric type";
    return 0;
}
class MongodShardCommand : public Command {
public:
2011-01-04 06:40:41 +01:00
MongodShardCommand( const char * n ) : Command( n ) {
2010-06-15 16:18:13 +02:00
}
virtual bool slaveOk() const {
return false;
}
virtual bool adminOnly() const {
return true;
}
};
2011-01-04 06:40:41 +01:00
bool haveLocalShardingInfo( const string& ns ) {
2010-06-15 16:18:13 +02:00
if ( ! shardingState.enabled() )
return false;
2011-01-04 06:40:41 +01:00
2010-06-15 16:18:13 +02:00
if ( ! shardingState.hasVersion( ns ) )
return false;
return ShardedConnectionInfo::get(false) > 0;
}
class UnsetShardingCommand : public MongodShardCommand {
public:
2011-01-04 06:40:41 +01:00
UnsetShardingCommand() : MongodShardCommand("unsetSharding") {}
virtual void help( stringstream& help ) const {
help << " example: { unsetSharding : 1 } ";
}
2011-01-04 06:40:41 +01:00
virtual LockType locktype() const { return NONE; }
bool run(const string& , BSONObj& cmdObj, string& errmsg, BSONObjBuilder& result, bool) {
ShardedConnectionInfo::reset();
return true;
2011-01-04 06:40:41 +01:00
}
} unsetShardingCommand;
2011-01-04 06:40:41 +01:00
2010-06-15 16:18:13 +02:00
class SetShardVersion : public MongodShardCommand {
public:
2011-01-04 06:40:41 +01:00
SetShardVersion() : MongodShardCommand("setShardVersion") {}
2010-06-15 16:18:13 +02:00
virtual void help( stringstream& help ) const {
help << " example: { setShardVersion : 'alleyinsider.foo' , version : 1 , configdb : '' } ";
}
2011-01-04 06:40:41 +01:00
2010-06-15 16:18:13 +02:00
virtual LockType locktype() const { return WRITE; } // TODO: figure out how to make this not need to lock
2011-01-04 06:40:41 +01:00
bool run(const string& , BSONObj& cmdObj, string& errmsg, BSONObjBuilder& result, bool) {
2010-07-23 03:00:57 +02:00
lastError.disableForCommand();
2010-06-15 16:18:13 +02:00
ShardedConnectionInfo* info = ShardedConnectionInfo::get( true );
bool authoritative = cmdObj.getBoolField( "authoritative" );
string configdb = cmdObj["configdb"].valuestrsafe();
2011-01-04 06:40:41 +01:00
{
// configdb checking
if ( configdb.size() == 0 ) {
2010-06-15 16:18:13 +02:00
errmsg = "no configdb";
return false;
}
2011-01-04 06:40:41 +01:00
if ( shardingState.enabled() ) {
if ( configdb != shardingState.getConfigServer() ) {
2010-06-15 16:18:13 +02:00
errmsg = "specified a different configdb!";
return false;
}
}
else {
2011-01-04 06:40:41 +01:00
if ( ! authoritative ) {
2010-06-15 16:18:13 +02:00
result.appendBool( "need_authoritative" , true );
errmsg = "first setShardVersion";
return false;
}
shardingState.enable( configdb );
configServer.init( configdb );
2010-06-15 16:18:13 +02:00
}
}
2011-01-04 06:40:41 +01:00
if ( cmdObj["shard"].type() == String ) {
2010-06-15 16:49:47 +02:00
shardingState.gotShardName( cmdObj["shard"].String() );
shardingState.gotShardHost( cmdObj["shardHost"].String() );
}
2011-01-04 06:40:41 +01:00
{
// setting up ids
if ( cmdObj["serverID"].type() != jstOID ) {
2010-06-15 16:18:13 +02:00
// TODO: fix this
//errmsg = "need serverID to be an OID";
//return 0;
}
else {
OID clientId = cmdObj["serverID"].__oid();
2011-01-04 06:40:41 +01:00
if ( ! info->hasID() ) {
2010-06-15 16:18:13 +02:00
info->setID( clientId );
}
2011-01-04 06:40:41 +01:00
else if ( clientId != info->getID() ) {
2010-06-15 16:18:13 +02:00
errmsg = "server id has changed!";
return 0;
}
}
}
2010-06-15 16:18:13 +02:00
unsigned long long version = extractVersion( cmdObj["version"] , errmsg );
2010-06-29 23:56:20 +02:00
2011-01-04 06:40:41 +01:00
if ( errmsg.size() ) {
2010-06-15 16:18:13 +02:00
return false;
}
2011-01-04 06:40:41 +01:00
2010-06-15 16:18:13 +02:00
string ns = cmdObj["setShardVersion"].valuestrsafe();
2011-01-04 06:40:41 +01:00
if ( ns.size() == 0 ) {
2010-06-15 16:18:13 +02:00
errmsg = "need to speciy fully namespace";
return false;
}
2011-01-04 06:40:41 +01:00
2010-11-18 16:58:31 +01:00
const ConfigVersion oldVersion = info->getVersion(ns);
const ConfigVersion globalVersion = shardingState.getVersion(ns);
2011-01-04 06:40:41 +01:00
if ( oldVersion > 0 && globalVersion == 0 ) {
// this had been reset
2010-11-18 16:58:31 +01:00
info->setVersion( ns , 0 );
}
2011-01-04 06:40:41 +01:00
if ( version == 0 && globalVersion == 0 ) {
2010-06-15 16:18:13 +02:00
// this connection is cleaning itself
2010-11-18 16:58:31 +01:00
info->setVersion( ns , 0 );
return true;
2010-06-15 16:18:13 +02:00
}
2011-01-04 06:40:41 +01:00
if ( version == 0 && globalVersion > 0 ) {
if ( ! authoritative ) {
2010-06-15 16:18:13 +02:00
result.appendBool( "need_authoritative" , true );
2010-12-17 05:21:02 +01:00
result.append( "ns" , ns );
2010-06-15 16:18:13 +02:00
result.appendTimestamp( "globalVersion" , globalVersion );
result.appendTimestamp( "oldVersion" , oldVersion );
errmsg = "dropping needs to be authoritative";
2010-11-18 16:58:31 +01:00
return false;
2010-06-15 16:18:13 +02:00
}
log() << "wiping data for: " << ns << endl;
result.appendTimestamp( "beforeDrop" , globalVersion );
// only setting global version on purpose
// need clients to re-find meta-data
shardingState.resetVersion( ns );
2010-11-18 16:58:31 +01:00
info->setVersion( ns , 0 );
return true;
2010-06-15 16:18:13 +02:00
}
2011-01-04 06:40:41 +01:00
if ( version < oldVersion ) {
2010-12-17 05:21:02 +01:00
errmsg = "you already have a newer version of collection '" + ns + "'";
result.append( "ns" , ns );
2010-06-15 16:18:13 +02:00
result.appendTimestamp( "oldVersion" , oldVersion );
result.appendTimestamp( "newVersion" , version );
result.appendTimestamp( "globalVersion" , globalVersion );
2010-06-15 16:18:13 +02:00
return false;
}
2011-01-04 06:40:41 +01:00
if ( version < globalVersion ) {
while ( shardingState.inCriticalMigrateSection() ) {
dbtemprelease r;
sleepmillis(2);
2011-01-14 22:44:04 +01:00
OCCASIONALLY log() << "waiting till out of critical section" << endl;
}
2010-12-17 05:21:02 +01:00
errmsg = "going to older version for global for collection '" + ns + "'";
result.append( "ns" , ns );
2010-06-29 23:56:20 +02:00
result.appendTimestamp( "version" , version );
result.appendTimestamp( "globalVersion" , globalVersion );
2010-06-15 16:18:13 +02:00
return false;
}
2011-01-04 06:40:41 +01:00
if ( globalVersion == 0 && ! cmdObj.getBoolField( "authoritative" ) ) {
2010-06-15 16:18:13 +02:00
// need authoritative for first look
result.append( "ns" , ns );
2010-12-17 05:21:02 +01:00
result.appendBool( "need_authoritative" , true );
errmsg = "first time for collection '" + ns + "'";
2010-06-15 16:18:13 +02:00
return false;
}
{
dbtemprelease unlock;
ShardChunkVersion currVersion = version;
2011-01-04 06:40:41 +01:00
if ( ! shardingState.trySetVersion( ns , currVersion ) ) {
2010-12-17 06:22:10 +01:00
errmsg = str::stream() << "client version differs from config's for colleciton '" << ns << "'";
2010-12-17 05:21:02 +01:00
result.append( "ns" , ns );
result.appendTimestamp( "version" , version );
result.appendTimestamp( "globalVersion" , currVersion );
return false;
}
}
info->setVersion( ns , version );
result.appendTimestamp( "oldVersion" , oldVersion );
result.append( "ok" , 1 );
2010-11-18 16:58:31 +01:00
return true;
2010-06-15 16:18:13 +02:00
}
2011-01-04 06:40:41 +01:00
} setShardVersionCmd;
2011-01-04 06:40:41 +01:00
2010-06-15 16:18:13 +02:00
class GetShardVersion : public MongodShardCommand {
public:
2011-01-04 06:40:41 +01:00
GetShardVersion() : MongodShardCommand("getShardVersion") {}
2010-06-15 16:18:13 +02:00
virtual void help( stringstream& help ) const {
help << " example: { getShardVersion : 'alleyinsider.foo' } ";
}
2011-01-04 06:40:41 +01:00
virtual LockType locktype() const { return NONE; }
bool run(const string& , BSONObj& cmdObj, string& errmsg, BSONObjBuilder& result, bool) {
2010-06-15 16:18:13 +02:00
string ns = cmdObj["getShardVersion"].valuestrsafe();
2011-01-04 06:40:41 +01:00
if ( ns.size() == 0 ) {
2010-06-15 16:18:13 +02:00
errmsg = "need to speciy fully namespace";
return false;
}
2011-01-04 06:40:41 +01:00
2010-06-15 16:18:13 +02:00
result.append( "configServer" , shardingState.getConfigServer() );
result.appendTimestamp( "global" , shardingState.getVersion(ns) );
2011-01-04 06:40:41 +01:00
2010-06-15 16:18:13 +02:00
ShardedConnectionInfo* info = ShardedConnectionInfo::get( false );
if ( info )
result.appendTimestamp( "mine" , info->getVersion(ns) );
2011-01-04 06:40:41 +01:00
else
2010-06-15 16:18:13 +02:00
result.appendTimestamp( "mine" , 0 );
2011-01-04 06:40:41 +01:00
2010-06-15 16:18:13 +02:00
return true;
}
2011-01-04 06:40:41 +01:00
2010-06-15 16:18:13 +02:00
} getShardVersion;
2010-06-15 16:49:47 +02:00
class ShardingStateCmd : public MongodShardCommand {
public:
2011-01-04 06:40:41 +01:00
ShardingStateCmd() : MongodShardCommand( "shardingState" ) {}
2010-06-15 16:49:47 +02:00
virtual LockType locktype() const { return WRITE; } // TODO: figure out how to make this not need to lock
2011-01-04 06:40:41 +01:00
bool run(const string& , BSONObj& cmdObj, string& errmsg, BSONObjBuilder& result, bool) {
2010-06-15 16:49:47 +02:00
shardingState.appendInfo( result );
return true;
}
2011-01-04 06:40:41 +01:00
2010-06-15 16:49:47 +02:00
} shardingStateCmd;
2010-06-15 16:18:13 +02:00
/**
* @ return true if not in sharded mode
or if version for this client is ok
*/
2011-01-04 06:40:41 +01:00
bool shardVersionOk( const string& ns , bool isWriteOp , string& errmsg ) {
2010-06-15 16:18:13 +02:00
if ( ! shardingState.enabled() )
return true;
2010-06-15 16:18:13 +02:00
ShardedConnectionInfo* info = ShardedConnectionInfo::get( false );
2011-01-04 06:40:41 +01:00
if ( ! info ) {
2010-06-15 16:18:13 +02:00
// this means the client has nothing sharded
// so this allows direct connections to do whatever they want
// which i think is the correct behavior
return true;
}
2011-01-04 06:40:41 +01:00
if ( info->inForceVersionOkMode() ) {
return true;
}
2010-06-15 16:18:13 +02:00
// TODO
// all collections at some point, be sharded or not, will have a version (and a ShardChunkManager)
// for now, we remove the sharding state of dropped collection
// so delayed request may come in. This has to be fixed.
ConfigVersion clientVersion = info->getVersion(ns);
2011-01-04 06:40:41 +01:00
ConfigVersion version;
if ( ! shardingState.hasVersion( ns , version ) && clientVersion == 0 ) {
2010-06-15 16:18:13 +02:00
return true;
}
2010-06-15 16:18:13 +02:00
2011-01-04 06:40:41 +01:00
if ( version == 0 && clientVersion > 0 ) {
2010-06-15 16:18:13 +02:00
stringstream ss;
ss << "collection was dropped or this shard no longer valied version: " << version << " clientVersion: " << clientVersion;
2010-06-15 16:18:13 +02:00
errmsg = ss.str();
return false;
}
2011-01-04 06:40:41 +01:00
2010-06-15 16:18:13 +02:00
if ( clientVersion >= version )
return true;
2011-01-04 06:40:41 +01:00
if ( clientVersion == 0 ) {
2010-07-16 20:32:37 +02:00
stringstream ss;
ss << "client in sharded mode, but doesn't have version set for this collection: " << ns << " myVersion: " << version;
errmsg = ss.str();
2010-06-15 16:18:13 +02:00
return false;
}
2011-01-04 06:40:41 +01:00
if ( isWriteOp && version.majorVersion() == clientVersion.majorVersion() ) {
// this means there was just a split
// since on a split w/o a migrate this server is ok
// going to accept write
return true;
}
2010-07-16 18:46:47 +02:00
stringstream ss;
ss << "your version is too old ns: " + ns << " global: " << version << " client: " << clientVersion;
errmsg = ss.str();
2010-06-15 16:18:13 +02:00
return false;
}
}