SERVER-17638 Catalog manager loads database metadata
This change switches the loading of the database metadata (DBConfig) to happen through the catalog manager instead of calling the config server directly.
commit 4ba2435eaf
parent 25476fbd6a
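The change is easiest to see in DBConfig::_load(): instead of building a query against config.databases and contacting the config server directly, DBConfig now asks the catalog manager for a DatabaseType and copies the fields out of it. The sketch below is not part of the commit; it is a minimal, self-contained C++ illustration of that pattern, using simplified stand-ins (StatusWith, DatabaseType, CatalogManager, loadDbConfig) rather than the real MongoDB classes.

// Minimal standalone sketch (C++17), not MongoDB source: simplified stand-ins
// illustrate routing metadata loads through a catalog manager abstraction.
#include <iostream>
#include <map>
#include <optional>
#include <string>

// Simplified stand-in for mongo::StatusWith<T>: either a value or an error string.
template <typename T>
struct StatusWith {
    std::optional<T> value;
    std::string error;
    bool isOK() const { return value.has_value(); }
};

// Simplified stand-in for mongo::DatabaseType: name, primary shard, sharding flag.
struct DatabaseType {
    std::string name;
    std::string primary;
    bool sharded = false;
};

// The catalog manager owns all access to the database metadata; callers no
// longer query the config server's collection themselves.
class CatalogManager {
public:
    void put(const DatabaseType& db) { _dbs[db.name] = db; }

    StatusWith<DatabaseType> getDatabase(const std::string& dbName) const {
        auto it = _dbs.find(dbName);
        if (it == _dbs.end()) {
            return {std::nullopt, "DatabaseNotFound"};
        }
        return {it->second, ""};
    }

private:
    std::map<std::string, DatabaseType> _dbs;  // stands in for config.databases
};

// After the change, a DBConfig-style load goes through the catalog manager
// and only handles the "not found" case specially.
bool loadDbConfig(const CatalogManager& catalog, const std::string& dbName) {
    StatusWith<DatabaseType> res = catalog.getDatabase(dbName);
    if (!res.isOK()) {
        std::cout << dbName << ": " << res.error << "\n";
        return false;
    }
    std::cout << dbName << ": primary=" << res.value->primary
              << " sharded=" << res.value->sharded << "\n";
    return true;
}

int main() {
    CatalogManager catalog;
    catalog.put({"test", "shard0000", true});

    loadDbConfig(catalog, "test");     // resolved via the catalog manager
    loadDbConfig(catalog, "missing");  // mirrors the new DatabaseNotFound path
    return 0;
}

The real interfaces touched by the commit are CatalogManagerLegacy::getDatabase()/updateDatabase(), DatabaseType::fromBSON()/validate(), and DBConfig::_load()/_save(), shown in the diffs below.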
@@ -43,7 +43,9 @@ for( var i = 0; i < 2; i++ ){
         // Make sure we get a transport error, and not a no-primary error
         // Unfortunately e gets stringified so we have to test this way
-        assert( e.message.indexOf( "10276" ) >= 0 || e.message.indexOf( "socket" ) >= 0 )
+        assert(e.message.indexOf("10276") >= 0 || // Transport error
+               e.message.indexOf("13328") >= 0 || // Connect error
+               e.message.indexOf("socket") >= 0 )
     }
 }
@@ -1,10 +1,9 @@
 // sync2.js

-s = new ShardingTest( "sync2" , 3 , 50 , 2 , { sync : true } );
+var s = new ShardingTest( "sync2" , 3 , 50 , 2 , { sync : true } );
 s.stopBalancer()

-s2 = s._mongos[1];
+var s2 = s._mongos[1];

 s.adminCommand( { enablesharding : "test" } );
 s.adminCommand( { shardcollection : "test.foo" , key : { num : 1 } } );
@@ -22,14 +21,18 @@ s.getDB( "test" ).foo.insert( { num : 7 } );
 assert.eq( 7 , s.getDB( "test" ).foo.find().toArray().length , "normal A" );
 assert.eq( 7 , s2.getDB( "test" ).foo.find().toArray().length , "other A" );

-s.adminCommand( { split : "test.foo" , middle : { num : 4 } } );
-s.adminCommand({ movechunk: "test.foo", find: { num: 3 },
-                 to: s.getFirstOther(s.getServer("test" )).name, _waitForDelete: true });
+s.adminCommand({ split : "test.foo" , middle : { num : 4 } });
+s.adminCommand({ movechunk: "test.foo",
+                 find: { num: 3 },
+                 to: s.getFirstOther(s.getServer("test" )).name,
+                 _waitForDelete: true });

 assert( s._connections[0].getDB( "test" ).foo.find().toArray().length > 0 , "shard 0 request" );
 assert( s._connections[1].getDB( "test" ).foo.find().toArray().length > 0 , "shard 1 request" );
-assert.eq( 7 , s._connections[0].getDB( "test" ).foo.find().toArray().length +
-               s._connections[1].getDB( "test" ).foo.find().toArray().length , "combined shards" );
+assert.eq(7,
+          s._connections[0].getDB( "test" ).foo.find().toArray().length +
+              s._connections[1].getDB( "test" ).foo.find().toArray().length,
+          "combined shards");

 assert.eq( 7 , s.getDB( "test" ).foo.find().toArray().length , "normal B" );
 assert.eq( 7 , s2.getDB( "test" ).foo.find().toArray().length , "other B" );
@@ -41,7 +44,9 @@ print( "* A" );

 assert.eq( 7 , s.getDB( "test" ).foo.find().toArray().length , "normal B 1" );
 assert.eq( 7 , s2.getDB( "test" ).foo.find().toArray().length , "other B 2" );

 print( "* B" );

 assert.eq( 7 , s.getDB( "test" ).foo.find().toArray().length , "normal B 3" );
 assert.eq( 7 , s2.getDB( "test" ).foo.find().toArray().length , "other B 4" );
@@ -51,15 +56,16 @@ for ( var i=0; i<10; i++ ){
 }

 assert.eq( 0 , s.config.big.find().itcount() , "C1" );

 for ( i=0; i<50; i++ ){
     s.config.big.insert( { _id : i } );
 }

 assert.eq( 50 , s.config.big.find().itcount() , "C2" );
 assert.eq( 50 , s.config.big.find().count() , "C3" );
 assert.eq( 50 , s.config.big.find().batchSize(5).itcount() , "C4" );

-hashes = []
+var hashes = []

 for ( i=0; i<3; i++ ){
     print( i );
@@ -69,34 +75,39 @@ for ( i=0; i<3; i++ ){

 printjson( hashes );

-for ( i=1; i<hashes.length; i++ ){
-    if ( hashes[0].md5 == hashes[i].md5 )
-        continue;
-
-    assert.eq( hashes[0].numCollections , hashes[i].numCollections , "num collections" );
-
-    var bad = false;
-
-    for ( var k in hashes[0].collections ){
-        if ( hashes[0].collections[k] ==
-             hashes[i].collections[k] )
-            continue;
-
-        if ( k == "mongos" || k == "changelog" || k == "locks" || k == "lockpings" )
-            continue;
-
-        bad = true;
-        print( "collection " + k + " is different" );
+for (i = 1; i < hashes.length; i++) {
+    if (hashes[0].md5 == hashes[i].md5) {
+        continue;
+    }
+
+    assert.eq(hashes[0].numCollections , hashes[i].numCollections , "num collections does not match");
+
+    var hashMismatch = false;
+
+    for ( var k in hashes[0].collections ) {
+        if (hashes[0].collections[k] == hashes[i].collections[k]) {
+            continue;
+        }
+
+        if (k == "mongos" || k == "changelog" || k == "locks" || k == "lockpings") {
+            continue;
+        }
+
+        print("collection " + k + " is different");
+        hashMismatch = true;

         print( "----" );
         s._connections[0].getDB( "config" ).getCollection( k ).find().sort( { _id : 1 } ).forEach( printjsononeline );
         print( "----" );
         s._connections[i].getDB( "config" ).getCollection( k ).find().sort( { _id : 1 } ).forEach( printjsononeline );
         print( "----" );
     }

-    if ( bad )
+    if (hashMismatch) {
         throw Error("hashes different");
+    }
 }

 s.stop();
@@ -231,6 +231,8 @@ namespace {
     }

     Status CatalogManagerLegacy::updateDatabase(const std::string& dbName, const DatabaseType& db) {
+        fassert(28616, db.validate());
+
         BatchedCommandResponse response;
         Status status = update(DatabaseType::ConfigNS,
                                BSON(DatabaseType::name(dbName)),
@@ -257,14 +259,7 @@
                           stream() << "database " << dbName << " not found.");
         }

-        DatabaseType db;
-        string errmsg;
-
-        if (!db.parseBSON(dbObj, &errmsg)) {
-            return Status(ErrorCodes::InvalidBSON, errmsg);
-        }
-
-        return StatusWith<DatabaseType>(db);
+        return DatabaseType::fromBSON(dbObj);
     }

     void CatalogManagerLegacy::writeConfigServerDirect(const BatchedCommandRequest& request,
@@ -312,16 +307,15 @@
         ScopedDbConnection conn(_configServerConnectionString, 30);

         BSONObjBuilder b;
-        b.appendRegex("_id", (string)"^" + pcrecpp::RE::QuoteMeta(dbName) + "$", "i");
+        b.appendRegex(DatabaseType::name(),
+                      (string)"^" + pcrecpp::RE::QuoteMeta(dbName) + "$", "i");

         BSONObj dbObj = conn->findOne(DatabaseType::ConfigNS, b.obj());
         conn.done();

         // If our name is exactly the same as the name we want, try loading
         // the database again.
-        if (!dbObj.isEmpty() &&
-                dbObj[DatabaseType::name()].String() == dbName) {
-
+        if (!dbObj.isEmpty() && dbObj[DatabaseType::name()].String() == dbName) {
             return Status(ErrorCodes::NamespaceExists,
                           str::stream() << "database " << dbName << " already exists");
         }
@@ -538,45 +538,26 @@ namespace mongo {
         _save();
     }

-    void DBConfig::serialize(BSONObjBuilder& to) {
-        to.append("_id", _name);
-        to.appendBool(DatabaseType::DEPRECATED_partitioned(), _shardingEnabled );
-        to.append(DatabaseType::primary(), _primary.getName() );
-    }
-
-    void DBConfig::unserialize(const BSONObj& from) {
-        LOG(1) << "DBConfig unserialize: " << _name << " " << from << endl;
-        verify( _name == from[DatabaseType::name()].String() );
-
-        _shardingEnabled = from.getBoolField(DatabaseType::DEPRECATED_partitioned().c_str());
-        _primary.reset( from.getStringField(DatabaseType::primary().c_str()));
-
-        // In the 1.5.x series, we used to have collection metadata nested in the database entry. The 1.6.x series
-        // had migration code that ported that info to where it belongs now: the 'collections' collection. We now
-        // just assert that we're not migrating from a 1.5.x directly into a 1.7.x without first converting.
-        BSONObj sharded = from.getObjectField(DatabaseType::DEPRECATED_sharded().c_str());
-        if ( ! sharded.isEmpty() )
-            uasserted( 13509 , "can't migrate from 1.5.x release to the current one; need to upgrade to 1.6.x first");
-    }
-
     bool DBConfig::load() {
         boost::lock_guard<boost::mutex> lk( _lock );
         return _load();
     }

     bool DBConfig::_load() {
-        ScopedDbConnection conn(configServer.modelServer(), 30.0);
-
-        BSONObj dbObj = conn->findOne( DatabaseType::ConfigNS,
-                                       BSON( DatabaseType::name( _name ) ) );
-
-        if ( dbObj.isEmpty() ) {
-            conn.done();
+        StatusWith<DatabaseType> status = grid.catalogManager()->getDatabase(_name);
+        if (status == ErrorCodes::DatabaseNotFound) {
             return false;
         }

-        unserialize( dbObj );
+        // All other errors are connectivity, etc so throw an exception.
+        uassertStatusOK(status.getStatus());
+
+        DatabaseType dbt = status.getValue();
+        invariant(_name == dbt.getName());
+        _primary.reset(dbt.getPrimary());
+        _shardingEnabled = dbt.getSharded();

         // Load all collections
         BSONObjBuilder b;
         b.appendRegex(CollectionType::ns(),
                       (string)"^" + pcrecpp::RE::QuoteMeta( _name ) + "\\." );
@@ -584,6 +565,7 @@ namespace mongo {
         int numCollsErased = 0;
         int numCollsSharded = 0;

+        ScopedDbConnection conn(configServer.modelServer(), 30.0);
         auto_ptr<DBClientCursor> cursor = conn->query(CollectionType::ConfigNS, b.obj());
         verify( cursor.get() );
         while ( cursor->more() ) {
@@ -619,24 +601,12 @@

     void DBConfig::_save(bool db, bool coll) {
         if (db) {
-            BSONObj n;
-            {
-                BSONObjBuilder b;
-                serialize(b);
-                n = b.obj();
-            }
+            DatabaseType dbt;
+            dbt.setName(_name);
+            dbt.setPrimary(_primary.getName());
+            dbt.setSharded(_shardingEnabled);

-            BatchedCommandResponse response;
-            Status result = grid.catalogManager()->update(DatabaseType::ConfigNS,
-                                                           BSON(DatabaseType::name(_name)),
-                                                           n,
-                                                           true, // upsert
-                                                           false, // multi
-                                                           &response);
-            if (!result.isOK()) {
-                uasserted(13396,
-                          str::stream() << "DBConfig save failed: " << response.toBSON());
-            }
+            uassertStatusOK(grid.catalogManager()->updateDatabase(_name, dbt));
         }

         if (coll) {
@@ -124,10 +124,6 @@ namespace mongo {

         // model stuff

-        // lockless loading
-        void serialize(BSONObjBuilder& to);
-        void unserialize(const BSONObj& from);
-
         void getAllShards(std::set<Shard>& shards) const;

         void getAllShardedCollections(std::set<std::string>& namespaces) const;
@@ -25,123 +25,105 @@
  * delete this exception statement from all source files in the program,
  * then also delete it in the license file.
  */

 #include "mongo/platform/basic.h"

 #include "mongo/s/type_database.h"

-#include "mongo/db/field_parser.h"
-#include "mongo/util/mongoutils/str.h"
+#include "mongo/base/status_with.h"
+#include "mongo/bson/bsonobj.h"
+#include "mongo/bson/bsonobjbuilder.h"
+#include "mongo/bson/util/bson_extract.h"
+#include "mongo/util/assert_util.h"

 namespace mongo {

     using std::string;

-    using mongoutils::str::stream;
-
     const std::string DatabaseType::ConfigNS = "config.databases";

     const BSONField<std::string> DatabaseType::name("_id");
     const BSONField<std::string> DatabaseType::primary("primary");
-    const BSONField<bool> DatabaseType::draining("draining", false);
-    const BSONField<bool> DatabaseType::DEPRECATED_partitioned("partitioned");
-    const BSONField<std::string> DatabaseType::DEPRECATED_name("name");
-    const BSONField<bool> DatabaseType::DEPRECATED_sharded("sharded");
+    const BSONField<bool> DatabaseType::sharded("partitioned");


-    DatabaseType::DatabaseType() {
-        clear();
-    }
-
-    DatabaseType::~DatabaseType() {
-    }
-
-    bool DatabaseType::isValid(std::string* errMsg) const {
-        std::string dummy;
-        if (errMsg == NULL) {
-            errMsg = &dummy;
-        }
-
-        // All the mandatory fields must be present.
-        if (!_isNameSet) {
-            *errMsg = stream() << "missing " << name.name() << " field";
-            return false;
-        }
-        if (!_isPrimarySet) {
-            *errMsg = stream() << "missing " << primary.name() << " field";
-            return false;
-        }
-
-        return true;
+    StatusWith<DatabaseType> DatabaseType::fromBSON(const BSONObj& source) {
+        DatabaseType dbt;
+
+        {
+            std::string dbtName;
+            Status status = bsonExtractStringField(source, name.name(), &dbtName);
+            if (!status.isOK()) return status;
+
+            dbt._name = dbtName;
+        }
+
+        {
+            std::string dbtPrimary;
+            Status status = bsonExtractStringField(source, primary.name(), &dbtPrimary);
+            if (!status.isOK()) return status;
+
+            dbt._primary = dbtPrimary;
+        }
+
+        {
+            bool dbtSharded;
+            Status status = bsonExtractBooleanFieldWithDefault(source, sharded.name(), false, &dbtSharded);
+            if (!status.isOK()) return status;
+
+            dbt._sharded = dbtSharded;
+        }
+
+        return StatusWith<DatabaseType>(dbt);
+    }
+
+    Status DatabaseType::validate() const {
+        if (!_name.is_initialized() || _name->empty()) {
+            return Status(ErrorCodes::NoSuchKey, "missing name");
+        }
+
+        if (!_primary.is_initialized() || _primary->empty()) {
+            return Status(ErrorCodes::NoSuchKey, "missing primary");
+        }
+
+        if (!_sharded.is_initialized()) {
+            return Status(ErrorCodes::NoSuchKey, "missing sharded");
+        }
+
+        return Status::OK();
     }

     BSONObj DatabaseType::toBSON() const {
         BSONObjBuilder builder;

-        if (_isNameSet) {
-            builder.append(name(), _name);
-        }
-
-        if (_isPrimarySet) {
-            builder.append(primary(), _primary);
-        }
-
-        builder.appendBool(DEPRECATED_partitioned(), !_isPrimarySet);
-
-        if (_isDrainingSet) {
-            builder.append(draining(), _draining);
-        }
+        builder.append(name.name(), _name.get_value_or(""));
+        builder.append(primary.name(), _primary.get_value_or(""));
+        builder.append(sharded.name(), _sharded.get_value_or(false));

         return builder.obj();
     }

-    bool DatabaseType::parseBSON(const BSONObj& source, string* errMsg) {
-        clear();
-
-        std::string dummy;
-        if (!errMsg) errMsg = &dummy;
-
-        FieldParser::FieldState fieldState;
-        fieldState = FieldParser::extract(source, name, &_name, errMsg);
-        if (fieldState == FieldParser::FIELD_INVALID) return false;
-        _isNameSet = fieldState == FieldParser::FIELD_SET;
-
-        fieldState = FieldParser::extract(source, primary, &_primary, errMsg);
-        if (fieldState == FieldParser::FIELD_INVALID) return false;
-        _isPrimarySet = fieldState == FieldParser::FIELD_SET;
-
-        fieldState = FieldParser::extract(source, draining, &_draining, errMsg);
-        if (fieldState == FieldParser::FIELD_INVALID) return false;
-        _isDrainingSet = fieldState == FieldParser::FIELD_SET;
-
-        return true;
-    }
-
     void DatabaseType::clear() {
-
-        _name.clear();
-        _isNameSet = false;
-
-        _primary.clear();
-        _isPrimarySet = false;
-
-        _draining = false;
-        _isDrainingSet = false;
-
-    }
-
-    void DatabaseType::cloneTo(DatabaseType* other) const {
-        other->clear();
-
-        other->_name = _name;
-        other->_isNameSet = _isNameSet;
-
-        other->_primary = _primary;
-        other->_isPrimarySet = _isPrimarySet;
-
-        other->_draining = _draining;
-        other->_isDrainingSet = _isDrainingSet;
-
+        _name.reset();
+        _primary.reset();
+        _sharded.reset();
     }

     std::string DatabaseType::toString() const {
         return toBSON().toString();
     }

+    void DatabaseType::setName(const std::string& name) {
+        invariant(!name.empty());
+        _name = name;
+    }
+
+    void DatabaseType::setPrimary(const std::string& primary) {
+        invariant(!primary.empty());
+        _primary = primary;
+    }
+
 } // namespace mongo
@@ -28,158 +28,81 @@

 #pragma once

+#include <boost/optional.hpp>
 #include <string>

-#include "mongo/base/disallow_copying.h"
-#include "mongo/base/string_data.h"
-#include "mongo/db/jsobj.h"
+#include "mongo/bson/bson_field.h"

 namespace mongo {

+    class BSONObj;
+    class Status;
+    template<typename T> class StatusWith;
+
+
     /**
-     * This class represents the layout and contents of documents contained in the
-     * config.databases collection. All manipulation of documents coming from that
-     * collection should be done with this class.
-     *
-     * Usage Example:
-     *
-     *     // Contact the config. 'conn' has been obtained before.
-     *     DBClientBase* conn;
-     *     BSONObj query = QUERY(DatabaseType::exampleField("exampleFieldName"));
-     *     exampleDoc = conn->findOne(DatabaseType::ConfigNS, query);
-     *
-     *     // Process the response.
-     *     DatabaseType exampleType;
-     *     std::string errMsg;
-     *     if (!exampleType.parseBSON(exampleDoc, &errMsg) || !exampleType.isValid(&errMsg)) {
-     *         // Can't use 'exampleType'. Take action.
-     *     }
-     *     // use 'exampleType'
-     *
+     * This class represents the layout and contents of documents contained in the config.databases
+     * collection. All manipulation of documents coming from that collection should be done with
+     * this class.
      */
     class DatabaseType {
     public:

-        //
-        // schema declarations
-        //
-
+        // Name of the databases collection in the config server.
         static const std::string ConfigNS;

+        // Field names and types in the databases collection type.
         static const BSONField<std::string> name;
         static const BSONField<std::string> primary;
-        static const BSONField<bool> draining;
-        static const BSONField<bool> DEPRECATED_partitioned;
-        static const BSONField<std::string> DEPRECATED_name;
-        static const BSONField<bool> DEPRECATED_sharded;
+        static const BSONField<bool> sharded;

-        //
-        // databases type methods
-        //
-
-        DatabaseType();
-        ~DatabaseType();
-
         /**
-         * Returns true if all the mandatory fields are present and have valid
-         * representations. Otherwise returns false and fills in the optional 'errMsg' string.
+         * Constructs a new DatabaseType object from BSON. Also does validation of the contents.
          */
-        bool isValid(std::string* errMsg) const;
+        static StatusWith<DatabaseType> fromBSON(const BSONObj& source);
+
+        /**
+         * Returns OK if all fields have been set. Otherwise returns NoSuchKey and information
+         * about what is the first field which is missing.
+         */
+        Status validate() const;

         /**
          * Returns the BSON representation of the entry.
          */
         BSONObj toBSON() const;

-        /**
-         * Clears and populates the internal state using the 'source' BSON object if the
-         * latter contains valid values. Otherwise sets errMsg and returns false.
-         */
-        bool parseBSON(const BSONObj& source, std::string* errMsg);
-
         /**
          * Clears the internal state.
          */
         void clear();

-        /**
-         * Copies all the fields present in 'this' to 'other'.
-         */
-        void cloneTo(DatabaseType* other) const;
-
         /**
          * Returns a std::string representation of the current internal state.
          */
         std::string toString() const;

-        //
-        // individual field accessors
-        //
+        const std::string& getName() const { return _name.get(); }
+        void setName(const std::string& name);

-        // Mandatory Fields
-        void setName(StringData name) {
-            _name = name.toString();
-            _isNameSet = true;
-        }
+        const std::string& getPrimary() const { return _primary.get(); }
+        void setPrimary(const std::string& primary);

-        void unsetName() { _isNameSet = false; }
-
-        bool isNameSet() const { return _isNameSet; }
-
-        // Calling get*() methods when the member is not set results in undefined behavior
-        const std::string& getName() const {
-            dassert(_isNameSet);
-            return _name;
-        }
-
-        void setPrimary(StringData primary) {
-            _primary = primary.toString();
-            _isPrimarySet = true;
-        }
-
-        void unsetPrimary() { _isPrimarySet = false; }
-
-        bool isPrimarySet() const { return _isPrimarySet; }
-
-        // Calling get*() methods when the member is not set results in undefined behavior
-        const std::string& getPrimary() const {
-            dassert(_isPrimarySet);
-            return _primary;
-        }
-
-        // Optional Fields
-        void setDraining(bool draining) {
-            _draining = draining;
-            _isDrainingSet = true;
-        }
-
-        void unsetDraining() { _isDrainingSet = false; }
-
-        bool isDrainingSet() const {
-            return _isDrainingSet || draining.hasDefault();
-        }
-
-        // Calling get*() methods when the member is not set and has no default results in undefined
-        // behavior
-        bool getDraining() const {
-            if (_isDrainingSet) {
-                return _draining;
-            } else {
-                dassert(draining.hasDefault());
-                return draining.getDefault();
-            }
-        }
+        bool getSharded() const { return _sharded.get(); }
+        void setSharded(bool sharded) { _sharded = sharded; }

     private:
-        // Convention: (M)andatory, (O)ptional, (S)pecial rule.
-        std::string _name; // (M) database name
-        bool _isNameSet;
-        std::string _primary; // (M) primary shard for the database
-        bool _isPrimarySet;
-        bool _draining; // (O) is this database about to be deleted?
-        bool _isDrainingSet;
+        // Requred database name
+        boost::optional<std::string> _name;
+
+        // Required primary shard (must be set even if the database is sharded, because there
+        // might be collections, which are unsharded).
+        boost::optional<std::string> _primary;
+
+        // Required whether sharding is enabled for this database. Even though this field is of
+        // type optional, it is only used as an indicator that the value was explicitly set.
+        boost::optional<bool> _sharded;
     };

 } // namespace mongo
@@ -28,113 +28,43 @@

 #include "mongo/platform/basic.h"

-#include "mongo/bson/oid.h"
-#include "mongo/db/field_parser.h"
+#include "mongo/base/status_with.h"
 #include "mongo/db/jsobj.h"
 #include "mongo/s/type_database.h"
 #include "mongo/unittest/unittest.h"

 namespace {

-    using mongo::BSONObj;
-    using mongo::DatabaseType;
-    using mongo::FieldParser;
+    using namespace mongo;
     using std::string;

-    TEST(Validity, Empty) {
-        DatabaseType db;
-        BSONObj emptyObj = BSONObj();
-        string errMsg;
-        ASSERT_TRUE(db.parseBSON(emptyObj, &errMsg));
-        ASSERT_FALSE(db.isValid(NULL));
+    TEST(DatabaseType, Empty) {
+        StatusWith<DatabaseType> status = DatabaseType::fromBSON(BSONObj());
+        ASSERT_FALSE(status.isOK());
     }

-    TEST(Validity, BasicDatabase) {
-        DatabaseType db;
-        BSONObj obj = BSON(DatabaseType::name("mydb") <<
-                           DatabaseType::primary("shard"));
-        string errMsg;
-        ASSERT_TRUE(db.parseBSON(obj, &errMsg));
-        ASSERT_TRUE(db.isValid(NULL));
-    }
+    TEST(DatabaseType, Basic) {
+        StatusWith<DatabaseType> status = DatabaseType::fromBSON(
+                                                BSON(DatabaseType::name("mydb") <<
+                                                     DatabaseType::primary("shard") <<
+                                                     DatabaseType::sharded(true)));
+        ASSERT_TRUE(status.isOK());

-    TEST(Compatibility, PartitionedIsIrrelevant) {
-        DatabaseType db;
-        BSONObj obj = BSON(DatabaseType::name("mydb") <<
-                           DatabaseType::primary("shard") <<
-                           DatabaseType::DEPRECATED_partitioned(true));
-        string errMsg;
-        ASSERT_TRUE(db.parseBSON(obj, &errMsg));
+        DatabaseType db = status.getValue();
         ASSERT_EQUALS(db.getName(), "mydb");
         ASSERT_EQUALS(db.getPrimary(), "shard");
-        ASSERT_EQUALS(db.getDraining(), false);
+        ASSERT_TRUE(db.getSharded());
     }

-    TEST(Validity, BadType) {
-        DatabaseType db;
-        BSONObj obj = BSON(DatabaseType::name() << 0);
-        string errMsg;
-        ASSERT((!db.parseBSON(obj, &errMsg)) && (errMsg != ""));
+    TEST(DatabaseType, BadType) {
+        StatusWith<DatabaseType> status = DatabaseType::fromBSON(BSON(DatabaseType::name() << 0));
+        ASSERT_FALSE(status.isOK());
     }

-    TEST(Optionals, TestDefault) {
-        DatabaseType dbNotDraining;
-        BSONObj notDraining = BSON(DatabaseType::name("mydb") <<
-                                   DatabaseType::primary("shard"));
-        string errMsg;
-        ASSERT_TRUE(dbNotDraining.parseBSON(notDraining, &errMsg));
-        ASSERT_TRUE(dbNotDraining.isValid(NULL));
-        ASSERT_TRUE(dbNotDraining.isDrainingSet());
-        ASSERT_EQUALS(dbNotDraining.getDraining(), DatabaseType::draining.getDefault());
-    }
-
-    TEST(Optionals, TestSet) {
-        DatabaseType dbDraining;
-        BSONObj draining = BSON(DatabaseType::name("mydb") <<
-                                DatabaseType::primary("shard") <<
-                                DatabaseType::draining(true));
-        string errMsg;
-        ASSERT_TRUE(dbDraining.parseBSON(draining, &errMsg));
-        ASSERT_TRUE(dbDraining.isValid(NULL));
-        ASSERT_TRUE(dbDraining.isDrainingSet());
-        ASSERT_TRUE(dbDraining.getDraining());
-    }
-
-    TEST(Optionals, TestNotSet) {
-        DatabaseType dbNotDraining;
-        dbNotDraining.setName("mydb");
-        dbNotDraining.setPrimary("shard");
-        ASSERT_TRUE(dbNotDraining.isValid(NULL));
-        ASSERT_TRUE(dbNotDraining.isDrainingSet());
-        ASSERT_EQUALS(dbNotDraining.getDraining(), DatabaseType::draining.getDefault());
-        bool isDraining;
-        BSONObj genObj;
-        ASSERT_EQUALS( FieldParser::extract( genObj, DatabaseType::draining, &isDraining ),
-                       FieldParser::FIELD_DEFAULT );
-    }
-
-    TEST(Optionals, RoundTripOptionalOff) {
-        DatabaseType dbNotDraining;
-        BSONObj notDraining = BSON(DatabaseType::name("mydb") <<
-                                   DatabaseType::primary("shard"));
-        ASSERT_TRUE(notDraining[DatabaseType::name()].ok());
-        ASSERT_TRUE(notDraining[DatabaseType::primary()].ok());
-        ASSERT_TRUE(notDraining[DatabaseType::draining()].eoo());
-
-        string errMsg;
-        ASSERT_TRUE(dbNotDraining.parseBSON(notDraining, &errMsg));
-    }
-
-    TEST(Optionals, RoundTripOptionalOn) {
-        DatabaseType dbDraining;
-        BSONObj draining = BSON(DatabaseType::name("mydb") <<
-                                DatabaseType::primary("shard") <<
-                                DatabaseType::draining(true));
-        ASSERT_TRUE(draining[DatabaseType::name()].ok());
-        ASSERT_TRUE(draining[DatabaseType::primary()].ok());
-        ASSERT_TRUE(draining[DatabaseType::draining()].ok());
-
-        string errMsg;
-        ASSERT_TRUE(dbDraining.parseBSON(draining, &errMsg));
+    TEST(DatabaseType, MissingRequired) {
+        StatusWith<DatabaseType> status = DatabaseType::fromBSON(
+                                                BSON(DatabaseType::name("mydb")));
+        ASSERT_FALSE(status.isOK());
     }

 } // unnamed namespace