mirror of https://github.com/mongodb/mongo.git (synced 2024-12-01 09:32:32 +01:00)

commit 4c570bcada: Merge branch 'master' of github.com:mongodb/mongo
@@ -1248,7 +1248,7 @@ elif not onlyServer:
         shellEnv["CPPPATH"].remove( "/usr/64/include" )
         shellEnv["LIBPATH"].remove( "/usr/64/lib" )
     shellEnv.Append( CPPPATH=filterExists(["/sw/include" , "/opt/local/include"]) )
-    shellEnv.Append( LIBPATH=filterExists(["/sw/lib/", "/opt/local/lib" , "/usr/lib"]) )
+    shellEnv.Append( LIBPATH=filterExists(["/sw/lib/", "/opt/local/lib" , "/usr/lib", "/usr/local/lib" ]) )

     l = shellEnv["LIBS"]
@@ -261,7 +261,7 @@ namespace mongo {
         case Symbol:
         case mongo::String: {
             int x = valuestrsize();
-            if ( x > 0 && valuestr()[x-1] == 0 )
+            if ( x > 0 && x < BSONObjMaxSize && valuestr()[x-1] == 0 )
                 return;
             StringBuilder buf;
             buf << "Invalid dbref/code/string/symbol size: " << x << " strnlen:" << mongo::strnlen( valuestr() , x );
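The added x < BSONObjMaxSize guard bounds a length that arrives from untrusted bytes before it is ever used to index into the buffer. A minimal standalone sketch of the same pattern (names here are illustrative, not MongoDB's API):

    #include <cstdio>

    const int kMaxSize = 32 * 1024 * 1024; // mirrors BSONObjMaxSize above

    // Validate a length-prefixed, NUL-terminated string field. 'len' comes
    // from the wire, so it must be range-checked before it is used as an
    // index; otherwise a corrupt value like 0x7fffffff would read far past
    // the end of the buffer.
    bool validString(const char* data, int len) {
        if (len <= 0 || len >= kMaxSize)
            return false;               // reject corrupt/oversized lengths first
        return data[len - 1] == '\0';   // only now is this index known to be sane
    }

    int main() {
        const char buf[] = "hello";
        std::printf("%d\n", validString(buf, (int)sizeof(buf))); // 1
        std::printf("%d\n", validString(buf, 0x7fffffff));       // 0, no OOB read
    }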
@@ -27,6 +27,8 @@ namespace mongo {

     typedef set< BSONElement, BSONElementCmpWithoutField > BSONElementSet;

+    const int BSONObjMaxSize = 32 * 1024 * 1024;
+
     /**
        C++ representation of a "BSON" object -- that is, an extended JSON-style
        object in a binary representation.
@@ -67,6 +69,7 @@ namespace mongo {
     */
     class BSONObj {
     public:
+
         /** Construct a BSONObj from data in the proper format.
             @param ifree true if the BSONObj should free() the msgdata when
             it destructs.
@@ -5,28 +5,13 @@ import sys
 import re
 import utils

-def getAllSourceFiles( arr=None , prefix="." ):
-    if arr is None:
-        arr = []
-
-    for x in os.listdir( prefix ):
-        if x.startswith( "." ) or x.startswith( "pcre-" ) or x.startswith( "32bit" ) or x.startswith( "mongodb-" ) or x.startswith("debian") or x.startswith( "mongo-cxx-driver" ):
-            continue
-        full = prefix + "/" + x
-        if os.path.isdir( full ) and not os.path.islink( full ):
-            getAllSourceFiles( arr , full )
-        else:
-            if full.endswith( ".cpp" ) or full.endswith( ".h" ) or full.endswith( ".c" ):
-                arr.append( full )
-
-    return arr
-
 assertNames = [ "uassert" , "massert" ]

 def assignErrorCodes():
     cur = 10000
     for root in assertNames:
-        for x in getAllSourceFiles():
+        for x in utils.getAllSourceFiles():
             print( x )
             didAnything = False
             fixed = ""
@@ -50,7 +35,7 @@ def readErrorCodes( callback ):
     ps = [ re.compile( "([um]asser(t|ted)) *\( *(\d+)" ) ,
            re.compile( "(User|Msg)Exceptio(n)\( *(\d+)" )
            ]
-    for x in getAllSourceFiles():
+    for x in utils.getAllSourceFiles():
         lineNum = 1
         for line in open( x ):
             for p in ps:
@@ -152,7 +152,7 @@ class mongod(object):
         utils.ensureDir(dir_name)
         argv = [mongod_executable, "--port", str(self.port), "--dbpath", dir_name]
         if self.kwargs.get('small_oplog'):
-            argv += ["--master", "--oplogSize", "10"]
+            argv += ["--master", "--oplogSize", "100"]
         if self.slave:
             argv += ['--slave', '--source', 'localhost:' + str(srcport)]
         print "running " + " ".join(argv)
@@ -214,8 +214,9 @@ def check_db_hashes(master, slave):
     if not slave.slave:
         raise(Bug("slave instance doesn't have slave attribute set"))

-    print "waiting for slave to catch up, result:"
-    print Connection(port=master.port).test.smokeWait.insert({}, w=2, wtimeout=120000)
+    print "waiting for slave to catch up"
+    Connection(port=master.port).test.smokeWait.insert({}, w=2, wtimeout=5*60*1000)
+    print "caught up!"

     # FIXME: maybe make this run dbhash on all databases?
     for mongod in [master, slave]:
@@ -5,6 +5,23 @@ import time
 import os
 # various utilities that are handy

+def getAllSourceFiles( arr=None , prefix="." ):
+    if arr is None:
+        arr = []
+
+    for x in os.listdir( prefix ):
+        if x.startswith( "." ) or x.startswith( "pcre-" ) or x.startswith( "32bit" ) or x.startswith( "mongodb-" ) or x.startswith("debian") or x.startswith( "mongo-cxx-driver" ):
+            continue
+        full = prefix + "/" + x
+        if os.path.isdir( full ) and not os.path.islink( full ):
+            getAllSourceFiles( arr , full )
+        else:
+            if full.endswith( ".cpp" ) or full.endswith( ".h" ) or full.endswith( ".c" ):
+                arr.append( full )
+
+    return arr
+
+
 def getGitBranch():
     if not os.path.exists( ".git" ):
         return None
@@ -43,7 +43,7 @@ namespace mongo {
         ("quiet", "quieter output")
         ("port", po::value<int>(&cmdLine.port), "specify port number")
         ("bind_ip", po::value<string>(&cmdLine.bind_ip), "comma separated list of ip addresses to listen on - all local ips by default")
-        ("logpath", po::value<string>() , "file to send all output to instead of stdout" )
+        ("logpath", po::value<string>() , "log file to send write to instead of stdout - has to be a file, not directory" )
         ("logappend" , "append to logpath instead of over-writing" )
         ("pidfilepath", po::value<string>(), "full path to pidfile (if not set, no pidfile is created)")
 #ifndef _WIN32
@@ -515,7 +515,7 @@ sendmore:
             l << ( is32bit ? " 32" : " 64" ) << "-bit " << endl;
         }
         DEV log() << "_DEBUG build (which is slower)" << endl;
-        show_32_warning();
+        show_warnings();
         log() << mongodVersion() << endl;
         printGitVersion();
         printSysInfo();
@@ -622,7 +622,7 @@ using namespace mongo;
 namespace po = boost::program_options;

 void show_help_text(po::options_description options) {
-    show_32_warning();
+    show_warnings();
     cout << options << endl;
 };
db/diskloc.h (53 changes)
@@ -14,7 +14,7 @@
  * along with this program. If not, see <http://www.gnu.org/licenses/>.
  */

-/* storage.h
+/* @file diskloc.h

    Storage subsystem management.
    Lays out our datafiles on disk, manages disk space.
@@ -26,7 +26,6 @@

 namespace mongo {

-
     class Record;
     class DeletedRecord;
     class Extent;
@@ -34,43 +33,40 @@ namespace mongo {
     class MongoDataFile;

 #pragma pack(1)
     /** represents a disk location/offset on disk in a database. 64 bits.
         it is assumed these will be passed around by value a lot so don't do anything to make them large
         (such as adding a virtual function)
     */
     class DiskLoc {
-        int fileNo; /* this will be volume, file #, etc. */
+        int fileNo; // this will be volume, file #, etc. but is a logical value could be anything depending on storage engine
         int ofs;

     public:
-        // Note: MaxFiles imposes a limit of about 32TB of data per process
-        enum SentinelValues { MaxFiles=16000, NullOfs = -1 };
-
-        int a() const {
-            return fileNo;
-        }
+        enum SentinelValues {
+            MaxFiles=16000, // thus a limit of about 32TB of data per db
+            NullOfs = -1
+        };

-        DiskLoc(int a, int b) : fileNo(a), ofs(b) {
-            //assert(ofs!=0);
-        }
+        DiskLoc(int a, int b) : fileNo(a), ofs(b) { }
         DiskLoc() { Null(); }
         DiskLoc(const DiskLoc& l) {
             fileNo=l.fileNo;
             ofs=l.ofs;
         }

-        bool questionable() {
+        bool questionable() const {
             return ofs < -1 ||
                 fileNo < -1 ||
                 fileNo > 524288;
         }

-        bool isNull() const {
-            return fileNo == -1;
-            // return ofs == NullOfs;
-        }
+        bool isNull() const { return fileNo == -1; }
         void Null() {
-            fileNo = -1;
+            fileNo = NullOfs;
             ofs = 0;
         }
-        void assertOk() {
-            assert(!isNull());
-        }
+        void assertOk() { assert(!isNull()); }
         void setInvalid() {
             fileNo = -2;
             ofs = 0;
@@ -91,12 +87,10 @@ namespace mongo {
             return BSON( "file" << fileNo << "offset" << ofs );
         }

-        int& GETOFS() {
-            return ofs;
-        }
-        int getOfs() const {
-            return ofs;
-        }
+        int a() const { return fileNo; }
+
+        int& GETOFS() { return ofs; }
+        int getOfs() const { return ofs; }
         void set(int a, int b) {
             fileNo=a;
             ofs=b;
@@ -137,9 +131,10 @@ namespace mongo {
             return compare(b) < 0;
         }

-        /* get the "thing" associated with this disk location.
-           it is assumed the object is what it is -- you must asure that:
-           think of this as an unchecked type cast.
+        /* Get the "thing" associated with this disk location.
+           it is assumed the object is what you say it is -- you must assure that
+           (think of this as an unchecked type cast)
+           Note: set your Context first so that the database to which the diskloc applies is known.
         */
         BSONObj obj() const;
         Record* rec() const;
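The "about 32TB" in the new enum comment follows from MaxFiles times the maximum datafile size. Assuming the 2GB per-datafile cap of this era (an assumption; the cap itself is not shown in this diff), the arithmetic works out as below:

    #include <cstdint>
    #include <cstdio>

    int main() {
        const int64_t MaxFiles = 16000;                         // from DiskLoc::SentinelValues
        const int64_t MaxFileBytes = 2LL * 1024 * 1024 * 1024;  // assumed 2GB datafile cap
        int64_t total = MaxFiles * MaxFileBytes;
        std::printf("%lld GiB (~%.1f TiB)\n",
                    (long long)(total >> 30), (double)total / (1LL << 40));
        // prints: 32000 GiB (~31.2 TiB), i.e. the "about 32TB" in the comment
    }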
db/geo/2d.cpp (193 changes)
@@ -65,6 +65,12 @@ namespace mongo {
         GEO_SPHERE
     };

+    inline double computeXScanDistance(double y, double maxDistDegrees){
+        // TODO: this overestimates for large madDistDegrees far from the equator
+        return maxDistDegrees / min(cos(deg2rad(min(+89.0, y + maxDistDegrees))),
+                                    cos(deg2rad(max(-89.0, y - maxDistDegrees))));
+    }
+
     GeoBitSets geoBitSets;

     const string GEO2DNAME = "2d";
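The helper widens the east-west scan because a degree of longitude shrinks by cos(latitude): covering maxDistDegrees of ground east-west at latitude y takes maxDistDegrees / cos(y') degrees, where y' is the edge of the latitude band farthest from the equator, and the clamp to +/-89 degrees keeps the divisor away from zero at the poles. A standalone copy of the same formula for experimenting:

    #include <algorithm>
    #include <cmath>
    #include <cstdio>
    using namespace std;

    static double deg2rad(double d) { return d * M_PI / 180.0; }

    // Same formula as the helper added above, repeated here so it can be
    // compiled and poked at in isolation.
    double computeXScanDistance(double y, double maxDistDegrees) {
        return maxDistDegrees / min(cos(deg2rad(min(+89.0, y + maxDistDegrees))),
                                    cos(deg2rad(max(-89.0, y - maxDistDegrees))));
    }

    int main() {
        printf("%.4f\n", computeXScanDistance(0, 1));  // ~1.0002: near the equator x ~= y
        printf("%.4f\n", computeXScanDistance(60, 1)); // ~2.06: 1/cos(61 deg), band widens
        printf("%.4f\n", computeXScanDistance(89, 1)); // large but finite thanks to the clamp
    }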
@@ -243,6 +249,11 @@ namespace mongo {
         b.move( 1 , 1 );
         unhash( a, ax, ay );
         unhash( b, bx, by );
+
+        // _min and _max are a singularity
+        if (bx == _min)
+            bx = _max;
+
         return (fabs(ax-bx));
     }
@@ -364,6 +375,10 @@ namespace mongo {
                 between( _min._x , _max._x , x , fudge ) &&
                 between( _min._y , _max._y , y , fudge );
         }

+        bool contains(const Box& other, double fudge=0){
+            return inside(other._min, fudge) && inside(other._max, fudge);
+        }
+
         Point _min;
         Point _max;
@@ -717,15 +732,23 @@ namespace mongo {
     public:
         typedef multiset<GeoPoint> Holder;

-        GeoHopper( const Geo2dType * g , unsigned max , const GeoHash& n , const BSONObj& filter = BSONObj() , double maxDistance = numeric_limits<double>::max() )
-            : GeoAccumulator( g , filter ) , _max( max ) , _near( n ), _maxDistance( maxDistance ) {
-            _farthest = -1;
-        }
+        GeoHopper( const Geo2dType * g , unsigned max , const Point& n , const BSONObj& filter = BSONObj() , double maxDistance = numeric_limits<double>::max() , GeoDistType type=GEO_PLAIN)
+            : GeoAccumulator( g , filter ) , _max( max ) , _near( n ), _maxDistance( maxDistance ), _type( type ), _farthest(-1)
+        {}

         virtual bool checkDistance( const GeoHash& h , double& d ){
-            d = _g->distance( _near , h );
+            switch (_type){
+                case GEO_PLAIN:
+                    d = _near.distance( Point(_g, h) );
+                    break;
+                case GEO_SPHERE:
+                    d = spheredist_deg(_near, Point(_g, h));
+                    break;
+                default:
+                    assert(0);
+            }
             bool good = d < _maxDistance && ( _points.size() < _max || d < farthest() );
-            GEODEBUG( "\t\t\t\t\t\t\t checkDistance " << _near << "\t" << h << "\t" << d
+            GEODEBUG( "\t\t\t\t\t\t\t checkDistance " << _near.toString() << "\t" << h << "\t" << d
                       << " ok: " << good << " farthest: " << farthest() );
             return good;
         }
@@ -735,21 +758,26 @@ namespace mongo {
             _points.insert( GeoPoint( node.key , node.recordLoc , d ) );
             if ( _points.size() > _max ){
                 _points.erase( --_points.end() );
+
+                Holder::iterator i = _points.end();
+                i--;
+                _farthest = i->_distance;
+            } else {
+                if (d > _farthest)
+                    _farthest = d;
             }
-
-            Holder::iterator i = _points.end();
-            i--;
-            _farthest = i->_distance;
         }

         double farthest() const {
             return _farthest;
         }

         unsigned _max;
-        GeoHash _near;
+        Point _near;
         Holder _points;
         double _maxDistance;
+        GeoDistType _type;
         double _farthest;
     };
@@ -814,13 +842,13 @@ namespace mongo {

             min.bucket = id.head.btree()->locate( id , id.head , start.wrap() ,
                                                   ordering , min.pos , min.found , minDiskLoc );
-            min.checkCur( found , hopper );
+            if (hopper) min.checkCur( found , hopper );
             max = min;

-            if ( min.bucket.isNull() || ( !(hopper->found()) ) ){
+            if ( min.bucket.isNull() || ( hopper && !(hopper->found()) ) ){
                 min.bucket = id.head.btree()->locate( id , id.head , start.wrap() ,
                                                       ordering , min.pos , min.found , minDiskLoc , -1 );
-                min.checkCur( found , hopper );
+                if (hopper) min.checkCur( found , hopper );
             }

             return ! min.bucket.isNull() || ! max.bucket.isNull();
@@ -829,14 +857,27 @@ namespace mongo {

     class GeoSearch {
     public:
-        GeoSearch( const Geo2dType * g , const GeoHash& n , int numWanted=100 , BSONObj filter=BSONObj() , double maxDistance = numeric_limits<double>::max() )
-            : _spec( g ) , _n( n ) , _start( n ) ,
+        GeoSearch( const Geo2dType * g , const GeoHash& n , int numWanted=100 , BSONObj filter=BSONObj() , double maxDistance = numeric_limits<double>::max() , GeoDistType type=GEO_PLAIN)
+            : _spec( g ) ,_startPt(g,n), _start( n ) ,
               _numWanted( numWanted ) , _filter( filter ) , _maxDistance( maxDistance ) ,
-              _hopper( new GeoHopper( g , numWanted , n , filter , maxDistance ) )
+              _hopper( new GeoHopper( g , numWanted , _startPt , filter , maxDistance, type ) ), _type(type)
         {
             assert( g->getDetails() );
             _nscanned = 0;
             _found = 0;
+
+            if (type == GEO_PLAIN){
+                _scanDistance = maxDistance;
+            } else if (type == GEO_SPHERE) {
+                if (maxDistance == numeric_limits<double>::max()){
+                    _scanDistance = maxDistance;
+                } else {
+                    //TODO: consider splitting into x and y scan distances
+                    _scanDistance = computeXScanDistance(_startPt._y, rad2deg(maxDistance));
+                }
+            } else {
+                assert(0);
+            }
         }

         void exec(){
@@ -855,53 +896,77 @@ namespace mongo {
             GeoHopper * hopper = _hopper.get();

             _prefix = _start;
+            BtreeLocation min,max;
             { // 1 regular geo hash algorithm

-                BtreeLocation min,max;
-                if ( ! BtreeLocation::initial( id , _spec , min , max , _n , _found , hopper ) )
+                if ( ! BtreeLocation::initial( id , _spec , min , max , _start , _found , NULL ) )
                     return;

-                while ( _hopper->found() < _numWanted ){
+                while ( !_prefix.constrains() || // if next pass would cover universe, just keep going
+                        ( _hopper->found() < _numWanted && _spec->sizeEdge( _prefix ) <= _scanDistance))
+                {
                     GEODEBUG( _prefix << "\t" << _found << "\t DESC" );
-                    while ( min.hasPrefix( _prefix ) && min.advance( -1 , _found , hopper ) )
+                    while ( min.hasPrefix(_prefix) && min.checkCur(_found, hopper) && min.advance(-1, _found, NULL) )
                         _nscanned++;
                     GEODEBUG( _prefix << "\t" << _found << "\t ASC" );
-                    while ( max.hasPrefix( _prefix ) && max.advance( 1 , _found , hopper ) )
+                    while ( max.hasPrefix(_prefix) && max.checkCur(_found, hopper) && max.advance(+1, _found, NULL) )
                         _nscanned++;
-                    if ( ! _prefix.constrains() )
-                        break;
+
+                    if ( ! _prefix.constrains() ){
+                        GEODEBUG( "done search w/o part 2" )
+                        return;
+                    }
+
+                    _alreadyScanned = Box(_spec, _prefix);
                     _prefix = _prefix.up();
-
-                    double temp = _spec->distance( _prefix , _start );
-                    if ( temp > ( _maxDistance * 2 ) )
-                        break;
                 }
             }
             GEODEBUG( "done part 1" );
-            if ( _found && _prefix.constrains() ){
+            {
                 // 2
-                Point center( _spec , _n );
                 double farthest = hopper->farthest();
-                // Phase 1 might not have found any points.
-                if (farthest == -1)
-                    farthest = _spec->sizeDiag( _prefix );
-                Box want( center._x - farthest , center._y - farthest , farthest * 2 );
-                _prefix = _n;
-                while ( _spec->sizeEdge( _prefix ) < farthest ){
+                GEODEBUGPRINT(hopper->farthest());
+                if (farthest == -1){
+                    // Nothing found in Phase 1
+                    farthest = _scanDistance;
+                } else if (_type == GEO_SPHERE) {
+                    farthest = std::min(_scanDistance, computeXScanDistance(_startPt._y, rad2deg(farthest)));
+                }
+                GEODEBUGPRINT(farthest);
+
+                Box want( _startPt._x - farthest , _startPt._y - farthest , farthest * 2 );
+                GEODEBUGPRINT(want.toString());
+
+                _prefix = _start;
+                while (_prefix.constrains() && _spec->sizeEdge( _prefix ) < farthest ){
                     _prefix = _prefix.up();
                 }

+                PREFIXDEBUG(_prefix, _spec);
+
+                if (_prefix.getBits() <= 1){
+                    // TODO consider walking in $natural order
+
+                    while ( min.checkCur(_found, hopper) && min.advance(-1, _found, NULL) )
+                        _nscanned++;
+                    while ( max.checkCur(_found, hopper) && max.advance(+1, _found, NULL) )
+                        _nscanned++;
+
+                    GEODEBUG( "done search after scanning whole collection" )
+                    return;
+                }
+
                 if ( logLevel > 0 ){
                     log(1) << "want: " << want << " found:" << _found << " nscanned: " << _nscanned << " hash size:" << _spec->sizeEdge( _prefix )
                            << " farthest: " << farthest << " using box: " << Box( _spec , _prefix ).toString() << endl;
                 }

                 for ( int x=-1; x<=1; x++ ){
                     for ( int y=-1; y<=1; y++ ){
                         GeoHash toscan = _prefix;
                         toscan.move( x , y );

                         // 3 & 4
                         doBox( id , want , toscan );
                     }
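Phase 1 now expands the search ring one prefix level per pass: GeoHash::up() drops a bit per axis, doubling the cell edge, and the loop stops once enough points are found or a single cell already spans _scanDistance (with _alreadyScanned remembering the last box so doBox can skip it in phase 2). A toy model of that expansion, with a plain string standing in for GeoHash (all names here are hypothetical, not the real classes):

    #include <cstdio>
    #include <string>

    int main() {
        std::string prefix = "11011011101110111011";     // 20 bits: 10 per axis
        double edge = 1.0 / (1 << (prefix.size() / 2));  // cell edge, unit-square units
        const double scanDistance = 0.05;
        int found = 0, wanted = 100;

        while (!prefix.empty() && found < wanted && edge <= scanDistance) {
            // ... scan index entries sharing 'prefix', bumping 'found' ...
            prefix.resize(prefix.size() - 2); // GeoHash::up(): drop one bit per axis
            edge *= 2;                        // so each pass covers 4x the area
        }
        std::printf("stopped with %zu prefix bits, edge %.4f\n", prefix.size(), edge);
    }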
@@ -918,12 +983,21 @@ namespace mongo {
                 for ( int i=0; i<depth; i++ )
                     cout << "\t";
                 cout << " doBox: " << testBox.toString() << "\t" << toscan.toString() << " scanned so far: " << _nscanned << endl;
+            } else {
+                GEODEBUGPRINT(testBox.toString());
             }

+            if (_alreadyScanned.contains(testBox, _spec->_error)){
+                GEODEBUG("skipping box: already scanned");
+                return; // been here, done this
+            }
+
             double intPer = testBox.intersects( want );

-            if ( intPer <= 0 )
+            if ( intPer <= 0 ){
+                GEODEBUG("skipping box: not in want");
                 return;
+            }

             bool goDeeper = intPer < .5 && depth < 2;
@@ -949,16 +1023,20 @@ namespace mongo {

         const Geo2dType * _spec;

-        GeoHash _n;
+        Point _startPt;
         GeoHash _start;
         GeoHash _prefix;
         int _numWanted;
         BSONObj _filter;
         double _maxDistance;
+        double _scanDistance;
         shared_ptr<GeoHopper> _hopper;

         long long _nscanned;
         int _found;
+        GeoDistType _type;
+
+        Box _alreadyScanned;
     };

     class GeoCursorBase : public Cursor {
@@ -1160,16 +1238,13 @@ namespace mongo {
                 _xScanDistance = _maxDistance;
                 _yScanDistance = _maxDistance;
             } else if (type == "$centerSphere") {
-                uassert(13451, "Spherical MaxDistance > PI. Are you sure you are using radians?", _maxDistance < M_PI);
+                uassert(13461, "Spherical MaxDistance > PI. Are you sure you are using radians?", _maxDistance < M_PI);

                 _type = GEO_SPHERE;
                 _yScanDistance = rad2deg(_maxDistance);
+                _xScanDistance = computeXScanDistance(_startPt._y, _yScanDistance);

-                // TODO: this overestimates for large _maxDistance far from the equator
-                _xScanDistance = rad2deg(_maxDistance) / min(cos(deg2rad(_startPt._y + _yScanDistance)),
-                                                             cos(deg2rad(_startPt._y - _yScanDistance)));
-
-                uassert(13452, "Spherical distance would require wrapping, which isn't implemented yet",
+                uassert(13462, "Spherical distance would require wrapping, which isn't implemented yet",
                         (_startPt._x + _xScanDistance < 180) && (_startPt._x - _xScanDistance > -180) &&
                         (_startPt._y + _yScanDistance < 90) && (_startPt._y - _yScanDistance > -90));

@@ -1177,7 +1252,7 @@ namespace mongo {
                 GEODEBUGPRINT(_xScanDistance);
                 GEODEBUGPRINT(_yScanDistance);
             } else {
-                uassert(13450, "invalid $center query type: " + type, false);
+                uassert(13460, "invalid $center query type: " + type, false);
             }

             ok();
@@ -1302,6 +1377,8 @@ namespace mongo {
                 case GEO_SPHERE:
                     d = spheredist_deg(_startPt, Point(_g, h));
                     break;
+                default:
+                    assert(0);
             }

             GEODEBUG( "\t " << h << "\t" << d );
@@ -1462,6 +1539,18 @@ namespace mongo {
             case BSONObj::opNEAR: {
                 BSONObj n = e.embeddedObject();
                 e = n.firstElement();
+
+                const char* suffix = e.fieldName() + 5; // strlen("$near") == 5;
+                GeoDistType type;
+                if (suffix[0] == '\0') {
+                    type = GEO_PLAIN;
+                } else if (strcmp(suffix, "Sphere") == 0) {
+                    type = GEO_SPHERE;
+                } else {
+                    uassert(13464, string("invalid $near search type: ") + e.fieldName(), false);
+                    type = GEO_PLAIN; // prevents uninitialized warning
+                }
+
                 double maxDistance = numeric_limits<double>::max();
                 if ( e.isABSONObj() && e.embeddedObject().nFields() > 2 ){
                     BSONObjIterator i(e.embeddedObject());
@@ -1476,7 +1565,7 @@ namespace mongo {
                     if ( e.isNumber() )
                         maxDistance = e.numberDouble();
                 }
-                shared_ptr<GeoSearch> s( new GeoSearch( this , _tohash(e) , numWanted , query , maxDistance ) );
+                shared_ptr<GeoSearch> s( new GeoSearch( this , _tohash(e) , numWanted , query , maxDistance, type ) );
                 s->exec();
                 shared_ptr<Cursor> c;
                 c.reset( new GeoSearchCursor( s ) );
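Together with the matcher change further down, which routes any "$near"-prefixed operator here, this block dispatches on the suffix: bare "$near" stays planar, "$nearSphere" selects spherical distance, and anything else fails with error 13464. A standalone restatement of the dispatch (the helper name and main are invented for illustration):

    #include <cstdio>
    #include <cstring>

    enum GeoDistType { GEO_PLAIN, GEO_SPHERE };

    // Mirrors the suffix dispatch in the hunk above: the field name is known
    // to start with "$near" by the time we get here, so skip those 5 chars.
    bool parseNearType(const char* fieldName, GeoDistType& type) {
        const char* suffix = fieldName + 5;       // strlen("$near") == 5
        if (suffix[0] == '\0')             { type = GEO_PLAIN;  return true; }
        if (strcmp(suffix, "Sphere") == 0) { type = GEO_SPHERE; return true; }
        return false; // the real code raises uassert 13464 here
    }

    int main() {
        GeoDistType t;
        std::printf("%d\n", parseNearType("$near", t) && t == GEO_PLAIN);        // 1
        std::printf("%d\n", parseNearType("$nearSphere", t) && t == GEO_SPHERE); // 1
        std::printf("%d\n", parseNearType("$nearFoo", t));                       // 0
    }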
@@ -1563,7 +1652,11 @@ namespace mongo {
             if ( cmdObj["maxDistance"].isNumber() )
                 maxDistance = cmdObj["maxDistance"].number();

-            GeoSearch gs( g , n , numWanted , filter , maxDistance );
+            GeoDistType type = GEO_PLAIN;
+            if ( cmdObj["spherical"].trueValue() )
+                type = GEO_SPHERE;
+
+            GeoSearch gs( g , n , numWanted , filter , maxDistance , type);

             if ( cmdObj["start"].type() == String){
                 GeoHash start ((string) cmdObj["start"].valuestr());
@@ -282,7 +282,7 @@ namespace mongo {
         else if ( fn[1] == 'n' && fn[2] == 'e' ){
             if ( fn[3] == 0 )
                 return BSONObj::NE;
-            if ( fn[3] == 'a' && fn[4] == 'r' && fn[5] == 0 )
+            if ( fn[3] == 'a' && fn[4] == 'r') // matches anything with $near prefix
                 return BSONObj::opNEAR;
         }
         else if ( fn[1] == 'm' ){
@@ -18,7 +18,6 @@

 #include "pch.h"
 #include "pdfile.h"
-//#include "reccache.h"
 #include "rec.h"
 #include "db.h"
debian/changelog (8 changes, vendored)
@@ -1,8 +1,16 @@
+mongodb (1.7.0) unstable; urgency=low
+
+  * sharding lots of changes
+  * replica_sets lots of changes
+
+ -- Richard Kreuter <richard@10gen.com>  Fri, 03 Sep 2010 16:56:28 -0500
+
 mongodb (1.5.8) unstable; urgency=low

   * sharding lots of changes
   * replica_sets lots of changes

  -- Richard Kreuter <richard@10gen.com>  Tue, 03 Aug 2010 16:56:28 -0500

 mongodb (1.5.7) unstable; urgency=low
@@ -3,7 +3,7 @@
 #---------------------------------------------------------------------------
 DOXYFILE_ENCODING      = UTF-8
 PROJECT_NAME           = MongoDB
-PROJECT_NUMBER         = 1.7.0-pre-
+PROJECT_NUMBER         = 1.7.1-pre-
 OUTPUT_DIRECTORY       = docs/doxygen
 CREATE_SUBDIRS         = NO
 OUTPUT_LANGUAGE        = English
@@ -116,7 +116,7 @@ function f() {
 }

 var h = (new Date()).getHours();
-if (!db._adminCommand("buildInfo").debug) {
+if (!db._adminCommand("buildInfo").debug && !db.runCommand( { serverStatus : 1 , repl : 1 } ).repl ){
 if (forceSeedToBe || h <= 2 || h >= 22) {
     /* this test is slow, so don't run during the day */
     print("\n32bit.js running - this test is slow so only runs at night.");
jstests/geo_near_random1.js (new file, 12 lines)
@@ -0,0 +1,12 @@
+// this tests all points using $near
+load("jstests/libs/geo_near_random.js");
+
+var test = new GeoNearRandomTest("geo_near_random1");
+
+test.insertPts(50);
+
+test.testPt([0,0]);
+test.testPt(test.mkPt());
+test.testPt(test.mkPt());
+test.testPt(test.mkPt());
+test.testPt(test.mkPt());
jstests/geo_near_random2.js (new file, 21 lines)
@@ -0,0 +1,21 @@
+// this tests 1% of all points using $near and $nearSphere
+load("jstests/libs/geo_near_random.js");
+
+var test = new GeoNearRandomTest("geo_near_random2");
+
+test.insertPts(5000);
+
+opts = {sphere:0, nToTest:test.nPts*0.01};
+test.testPt([0,0], opts);
+test.testPt(test.mkPt(), opts);
+test.testPt(test.mkPt(), opts);
+test.testPt(test.mkPt(), opts);
+test.testPt(test.mkPt(), opts);
+
+opts.sphere = 1
+test.testPt([0,0], opts);
+test.testPt(test.mkPt(0.8), opts);
+test.testPt(test.mkPt(0.8), opts);
+test.testPt(test.mkPt(0.8), opts);
+test.testPt(test.mkPt(0.8), opts);
jstests/libs/geo_near_random.js (new file, 76 lines)
@@ -0,0 +1,76 @@
+GeoNearRandomTest = function(name) {
+    this.name = name;
+    this.t = db[name];
+    this.nPts = 0;
+
+    // reset state
+    this.t.drop();
+    Random.srand(1234);
+
+    print("starting test: " + name);
+}
+
+
+GeoNearRandomTest.prototype.mkPt = function mkPt(scale){
+    scale = scale || 1; // scale is good for staying away from edges
+    return [((Random.rand() * 359.8) - 179.9) * scale, ((Random.rand() * 180) - 90) * scale];
+}
+
+GeoNearRandomTest.prototype.insertPts = function(nPts) {
+    assert.eq(this.nPts, 0, "insertPoints already called");
+    this.nPts = nPts;
+
+    for (var i=0; i<nPts; i++){
+        this.t.insert({_id: i, loc: this.mkPt()});
+    }
+
+    this.t.ensureIndex({loc: '2d'});
+}
+
+GeoNearRandomTest.prototype.assertIsPrefix = function(short, long) {
+    for (var i=0; i < short.length; i++){
+        assert.eq(short[i], long[i]);
+    }
+}
+
+GeoNearRandomTest.prototype.testPt = function(pt, opts) {
+    assert.neq(this.nPts, 0, "insertPoints not yet called");
+
+    opts = opts || {};
+    opts['sphere'] = opts['sphere'] || 0;
+    opts['nToTest'] = opts['nToTest'] || this.nPts; // be careful, test is O( N^2 )
+
+    print("testing point: " + tojson(pt) + " opts: " + tojson(opts));
+
+
+    var cmd = {geoNear:this.t.getName(), near: pt, num: 1, spherical:opts.sphere};
+
+    var last = db.runCommand(cmd).results;
+    for (var i=2; i <= opts.nToTest; i++){
+        //print(i); // uncomment to watch status
+        cmd.num = i
+        var ret = db.runCommand(cmd).results;
+
+        try {
+            this.assertIsPrefix(last, ret);
+        } catch (e) {
+            print("*** failed while compairing " + (i-1) + " and " + i);
+            printjson(cmd);
+            throw e; // rethrow
+        }
+
+        last = ret;
+    }
+
+
+    last = last.map(function(x){return x.obj});
+
+    var query = {loc:{}};
+    query.loc[ opts.sphere ? '$nearSphere' : '$near' ] = pt;
+    var near = this.t.find(query).limit(opts.nToTest).toArray();
+
+    this.assertIsPrefix(last, near);
+    assert.eq(last, near);
+}
jstests/perf/geo_near1.js (new file, 11 lines)
@@ -0,0 +1,11 @@
+var t = db.bench.geo_near1;
+t.drop()
+
+var numPts = 1000*1000;
+
+
+for (var i=0; i < numPts; i++){
+    x = (Math.random() * 100) - 50;
+    y = (Math.random() * 100) - 50;
+    t.insert({loc: [x,y], i: i});
+}
@@ -23,15 +23,15 @@ doTest = function (signal) {
     master.getDB("barDB").bar.save({ a: 1 });
     replTest.awaitReplication();

-    // These writes should be replicated immediately
-    master.getDB(testDB).foo.insert({ n: 1 });
-    master.getDB(testDB).foo.insert({ n: 2 });
-    master.getDB(testDB).foo.insert({ n: 3 });
+    // These writes should be replicated immediately
+    var docNum = 5000;
+    for(var n=0; n<docNum; n++) {
+        master.getDB(testDB).foo.insert({ n: n });
+    }

     // *** NOTE ***: The default doesn't seem to be propogating.
     // When I run getlasterror with no defaults, the slaves don't have the data:
     // These getlasterror commands can be run individually to verify this.
     //master.getDB("admin").runCommand({ getlasterror: 1, w: 3, wtimeout: 20000 });
     // If you want to test failure, just add values for w and wtimeout
     // to the following command. This will override the default set above and
     // prevent replication from happening in time for the count tests below.
     master.getDB("admin").runCommand({getlasterror: 1});

     var slaves = replTest.liveNodes.slaves;
@@ -40,31 +40,15 @@ doTest = function (signal) {

     print("Testing slave counts");

-    // These should all have 3 documents, but they don't always.
-    var master1count = master.getDB(testDB).foo.count();
-    assert( master1count == 3, "Master has " + master1count + " of 3 documents!");
-
     var slave0count = slaves[0].getDB(testDB).foo.count();
-    assert( slave0count == 3, "Slave 0 has " + slave0count + " of 3 documents!");
+    assert( slave0count == docNum, "Slave 0 has " + slave0count + " of " + docNum + " documents!");

     var slave1count = slaves[1].getDB(testDB).foo.count();
-    assert( slave1count == 3, "Slave 1 has " + slave1count + " of 3 documents!");
+    assert( slave1count == docNum, "Slave 1 has " + slave1count + " of " + docNum + " documents!");

-    print("Testing slave 0");
+    var master1count = master.getDB(testDB).foo.count();
+    assert( master1count == docNum, "Master has " + master1count + " of " + docNum + " documents!");

-    var s0 = slaves[0].getDB(testDB).foo.find();
-    assert(s0.next()['n']);
-    assert(s0.next()['n']);
-    assert(s0.next()['n']);
-
-    print("Testing slave 1");
-
-    var s1 = slaves[1].getDB(testDB).foo.find();
-    assert(s1.next()['n']);
-    assert(s1.next()['n']);
-    assert(s1.next()['n']);
-
     // End test
     replTest.stopSet(signal);
 }
jstests/slowNightly/geo_near_random1.js (new file, 13 lines)
@@ -0,0 +1,13 @@
+// this tests all points using $near
+load("jstests/libs/geo_near_random.js");
+
+var test = new GeoNearRandomTest("nightly.geo_near_random1");
+
+test.insertPts(200);
+
+test.testPt([0,0]);
+test.testPt(test.mkPt());
+test.testPt(test.mkPt());
+test.testPt(test.mkPt());
+test.testPt(test.mkPt());
+
jstests/slowNightly/geo_near_random2.js (new file, 21 lines)
@@ -0,0 +1,21 @@
+// this tests 1% of all points using $near and $nearSphere
+load("jstests/libs/geo_near_random.js");
+
+var test = new GeoNearRandomTest("nightly.geo_near_random2");
+
+test.insertPts(10000);
+
+opts = {sphere:0, nToTest:test.nPts*0.01};
+test.testPt([0,0], opts);
+test.testPt(test.mkPt(), opts);
+test.testPt(test.mkPt(), opts);
+test.testPt(test.mkPt(), opts);
+test.testPt(test.mkPt(), opts);
+
+opts.sphere = 1
+test.testPt([0,0], opts);
+test.testPt(test.mkPt(0.8), opts);
+test.testPt(test.mkPt(0.8), opts);
+test.testPt(test.mkPt(0.8), opts);
+test.testPt(test.mkPt(0.8), opts);
+
@@ -48,19 +48,6 @@ check( "initial at end" )

 assert.lt( 20 , s.config.chunks.count() , "setup2" );

-function dist(){
-    var x = {}
-    s.config.chunks.find( { ns : "test.foo" } ).forEach(
-        function(z){
-            if ( x[z.shard] )
-                x[z.shard]++
-            else
-                x[z.shard] = 1;
-        }
-    );
-    return x;
-}
-
 function check( msg , dontAssert ){
     for ( var x in counts ){
         var e = counts[x];
@@ -98,14 +85,14 @@ function diff(){
             check(); // SERVER-1430 TODO
     }

-    var x = dist();
+    var x = s.chunkCounts( "foo" )
     if ( Math.random() > .999 )
         printjson( x )
     return Math.max( x.shard0000 , x.shard0001 ) - Math.min( x.shard0000 , x.shard0001 );
 }

 function sum(){
-    var x = dist();
+    var x = s.chunkCounts( "foo" )
     return x.shard0000 + x.shard0001;
 }
jstests/slowWeekly/geo_near_random1.js (new file, 13 lines)
@@ -0,0 +1,13 @@
+// this tests all points using $near
+load("jstests/libs/geo_near_random.js");
+
+var test = new GeoNearRandomTest("weekly.geo_near_random1");
+
+test.insertPts(1000);
+
+test.testPt([0,0]);
+test.testPt(test.mkPt());
+test.testPt(test.mkPt());
+test.testPt(test.mkPt());
+test.testPt(test.mkPt());
+
jstests/slowWeekly/geo_near_random2.js (new file, 21 lines)
@@ -0,0 +1,21 @@
+// this tests 1% of all points using $near and $nearSphere
+load("jstests/libs/geo_near_random.js");
+
+var test = new GeoNearRandomTest("weekly.geo_near_random2");
+
+test.insertPts(50000);
+
+opts = {sphere:0, nToTest:test.nPts*0.01};
+test.testPt([0,0], opts);
+test.testPt(test.mkPt(), opts);
+test.testPt(test.mkPt(), opts);
+test.testPt(test.mkPt(), opts);
+test.testPt(test.mkPt(), opts);
+
+opts.sphere = 1
+test.testPt([0,0], opts);
+test.testPt(test.mkPt(0.8), opts);
+test.testPt(test.mkPt(0.8), opts);
+test.testPt(test.mkPt(0.8), opts);
+test.testPt(test.mkPt(0.8), opts);
+
@@ -1,5 +1,5 @@
 Name: mongo
-Version: 1.6.0
+Version: 1.7.0
 Release: mongodb_1%{?dist}
 Summary: mongo client shell and tools
 License: AGPL 3.0
s/chunk.cpp (15 changes)
@@ -135,8 +135,6 @@ namespace mongo {
         }

         BSONObj median = result.getObjectField( "median" ).getOwned();
-        conn.done();
-
         if (median == getMin()){
             Query q;
@@ -147,6 +145,8 @@ namespace mongo {
             median = _manager->getShardKey().extractKey( median );
         }

+        conn.done();
+
         if ( median < getMin() || median >= getMax() ){
             stringstream ss;
             ss << "medianKey returned value out of range. "
@@ -352,7 +352,7 @@ namespace mongo {
             return _splitIfShould( dataWritten );
         }
         catch ( std::exception& e ){
-            log( LL_ERROR ) << "splitIfShould failed: " << e.what() << endl;
+            error() << "splitIfShould failed: " << e.what() << endl;
             return false;
         }
     }
@@ -390,7 +390,8 @@ namespace mongo {

         BSONObj splitPoint = pickSplitPoint();
         if ( splitPoint.isEmpty() || _min == splitPoint || _max == splitPoint) {
-            log() << "SHARD PROBLEM** shard is too big, but can't split: " << toString() << endl;
+            error() << "want to split chunk, but can't find split point "
+                    << " chunk: " << toString() << " got: " << splitPoint << endl;
             return false;
         }

@@ -427,10 +428,10 @@ namespace mongo {

         assert( toMove );

-        Shard newLocation = Shard::pick();
+        Shard newLocation = Shard::pick( getShard() );
         if ( getShard() == newLocation ){
-            // if this is the best server, then we shouldn't do anything!
-            log(1) << "not moving chunk: " << toString() << " b/c would move to same place " << newLocation.toString() << " -> " << getShard().toString() << endl;
+            // if this is the best shard, then we shouldn't do anything (Shard::pick already logged our shard).
+            log(1) << "recently split chunk: " << toString() << "already in the best shard" << endl;
             return 0;
         }
s/shard.cpp (24 changes)
@@ -219,7 +219,7 @@ namespace mongo {
         staticShardInfo.remove( name );
     }

-    Shard Shard::pick( const Shard& exclude ){
+    Shard Shard::pick( const Shard& current ){
         vector<Shard> all;
         staticShardInfo.getAllShards( all );
         if ( all.size() == 0 ){
@@ -229,27 +229,19 @@ namespace mongo {
             return EMPTY;
         }

-        // if provided, do not consider the 'exclude' shard as a viable candidate
-        if ( exclude != EMPTY ){
-            for ( vector<Shard>::iterator it= all.begin() ; it != all.end() ; ++it ){
-                if ( exclude == *it ){
-                    all.erase( it );
-                    break;
-                }
-            }
-            if ( all.size() == 0 )
-                return EMPTY;
-        }
-
+        // if current shard was provided, pick a different shard only if it is a better choice
         ShardStatus best = all[0].getStatus();
-
-        for ( size_t i=1; i<all.size(); i++ ){
+        if ( current != EMPTY ){
+            best = current.getStatus();
+        }
+
+        for ( size_t i=0; i<all.size(); i++ ){
             ShardStatus t = all[i].getStatus();
             if ( t < best )
                 best = t;
         }

-        log(1) << "picking shard: " << best << endl;
+        log(1) << "best shard for new allocation is " << best << endl;
         return best.shard();
     }
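The rewrite changes pick()'s contract: instead of deleting the excluded shard from the candidate list, it seeds best with the current holder's status, so a chunk moves only when some other shard is strictly less loaded. A condensed sketch of that rule over stubbed-out status values (Status here is a stand-in, not the real ShardStatus):

    #include <cstdio>
    #include <vector>

    // Stand-in for ShardStatus: lower mapped size sorts first (less loaded).
    struct Status {
        long long mapped;
        int id;
        bool operator<(const Status& o) const { return mapped < o.mapped; }
    };

    // Same selection rule as the rewritten Shard::pick: seed 'best' with the
    // chunk's current holder when one is given, then accept only a strictly
    // better candidate, so ties leave the chunk where it already is.
    int pick(const std::vector<Status>& all, const Status* current) {
        Status best = current ? *current : all[0];
        for (size_t i = 0; i < all.size(); i++)
            if (all[i] < best)
                best = all[i];
        return best.id;
    }

    int main() {
        std::vector<Status> all = { {500, 0}, {200, 1} };
        std::printf("%d\n", pick(all, &all[0])); // 1: shard 1 is strictly emptier
        all[1].mapped = 500;
        std::printf("%d\n", pick(all, &all[0])); // 0: a tie keeps the current shard
    }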
@@ -131,10 +131,10 @@ namespace mongo {
         static void printShardInfo( ostream& out );

         /**
-         * @parm exclude - disconsiders this shard when selecting one available
-         * @return the currently best known shard to put a chunk/database or EMPTY
+         * @parm current - shard where the chunk/database currently lives in
+         * @return the currently emptiest shard, if best then current, or EMPTY
          */
-        static Shard pick( const Shard& exclude = EMPTY );
+        static Shard pick( const Shard& current = EMPTY );

         static void reloadShardInfo();
@@ -90,7 +90,7 @@ namespace mongo {
         OID lastID;
         lastID.clear();
         int secsToSleep = 0;
-        while ( Shard::isMember( _addr ) ){
+        while ( ! inShutdown() && Shard::isMember( _addr ) ){

             if ( lastID.isSet() ){
                 scoped_lock lk( _seenWritebacksLock );
@@ -159,8 +159,14 @@ namespace mongo {
                 continue;
             }
             catch ( std::exception e ){
-                log() << "WriteBackListener exception : " << e.what() << endl;

+                if ( inShutdown() ){
+                    // we're shutting down, so just clean up
+                    return;
+                }
+
+                log() << "WriteBackListener exception : " << e.what() << endl;
+
                 // It's possible this shard was removed
                 Shard::reloadShardInfo();
             }
@@ -185,10 +185,15 @@ namespace mongo {

             uassert( 10202 , "can't mix multi and upsert and sharding" , ! ( upsert && multi ) );

-            if ( upsert && !(manager->hasShardKey(toupdate) ||
-                             (toupdate.firstElement().fieldName()[0] == '$' && manager->hasShardKey(query))))
-            {
-                throw UserException( 8012 , "can't upsert something without shard key" );
+            if (upsert){
+                uassert(8012, "can't upsert something without shard key",
+                        (manager->hasShardKey(toupdate) ||
+                         (toupdate.firstElement().fieldName()[0] == '$' && manager->hasShardKey(query))));
+
+                BSONObj key = manager->getShardKey().extractKey(query);
+                BSONForEach(e, key){
+                    uassert(13465, "shard key in upsert query must be an exact match", getGtLtOp(e) == BSONObj::Equality);
+                }
             }

             bool save = false;
@@ -83,7 +83,7 @@ namespace mongo {
         }


-        FILE* tmp = fopen(_path.c_str(), (_append ? "a" : "w"));
+        FILE* tmp = freopen(_path.c_str(), (_append ? "a" : "w"), stdout);
         if (!tmp){
             cerr << "can't open: " << _path.c_str() << " for log file" << endl;
             dbexit( EXIT_BADOPTIONS );
@@ -92,10 +92,6 @@ namespace mongo {

         Logstream::setLogFile(tmp); // after this point no thread will be using old file

-        if (_file){
-            fclose(_file);
-        }
-
         _file = tmp;
         _opened = time(0);
     }
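Switching fopen to freopen rebinds the existing stdout stream to the log file rather than opening a second handle, so plain stdout writes also land in the log, and there is no stale FILE* left over, which is why the old _file cleanup block is dropped. A minimal illustration (the log path is hypothetical):

    #include <cstdio>

    int main() {
        // freopen() reuses the existing 'stdout' FILE object, redirecting it
        // to the given path; no second handle is left behind to fclose().
        if (!std::freopen("/tmp/mongod.log", "a", stdout)) {
            std::perror("freopen");
            return 1;
        }
        std::printf("this line is appended to /tmp/mongod.log, not the terminal\n");
        return 0;
    }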
@@ -14,7 +14,7 @@ namespace mongo {
     //  mongo processes version support
     //

-    const char versionString[] = "1.7.0-pre-";
+    const char versionString[] = "1.7.1-pre-";

     string mongodVersion() {
         stringstream ss;
@@ -61,26 +61,37 @@ namespace mongo {
     //  32 bit systems warning
     //

-    void show_32_warning(){
+    void show_warnings(){
+        // each message adds a leading but not a trailing newline
+
         bool warned = false;
         {
             const char * foo = strchr( versionString , '.' ) + 1;
             int bar = atoi( foo );
             if ( ( 2 * ( bar / 2 ) ) != bar ) {
                 cout << "\n** NOTE: This is a development version (" << versionString << ") of MongoDB.";
-                cout << "\n** Not recommended for production. \n" << endl;
+                cout << "\n** Not recommended for production." << endl;
                 warned = true;
             }
         }

-        if ( sizeof(int*) != 4 )
-            return;
-
-        if( !warned ) // prettier this way
+        if ( sizeof(int*) == 4 ) {
             cout << endl;
+            cout << "** NOTE: when using MongoDB 32 bit, you are limited to about 2 gigabytes of data" << endl;
+            cout << "** see http://blog.mongodb.org/post/137788967/32-bit-limitations" << endl;
+            warned = true;
+        }

-        cout << "** NOTE: when using MongoDB 32 bit, you are limited to about 2 gigabytes of data" << endl;
-        cout << "** see http://blog.mongodb.org/post/137788967/32-bit-limitations" << endl;
-        cout << endl;
+#ifdef __linux__
+        if (boost::filesystem::exists("/proc/vz") && !boost::filesystem::exists("/proc/bc")){
+            cout << endl;
+            cout << "** WARNING: You are running in OpenVZ. This is known to be broken!!!" << endl;
+            warned = true;
+        }
+#endif
+
+        if (warned)
+            cout << endl;
     }

 }
@@ -17,7 +17,7 @@ namespace mongo {
     string sysInfo();
     void printSysInfo();

-    void show_32_warning();
+    void show_warnings();

 } // namespace mongo