2009-08-19 20:46:27 +02:00
|
|
|
// namespace.cpp
|
2008-08-28 02:33:47 +02:00
|
|
|
|
|
|
|
/**
|
|
|
|
* Copyright (C) 2008 10gen Inc.
|
2008-12-29 02:28:49 +01:00
|
|
|
*
|
2008-08-28 02:33:47 +02:00
|
|
|
* This program is free software: you can redistribute it and/or modify
|
|
|
|
* it under the terms of the GNU Affero General Public License, version 3,
|
|
|
|
* as published by the Free Software Foundation.
|
2008-12-29 02:28:49 +01:00
|
|
|
*
|
2008-08-28 02:33:47 +02:00
|
|
|
* This program is distributed in the hope that it will be useful,
|
|
|
|
* but WITHOUT ANY WARRANTY; without even the implied warranty of
|
|
|
|
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
|
|
|
* GNU Affero General Public License for more details.
|
2008-12-29 02:28:49 +01:00
|
|
|
*
|
2008-08-28 02:33:47 +02:00
|
|
|
* You should have received a copy of the GNU Affero General Public License
|
|
|
|
* along with this program. If not, see <http://www.gnu.org/licenses/>.
|
|
|
|
*/
|
|
|
|
|
|
|
|
#include "stdafx.h"
|
|
|
|
#include "pdfile.h"
|
|
|
|
#include "db.h"
|
|
|
|
#include "../util/mmap.h"
|
|
|
|
#include "../util/hashtab.h"
|
2009-09-29 22:54:31 +02:00
|
|
|
#include "../scripting/engine.h"
|
2008-08-28 02:33:47 +02:00
|
|
|
#include "btree.h"
|
|
|
|
#include <algorithm>
|
|
|
|
#include <list>
|
|
|
|
#include "query.h"
|
2009-02-26 17:08:23 +01:00
|
|
|
#include "queryutil.h"
|
2009-01-26 23:23:45 +01:00
|
|
|
#include "json.h"
|
2008-08-28 02:33:47 +02:00
|
|
|
|
2009-01-14 23:09:51 +01:00
|
|
|
namespace mongo {
|
|
|
|
|
2009-01-26 23:23:45 +01:00
|
|
|
// key pattern of the implicit _id index: ascending on "_id"
BSONObj idKeyPattern = fromjson("{\"_id\":1}");
|
|
|
|
|
2009-08-19 20:46:27 +02:00
|
|
|
/* deleted lists -- linked lists of deleted records -- are placed in 'buckets' of various sizes
   so you can look for a deleterecord about the right size.
*/
// bucket i holds deleted records near bucketSizes[i] bytes; sizes double
// from 32 bytes up to 8MB (0x800000).
int bucketSizes[] = {
    32, 64, 128, 256, 0x200, 0x400, 0x800, 0x1000, 0x2000, 0x4000,
    0x8000, 0x10000, 0x20000, 0x40000, 0x80000, 0x100000, 0x200000,
    0x400000, 0x800000
};
|
2008-08-28 02:33:47 +02:00
|
|
|
|
2009-02-06 18:45:05 +01:00
|
|
|
/* NOTE(review): despite the name, this returns true when the .ns file is
   ABSENT (the filesystem check is negated).  Callers appear to use it as
   "a new database would be created on lazy init" -- confirm that intent
   before renaming or removing the negation. */
bool NamespaceIndex::exists() const {
    return !boost::filesystem::exists(path());
}
|
|
|
|
|
|
|
|
/* full filesystem path of this database's namespace file: <dir_>/<database_>.ns */
boost::filesystem::path NamespaceIndex::path() const {
    boost::filesystem::path nsfile( dir_ );
    nsfile /= database_ + ".ns";
    return nsfile;
}
|
2009-08-20 21:00:25 +02:00
|
|
|
|
|
|
|
int lenForNewNsFiles = 16 * 1024 * 1024;  // size in bytes for a freshly created .ns file
|
2009-02-06 18:45:05 +01:00
|
|
|
|
|
|
|
/* lazily open (or create) the database's .ns file and build the namespace
   hashtable over the mapped memory.  No-op if already initialized.
   If the file cannot be mapped the process exits (EXIT_FS). */
void NamespaceIndex::init() {
    if ( ht )
        return;
    /* if someone manually deleted the datafiles for a database,
       we need to be sure to clear any cached info for the database in
       local.*.
    */
    if ( "local" != database_ ) {
        DBInfo i(database_.c_str());
        i.dbDropped();
    }

    int len = -1;
    boost::filesystem::path nsPath = path();
    string pathString = nsPath.string();
    void *p;
    if( boost::filesystem::exists(nsPath) ) {
        // existing database: map the file at its current length
        p = f.map(pathString.c_str());
        if( p ) {
            len = f.length();
            // a valid .ns file is always a whole number of megabytes
            if ( len % (1024*1024) != 0 ){
                log() << "bad .ns file: " << pathString << endl;
                uassert( "bad .ns file length, cannot open database", len % (1024*1024) == 0 );
            }
        }
    }
    else {
        // use lenForNewNsFiles, we are making a new database
        massert( "bad lenForNewNsFiles", lenForNewNsFiles >= 1024*1024 );
        long l = lenForNewNsFiles;
        p = f.map(pathString.c_str(), l);
        if( p ) {
            len = (int) l;
            assert( len == lenForNewNsFiles );
        }
    }

    if ( p == 0 ) {
        // can't run without the namespace index -- terminate
        problem() << "couldn't open file " << pathString << " terminating" << endl;
        dbexit( EXIT_FS );
    }
    // the hashtable lives directly inside the memory-mapped region
    ht = new HashTable<Namespace,NamespaceDetails>(p, len, "namespace index");
}
|
2009-01-15 16:17:11 +01:00
|
|
|
|
|
|
|
/* place a just-freed record (d, located at dloc) on the appropriate deleted list.
   Capped collections keep deleted records ordered inside deletedList[0]
   (deletedList[1], when valid, marks the boundary of the current cap extent);
   regular collections push onto the front of the size bucket's free list. */
void NamespaceDetails::addDeletedRec(DeletedRecord *d, DiskLoc dloc) {
    {
        // defensive code: try to make us notice if we reference a deleted record
        (unsigned&) (((Record *) d)->data) = 0xeeeeeeee;
    }
    dassert( dloc.drec() == d );
    DEBUGGING out() << "TEMP: add deleted rec " << dloc.toString() << ' ' << hex << d->extentOfs << endl;
    if ( capped ) {
        if ( !deletedList[ 1 ].isValid() ) {
            // Initial extent allocation.  Insert at end.
            d->nextDeleted = DiskLoc();
            if ( deletedList[ 0 ].isNull() )
                deletedList[ 0 ] = dloc;
            else {
                // walk to the tail of the chain and append
                DiskLoc i = deletedList[ 0 ];
                for (; !i.drec()->nextDeleted.isNull(); i = i.drec()->nextDeleted );
                i.drec()->nextDeleted = dloc;
            }
        } else {
            // push at the head of the current cap extent's sub-chain
            d->nextDeleted = firstDeletedInCapExtent();
            firstDeletedInCapExtent() = dloc;
        }
    } else {
        // regular collection: push on the front of the size-appropriate bucket
        int b = bucket(d->lengthWithHeaders);
        DiskLoc& list = deletedList[b];
        DiskLoc oldHead = list;
        list = dloc;
        d->nextDeleted = oldHead;
    }
}
|
2008-08-28 02:33:47 +02:00
|
|
|
|
2009-01-15 16:17:11 +01:00
|
|
|
/* allocate space for a new record.
   lenToAlloc is WITH header
   Returns a null DiskLoc when the deleted lists have no room (caller then
   allocates a new extent); on success extentLoc is set to the extent
   containing the returned record, and any unused remainder of the chosen
   deleted record is split off and returned to the free lists. */
DiskLoc NamespaceDetails::alloc(const char *ns, int lenToAlloc, DiskLoc& extentLoc) {
    lenToAlloc = (lenToAlloc + 3) & 0xfffffffc;  // round up to a 4-byte multiple
    DiskLoc loc = _alloc(ns, lenToAlloc);
    if ( loc.isNull() )
        return loc;

    DeletedRecord *r = loc.drec();

    /* note we want to grab from the front so our next pointers on disk tend
       to go in a forward direction which is important for performance. */
    int regionlen = r->lengthWithHeaders;
    extentLoc.set(loc.a(), r->extentOfs);
    assert( r->extentOfs < loc.getOfs() );

    DEBUGGING out() << "TEMP: alloc() returns " << loc.toString() << ' ' << ns << " lentoalloc:" << lenToAlloc << " ext:" << extentLoc.toString() << endl;

    int left = regionlen - lenToAlloc;
    if ( capped == 0 ) {
        // leftover too small to be worth tracking -- hand over the whole record
        if ( left < 24 || left < (lenToAlloc >> 3) ) {
            // you get the whole thing.
            return loc;
        }
    }

    /* split off some for further use. */
    r->lengthWithHeaders = lenToAlloc;
    DiskLoc newDelLoc = loc;
    newDelLoc.inc(lenToAlloc);
    DeletedRecord *newDel = newDelLoc.drec();
    newDel->extentOfs = r->extentOfs;
    newDel->lengthWithHeaders = left;
    newDel->nextDeleted.Null();

    addDeletedRec(newDel, newDelLoc);

    return loc;
}
|
2008-08-28 02:33:47 +02:00
|
|
|
|
2009-01-15 16:17:11 +01:00
|
|
|
/* for non-capped collections.
   returned item is out of the deleted list upon return

   Best-fit search starting at the bucket for 'len': scans a few entries
   ("extra") looking for a tighter fit, moves to larger buckets when a
   bucket is exhausted, and bails to larger buckets if a chain gets long.
   Returns a null DiskLoc when no deleted record is big enough. */
DiskLoc NamespaceDetails::__stdAlloc(int len) {
    DiskLoc *prev;
    DiskLoc *bestprev = 0;
    DiskLoc bestmatch;
    int bestmatchlen = 0x7fffffff;      // sentinel: "no match yet"
    int b = bucket(len);
    DiskLoc cur = deletedList[b];
    prev = &deletedList[b];
    int extra = 5; // look for a better fit, a little.
    int chain = 0;
    while ( 1 ) {
        {
            // defensive: a file number far out of range means a corrupt
            // free list -- report it and truncate the chain here
            int a = cur.a();
            if ( a < -1 || a >= 100000 ) {
                problem() << "~~ Assertion - cur out of range in _alloc() " << cur.toString() <<
                " a:" << a << " b:" << b << " chain:" << chain << '\n';
                sayDbContext();
                if ( cur == *prev )
                    prev->Null();
                cur.Null();
            }
        }
        if ( cur.isNull() ) {
            // move to next bucket.  if we were doing "extra", just break
            if ( bestmatchlen < 0x7fffffff )
                break;
            b++;
            if ( b > MaxBucket ) {
                // out of space. alloc a new extent.
                return DiskLoc();
            }
            cur = deletedList[b];
            prev = &deletedList[b];
            continue;
        }
        DeletedRecord *r = cur.drec();
        if ( r->lengthWithHeaders >= len &&
             r->lengthWithHeaders < bestmatchlen ) {
            // new best (smallest sufficient) candidate
            bestmatchlen = r->lengthWithHeaders;
            bestmatch = cur;
            bestprev = prev;
        }
        if ( bestmatchlen < 0x7fffffff && --extra <= 0 )
            break;
        if ( ++chain > 30 && b < MaxBucket ) {
            // too slow, force move to next bucket to grab a big chunk
            //b++;
            chain = 0;
            cur.Null();
        }
        else {
            /*this defensive check only made sense for the mmap storage engine:
            if ( r->nextDeleted.getOfs() == 0 ) {
                problem() << "~~ Assertion - bad nextDeleted " << r->nextDeleted.toString() <<
                " b:" << b << " chain:" << chain << ", fixing.\n";
                r->nextDeleted.Null();
            }*/
            cur = r->nextDeleted;
            prev = &r->nextDeleted;
        }
    }

    /* unlink ourself from the deleted list */
    {
        DeletedRecord *bmr = bestmatch.drec();
        *bestprev = bmr->nextDeleted;
        bmr->nextDeleted.setInvalid(); // defensive.
        assert(bmr->extentOfs < bestmatch.getOfs());
    }

    return bestmatch;
}
|
2008-08-28 02:33:47 +02:00
|
|
|
|
2009-01-15 16:17:11 +01:00
|
|
|
/* diagnostic dump of the deleted-record free lists to out().
   When 'extents' is supplied, records belonging to those extents are skipped. */
void NamespaceDetails::dumpDeleted(set<DiskLoc> *extents) {
    for ( int bucketNum = 0; bucketNum < Buckets; bucketNum++ ) {
        DiskLoc cur = deletedList[bucketNum];
        while ( !cur.isNull() ) {
            DeletedRecord *rec = cur.drec();
            DiskLoc extLoc(cur.a(), rec->extentOfs);
            bool listed = ( extents != 0 ) && ( extents->count(extLoc) > 0 );
            if ( !listed ) {
                out() << "  bucket " << bucketNum << endl;
                out() << "    " << cur.toString() << " ext:" << extLoc.toString();
                if ( extents && extents->count(extLoc) <= 0 )
                    out() << '?';
                out() << " len:" << rec->lengthWithHeaders << endl;
            }
            cur = rec->nextDeleted;
        }
    }
}
|
2008-08-28 02:33:47 +02:00
|
|
|
|
2009-01-15 16:17:11 +01:00
|
|
|
/* combine adjacent deleted records

   this is O(n^2) but we call it for capped tables where typically n==1 or 2!
   (or 3...there will be a little unused sliver at the end of the extent.)

   Pulls the current cap extent's deleted records off the free list, sorts
   them by location, merges physically adjacent runs, and re-adds the merged
   records via addDeletedRec().
*/
void NamespaceDetails::compact() {
    assert(capped);

    list<DiskLoc> drecs;

    // Pull out capExtent's DRs from deletedList
    DiskLoc i = firstDeletedInCapExtent();
    for (; !i.isNull() && inCapExtent( i ); i = i.drec()->nextDeleted )
        drecs.push_back( i );
    firstDeletedInCapExtent() = i;

    // This is the O(n^2) part.
    drecs.sort();

    list<DiskLoc>::iterator j = drecs.begin();
    assert( j != drecs.end() );
    DiskLoc a = *j;
    while ( 1 ) {
        j++;
        if ( j == drecs.end() ) {
            DEBUGGING out() << "TEMP: compact adddelrec\n";
            addDeletedRec(a.drec(), a);
            break;
        }
        DiskLoc b = *j;
        while ( a.a() == b.a() && a.getOfs() + a.drec()->lengthWithHeaders == b.getOfs() ) {
            // a & b are adjacent.  merge.
            a.drec()->lengthWithHeaders += b.drec()->lengthWithHeaders;
            j++;
            if ( j == drecs.end() ) {
                DEBUGGING out() << "temp: compact adddelrec2\n";
                addDeletedRec(a.drec(), a);
                return;
            }
            b = *j;
        }
        DEBUGGING out() << "temp: compact adddelrec3\n";
        addDeletedRec(a.drec(), a);
        a = b;
    }
}
|
2008-08-28 02:33:47 +02:00
|
|
|
|
2009-01-15 16:17:11 +01:00
|
|
|
/* first record of the collection, scanning extents forward starting at
   'startExtent' (or at firstExtent when startExtent is null).
   Returns a null DiskLoc if every extent scanned is empty. */
DiskLoc NamespaceDetails::firstRecord( const DiskLoc &startExtent ) const {
    DiskLoc cur = startExtent.isNull() ? firstExtent : startExtent;
    while ( !cur.isNull() ) {
        Extent *e = cur.ext();
        if ( !e->firstRecord.isNull() )
            return e->firstRecord;
        cur = e->xnext;
    }
    return DiskLoc();
}
|
2009-01-15 16:17:11 +01:00
|
|
|
|
|
|
|
/* last record of the collection, scanning extents backward starting at
   'startExtent' (or at lastExtent when startExtent is null).
   Returns a null DiskLoc if every extent scanned is empty. */
DiskLoc NamespaceDetails::lastRecord( const DiskLoc &startExtent ) const {
    DiskLoc cur = startExtent.isNull() ? lastExtent : startExtent;
    while ( !cur.isNull() ) {
        Extent *e = cur.ext();
        if ( !e->lastRecord.isNull() )
            return e->lastRecord;
        cur = e->xprev;
    }
    return DiskLoc();
}
|
2009-01-14 23:17:24 +01:00
|
|
|
|
2009-01-15 16:17:11 +01:00
|
|
|
/* reference to the head of the deleted chain for the current cap extent.
   deletedList[1], when set, is the last deleted record of the previous cap
   extent, so the current extent's chain begins at its nextDeleted link. */
DiskLoc &NamespaceDetails::firstDeletedInCapExtent() {
    return deletedList[ 1 ].isNull() ? deletedList[ 0 ] : deletedList[ 1 ].drec()->nextDeleted;
}
|
|
|
|
|
2009-01-15 16:17:11 +01:00
|
|
|
bool NamespaceDetails::inCapExtent( const DiskLoc &dl ) const {
|
|
|
|
assert( !dl.isNull() );
|
|
|
|
// We could have a rec or drec, doesn't matter.
|
|
|
|
return dl.drec()->myExtent( dl ) == capExtent.ext();
|
|
|
|
}
|
2009-01-13 16:30:01 +01:00
|
|
|
|
2009-01-15 16:17:11 +01:00
|
|
|
bool NamespaceDetails::nextIsInCapExtent( const DiskLoc &dl ) const {
|
|
|
|
assert( !dl.isNull() );
|
|
|
|
DiskLoc next = dl.drec()->nextDeleted;
|
|
|
|
if ( next.isNull() )
|
|
|
|
return false;
|
|
|
|
return inCapExtent( next );
|
|
|
|
}
|
|
|
|
|
|
|
|
/* advance the cap pointer to the next extent (wrapping around to the first),
   first updating deletedList[1] to mark where the previous extent's deleted
   chain ends.  Resets capFirstNewRecord for the new extent. */
void NamespaceDetails::advanceCapExtent( const char *ns ) {
    // We want deletedList[ 1 ] to be the last DeletedRecord of the prev cap extent
    // (or DiskLoc() if new capExtent == firstExtent)
    if ( capExtent == lastExtent )
        deletedList[ 1 ] = DiskLoc();
    else {
        // walk to the last deleted record still inside the current cap extent
        DiskLoc i = firstDeletedInCapExtent();
        for (; !i.isNull() && nextIsInCapExtent( i ); i = i.drec()->nextDeleted );
        deletedList[ 1 ] = i;
    }

    capExtent = theCapExtent()->xnext.isNull() ? firstExtent : theCapExtent()->xnext;

    /* this isn't true if a collection has been renamed...that is ok just used for diagnostics */
    //dassert( theCapExtent()->ns == ns );

    theCapExtent()->assertOk();
    capFirstNewRecord = DiskLoc();
}
|
2009-01-15 16:17:11 +01:00
|
|
|
|
|
|
|
// throttle counter for the capped-collection "couldn't make room" diagnostic
int n_complaints_cap = 0;

/* log diagnostics (at most ~8 times per process) when a capped collection
   cannot make room for a record of the given length, then assert that the
   record really is unusually large relative to the last extent. */
void NamespaceDetails::maybeComplain( const char *ns, int len ) const {
    if ( ++n_complaints_cap < 8 ) {
        out() << "couldn't make room for new record (len: " << len << ") in capped ns " << ns << '\n';
        int i = 0;
        for ( DiskLoc e = firstExtent; !e.isNull(); e = e.ext()->xnext, ++i ) {
            out() << "  Extent " << i;
            if ( e == capExtent )
                out() << " (capExtent)";
            out() << '\n';
            out() << "    magic: " << hex << e.ext()->magic << dec << " extent->ns: " << e.ext()->nsDiagnostic.buf << '\n';
            out() << "    fr: " << e.ext()->firstRecord.toString() <<
                " lr: " << e.ext()->lastRecord.toString() << " extent->len: " << e.ext()->length << '\n';
        }
        assert( len * 5 > lastExtentSize ); // assume it is unusually large record; if not, something is broken
    }
}
|
2008-12-29 02:28:49 +01:00
|
|
|
|
2009-01-15 16:17:11 +01:00
|
|
|
/* allocate from the deleted chain of the current cap extent.
   Returns null if no record there is big enough (len plus 24 bytes so a
   leftover DeletedRecord can be carved off); otherwise unlinks and returns
   the chosen record. */
DiskLoc NamespaceDetails::__capAlloc( int len ) {
    DiskLoc prev = deletedList[ 1 ];
    DiskLoc i = firstDeletedInCapExtent();
    DiskLoc ret;
    for (; !i.isNull() && inCapExtent( i ); prev = i, i = i.drec()->nextDeleted ) {
        // We need to keep at least one DR per extent in deletedList[ 0 ],
        // so make sure there's space to create a DR at the end.
        if ( i.drec()->lengthWithHeaders >= len + 24 ) {
            ret = i;
            break;
        }
    }

    /* unlink ourself from the deleted list */
    if ( !ret.isNull() ) {
        if ( prev.isNull() )
            deletedList[ 0 ] = ret.drec()->nextDeleted;
        else
            prev.drec()->nextDeleted = ret.drec()->nextDeleted;
        ret.drec()->nextDeleted.setInvalid(); // defensive.
        assert( ret.drec()->extentOfs < ret.getOfs() );
    }

    return ret;
}
|
|
|
|
|
|
|
|
/* upgrade an on-disk NamespaceDetails written by an older version.
   Old-format capped collections (detected by an unset capExtent) get all
   their bucketed deleted records collapsed into deletedList[0] and their
   cap state initialized. */
void NamespaceDetails::checkMigrate() {
    // migrate old NamespaceDetails format
    if ( capped && capExtent.a() == 0 && capExtent.getOfs() == 0 ) {
        capFirstNewRecord = DiskLoc();
        capFirstNewRecord.setInvalid();
        // put all the DeletedRecords in deletedList[ 0 ]
        for ( int i = 1; i < Buckets; ++i ) {
            DiskLoc first = deletedList[ i ];
            if ( first.isNull() )
                continue;
            // splice this bucket's whole chain onto the front of bucket 0
            DiskLoc last = first;
            for (; !last.drec()->nextDeleted.isNull(); last = last.drec()->nextDeleted );
            last.drec()->nextDeleted = deletedList[ 0 ];
            deletedList[ 0 ] = first;
            deletedList[ i ] = DiskLoc();
        }
        // NOTE deletedList[ 1 ] set to DiskLoc() in above

        // Last, in case we're killed before getting here
        capExtent = firstExtent;
    }
}
|
|
|
|
|
2009-01-15 16:17:11 +01:00
|
|
|
/* alloc with capped table handling.
   Non-capped: straight best-fit from the free lists.
   Capped: loop deleting the oldest records (advancing the cap extent as
   needed) until __capAlloc finds room; gives up with a null DiskLoc only if
   every extent is empty and still nothing fits. */
DiskLoc NamespaceDetails::_alloc(const char *ns, int len) {
    if ( !capped )
        return __stdAlloc(len);

    // capped.

    // signal done allocating new extents.
    if ( !deletedList[ 1 ].isValid() )
        deletedList[ 1 ] = DiskLoc();

    assert( len < 400000000 );
    int passes = 0;
    DiskLoc loc;

    // delete records until we have room and the max # objects limit achieved.

    /* this fails on a rename -- that is ok but must keep commented out */
    //assert( theCapExtent()->ns == ns );

    theCapExtent()->assertOk();
    DiskLoc firstEmptyExtent;
    while ( 1 ) {
        if ( nrecords < max ) {
            loc = __capAlloc( len );
            if ( !loc.isNull() )
                break;
        }

        // If on first iteration through extents, don't delete anything.
        if ( !capFirstNewRecord.isValid() ) {
            advanceCapExtent( ns );
            if ( capExtent != firstExtent )
                capFirstNewRecord.setInvalid();
            // else signal done with first iteration through extents.
            continue;
        }

        if ( !capFirstNewRecord.isNull() &&
             theCapExtent()->firstRecord == capFirstNewRecord ) {
            // We've deleted all records that were allocated on the previous
            // iteration through this extent.
            advanceCapExtent( ns );
            continue;
        }

        if ( theCapExtent()->firstRecord.isNull() ) {
            if ( firstEmptyExtent.isNull() )
                firstEmptyExtent = capExtent;
            advanceCapExtent( ns );
            if ( firstEmptyExtent == capExtent ) {
                // wrapped all the way around without finding room
                maybeComplain( ns, len );
                return DiskLoc();
            }
            continue;
        }

        massert( "Capped collection full and delete not allowed", cappedMayDelete() );
        // evict the oldest record in the current cap extent, then coalesce
        DiskLoc fr = theCapExtent()->firstRecord;
        theDataFileMgr.deleteRecord(ns, fr.rec(), fr, true);
        compact();
        if( ++passes >= 5000 ) {
            log() << "passes ns:" << ns << " len:" << len << '\n';
            log() << "passes max:" << max << " nrecords:" << nrecords << " datasize: " << datasize << endl;
            massert( "passes >= 5000 in capped collection alloc", false );
        }
    }

    // Remember first record allocated on this iteration through capExtent.
    if ( capFirstNewRecord.isValid() && capFirstNewRecord.isNull() )
        capFirstNewRecord = loc;

    return loc;
}
|
|
|
|
|
|
|
|
/* you MUST call when adding an index. see pdfile.cpp
   Reserves and returns the IndexDetails slot for a new index on thisns,
   allocating the 'extra' block when the base slots run out and invalidating
   the transient cached index-key set. */
IndexDetails& NamespaceDetails::addIndex(const char *thisns) {
    assert( nsdetails(thisns) == this );

    // base slots exhausted and no extra block yet: allocate one
    if( nIndexes == NIndexesBase && extraOffset == 0 ) {
        nsindex(thisns)->allocExtra(thisns);
    }

    IndexDetails& id = idx(nIndexes);
    nIndexes++;
    NamespaceDetailsTransient::get_w(thisns).addedIndex();
    return id;
}
|
|
|
|
|
|
|
|
// must be called when renaming a NS to fix up extra
// (the Extra block is addressed relative to this NamespaceDetails' slot in
// the hashtable, so after a move it must be re-allocated and copied)
void NamespaceDetails::copyingFrom(const char *thisns, NamespaceDetails *src) {
    if( extraOffset ) {
        extraOffset = 0; // so allocExtra() doesn't assert.
        Extra *e = nsindex(thisns)->allocExtra(thisns);
        memcpy(e, src->extra(), sizeof(Extra));
    }
}
|
2008-08-28 02:33:47 +02:00
|
|
|
|
2009-01-15 16:17:11 +01:00
|
|
|
/* returns index of the first index in which the field is present. -1 if not present.
   (aug08 - this method not currently used)
*/
int NamespaceDetails::fieldIsIndexed(const char *fieldName) {
    // implementation disabled: aborts if ever called
    massert("not implemented", false);
    /*
    for ( int i = 0; i < nIndexes; i++ ) {
        IndexDetails& idx = indexes[i];
        BSONObj idxKey = idx.info.obj().getObjectField("key"); // e.g., { ts : -1 }
        if ( !idxKey.findElement(fieldName).eoo() )
            return i;
    }*/
    return -1;
}
|
2009-08-12 19:53:52 +02:00
|
|
|
|
|
|
|
/* total on-disk size in bytes of all extents belonging to this collection */
long long NamespaceDetails::storageSize(){
    Extent * e = firstExtent.ext();
    assert( e );

    long long total = 0;
    for ( ; e ; e = e->getNextExtent() )
        total += e->length;
    return total;
}
|
|
|
|
|
2009-01-15 16:17:11 +01:00
|
|
|
/* ------------------------------------------------------------------------- */
|
2008-08-28 02:33:47 +02:00
|
|
|
|
2009-12-10 00:13:36 +01:00
|
|
|
// guards access to per-namespace query-cache state
boost::mutex NamespaceDetailsTransient::_qcMutex;
// namespace name -> non-durable (transient) details; entries are reset when
// a namespace changes or is dropped
map< string, shared_ptr< NamespaceDetailsTransient > > NamespaceDetailsTransient::_map;
typedef map< string, shared_ptr< NamespaceDetailsTransient > >::iterator ouriter;
|
2008-08-28 02:33:47 +02:00
|
|
|
|
2009-02-26 17:08:23 +01:00
|
|
|
/* discard cached transient state for this namespace: the query plan cache
   and the computed index-key set. */
void NamespaceDetailsTransient::reset() {
    clearQueryCache();
    _keysComputed = false;  // forces computeIndexKeys() on next use
}
|
|
|
|
|
2009-12-09 22:36:31 +01:00
|
|
|
/* NamespaceDetailsTransient& NamespaceDetailsTransient::get(const char *ns) {
|
2009-03-04 19:54:56 +01:00
|
|
|
shared_ptr< NamespaceDetailsTransient > &t = map_[ ns ];
|
2009-03-03 21:27:22 +01:00
|
|
|
if ( t.get() == 0 )
|
|
|
|
t.reset( new NamespaceDetailsTransient(ns) );
|
2009-01-15 16:17:11 +01:00
|
|
|
return *t;
|
|
|
|
}
|
2009-12-09 22:36:31 +01:00
|
|
|
*/
|
|
|
|
void NamespaceDetailsTransient::clearForPrefix(const char *prefix) {
|
2009-12-10 00:13:36 +01:00
|
|
|
assertInWriteLock();
|
2009-03-03 21:27:22 +01:00
|
|
|
vector< string > found;
|
2009-12-09 22:36:31 +01:00
|
|
|
for( ouriter i = _map.begin(); i != _map.end(); ++i )
|
2009-03-03 21:32:51 +01:00
|
|
|
if ( strncmp( i->first.c_str(), prefix, strlen( prefix ) ) == 0 )
|
2009-03-03 21:27:22 +01:00
|
|
|
found.push_back( i->first );
|
|
|
|
for( vector< string >::iterator i = found.begin(); i != found.end(); ++i ) {
|
2009-12-09 22:36:31 +01:00
|
|
|
_map[ *i ].reset();
|
2009-03-03 21:27:22 +01:00
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2009-01-15 16:17:11 +01:00
|
|
|
void NamespaceDetailsTransient::computeIndexKeys() {
|
2009-12-09 22:36:31 +01:00
|
|
|
_keysComputed = true;
|
|
|
|
_indexKeys.clear();
|
2009-12-08 21:30:13 +01:00
|
|
|
NamespaceDetails *d = nsdetails(_ns.c_str());
|
2009-10-21 22:00:40 +02:00
|
|
|
NamespaceDetails::IndexIterator i = d->ii();
|
|
|
|
while( i.more() )
|
2009-12-09 22:36:31 +01:00
|
|
|
i.next().keyPattern().getFieldNames(_indexKeys);
|
2008-08-28 23:05:15 +02:00
|
|
|
}
|
2009-03-04 21:57:35 +01:00
|
|
|
|
2009-12-09 22:57:53 +01:00
|
|
|
/* begin capped-collection op logging for this namespace: creates a capped
   local.temp.oplog.<ns> collection of logSizeMb megabytes (no _id index)
   and disallows deletes on it.  Write lock required. */
void NamespaceDetailsTransient::cllStart( int logSizeMb ) {
    assertInWriteLock();
    _cll_ns = "local.temp.oplog." + _ns;
    _cll_enabled = true;
    stringstream spec;
    // capped, sized by the caller's logSizeMb, no automatic _id index
    spec << "{size:" << logSizeMb * 1024 * 1024 << ",capped:true,autoIndexId:false}";
    setClient( _cll_ns.c_str() );
    string err;
    massert( "Could not create log ns", userCreateNS( _cll_ns.c_str(), fromjson( spec.str() ), err, false ) );
    NamespaceDetails *d = nsdetails( _cll_ns.c_str() );
    d->cappedDisallowDelete();
}
|
2009-03-06 16:45:35 +01:00
|
|
|
|
2009-12-09 22:57:53 +01:00
|
|
|
/* abandon the current logging session: drop the temp oplog, then disable.
   Write lock required. */
void NamespaceDetailsTransient::cllInvalidate() {
    assertInWriteLock();
    cllDrop();            // must run while _cll_enabled is still true
    _cll_enabled = false;
}
|
|
|
|
|
2009-12-09 22:57:53 +01:00
|
|
|
/* finish a logging session: drop the temp oplog and report whether logging
   was still enabled (i.e. the session was not invalidated mid-way).
   Clears all logging state.  Write lock required. */
bool NamespaceDetailsTransient::cllValidateComplete() {
    assertInWriteLock();
    cllDrop();
    bool ret = _cll_enabled;   // capture before clearing
    _cll_enabled = false;
    _cll_ns = "";
    return ret;
}
|
2009-03-04 21:57:35 +01:00
|
|
|
|
2009-12-09 22:57:53 +01:00
|
|
|
/* drop the temp oplog collection if logging is enabled; flags are left
   untouched (callers clear them).  Write lock required. */
void NamespaceDetailsTransient::cllDrop() {
    assertInWriteLock();
    if ( !_cll_enabled )
        return;
    setClient( _cll_ns.c_str() );
    dropNS( _cll_ns );
}
|
2008-08-28 02:33:47 +02:00
|
|
|
|
2009-01-15 16:17:11 +01:00
|
|
|
/* ------------------------------------------------------------------------- */
|
2008-08-28 02:33:47 +02:00
|
|
|
|
2009-01-15 16:17:11 +01:00
|
|
|
/* add a new namespace to the system catalog (<dbname>.system.namespaces).
   options: { capped : ..., size : ... }
*/
void addNewNamespaceToCatalog(const char *ns, const BSONObj *options = 0) {
    log(1) << "New namespace: " << ns << '\n';
    if ( strstr(ns, "system.namespaces") ) {
        // system.namespaces holds all the others, so it is not explicitly listed in the catalog.
        // TODO: fix above should not be strstr!
        return;
    }

    {
        // build { name: <ns> [, options: <options>] } and insert it into
        // <dbname>.system.namespaces
        BSONObjBuilder b;
        b.append("name", ns);
        if ( options )
            b.append("options", *options);
        BSONObj j = b.done();
        char database[256];
        nsToClient(ns, database);
        string s = database;
        s += ".system.namespaces";
        theDataFileMgr.insert(s.c_str(), j.objdata(), j.objsize(), true);
    }
}
|
2008-08-28 02:33:47 +02:00
|
|
|
|
2009-08-19 20:46:27 +02:00
|
|
|
/* rename collection 'from' to 'to' within the same database:
   moves the NamespaceDetails entry, rewrites the system.namespaces catalog
   entry, and rewrites/relinks every system.indexes entry and its btree
   namespace.  Caller guarantees 'to' does not already exist. */
void renameNamespace( const char *from, const char *to ) {
    NamespaceIndex *ni = nsindex( from );
    assert( ni && ni->details( from ) && !ni->details( to ) );

    // Our namespace and index details will move to a different
    // memory location. The only references to namespace and
    // index details across commands are in cursors and nsd
    // transient (including query cache) so clear these.
    ClientCursor::invalidate( from );
    NamespaceDetailsTransient::clearForPrefix( from );

    NamespaceDetails *details = ni->details( from );
    ni->add_ns( to, *details );
    NamespaceDetails *todetails = ni->details( to );
    try {
        todetails->copyingFrom(to, details); // fixes extraOffset
    }
    catch( DBException& ) {
        // could end up here if .ns is full - if so try to clean up / roll back a little
        ni->kill_ns(to);
        throw;
    }
    ni->kill_ns( from );
    details = todetails;

    // rewrite the catalog entry: copy old options, replacing any
    // "create" field with the new name
    BSONObj oldSpec;
    char database[MaxClientLen];
    nsToClient(from, database);
    string s = database;
    s += ".system.namespaces";
    assert( Helpers::findOne( s.c_str(), BSON( "name" << from ), oldSpec ) );

    BSONObjBuilder newSpecB;
    BSONObjIterator i( oldSpec.getObjectField( "options" ) );
    while( i.more() ) {
        BSONElement e = i.next();
        if ( strcmp( e.fieldName(), "create" ) != 0 )
            newSpecB.append( e );
        else
            newSpecB << "create" << to;
    }
    BSONObj newSpec = newSpecB.done();
    addNewNamespaceToCatalog( to, newSpec.isEmpty() ? 0 : &newSpec );

    deleteObjects( s.c_str(), BSON( "name" << from ), false, false, true );
    // oldSpec variable no longer valid memory

    // rewrite each index spec, retargeting its "ns" field and renaming the
    // underlying btree namespace
    BSONObj oldIndexSpec;
    s = database;
    s += ".system.indexes";
    while( Helpers::findOne( s.c_str(), BSON( "ns" << from ), oldIndexSpec ) ) {
        BSONObjBuilder newIndexSpecB;
        BSONObjIterator i( oldIndexSpec );
        while( i.more() ) {
            BSONElement e = i.next();
            if ( strcmp( e.fieldName(), "ns" ) != 0 )
                newIndexSpecB.append( e );
            else
                newIndexSpecB << "ns" << to;
        }
        BSONObj newIndexSpec = newIndexSpecB.done();
        DiskLoc newIndexSpecLoc = theDataFileMgr.insert( s.c_str(), newIndexSpec.objdata(), newIndexSpec.objsize(), true, BSONElement(), false );
        int indexI = details->findIndexByName( oldIndexSpec.getStringField( "name" ) );
        IndexDetails &indexDetails = details->idx(indexI);
        string oldIndexNs = indexDetails.indexNamespace();
        indexDetails.info = newIndexSpecLoc;
        string newIndexNs = indexDetails.indexNamespace();

        BtreeBucket::renameIndexNamespace( oldIndexNs.c_str(), newIndexNs.c_str() );
        deleteObjects( s.c_str(), oldIndexSpec.getOwned(), true, false, true );
    }
}
|
2009-09-29 22:54:31 +02:00
|
|
|
|
|
|
|
bool legalClientSystemNS( const string& ns , bool write ){
|
|
|
|
if ( ns.find( ".system.users" ) != string::npos )
|
|
|
|
return true;
|
|
|
|
|
|
|
|
if ( ns.find( ".system.js" ) != string::npos ){
|
|
|
|
if ( write )
|
|
|
|
Scope::storedFuncMod();
|
|
|
|
return true;
|
|
|
|
}
|
|
|
|
|
|
|
|
return false;
|
|
|
|
}
|
2009-08-19 20:46:27 +02:00
|
|
|
|
2009-01-14 23:09:51 +01:00
|
|
|
} // namespace mongo
|