
SERVER-15176 remove CollectionMetadataPtr from ClientCursor

This was used for filtering orphaned chunks. However, it no longer needs to be stashed
in the ClientCursor, as it now lives in the ShardFilterStage.
David Storch 2015-03-20 15:43:42 -04:00
parent 565f050f41
commit 93254f6576
3 changed files with 6 additions and 28 deletions
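
For context, here is a minimal, standalone sketch of the idea described in the commit message: the shard-filter stage owns its own snapshot of the collection metadata and drops orphaned documents as it streams results, so a saved cursor never has to carry a CollectionMetadataPtr. Everything below (Doc, VectorScan, and the simplified CollectionMetadata and Stage types) is an illustrative stand-in, not the actual mongo classes.

    #include <iostream>
    #include <memory>
    #include <vector>

    struct Doc { int shardKey; };                    // stand-in for a BSON document

    struct CollectionMetadata {                      // stand-in for the real metadata
        int ownedLow, ownedHigh;                     // pretend this shard owns keys in [low, high)
        bool keyBelongsToMe(int key) const { return key >= ownedLow && key < ownedHigh; }
    };

    struct Stage {                                   // stand-in for PlanStage
        virtual ~Stage() = default;
        virtual bool next(Doc* out) = 0;             // false once the stage is exhausted
    };

    struct VectorScan : Stage {                      // trivial child stage over an in-memory vector
        explicit VectorScan(std::vector<Doc> docs) : _docs(std::move(docs)) {}
        bool next(Doc* out) override {
            if (_pos == _docs.size()) return false;
            *out = _docs[_pos++];
            return true;
        }
        std::vector<Doc> _docs;
        size_t _pos = 0;
    };

    struct ShardFilterStage : Stage {                // the stage, not the cursor, owns the snapshot
        ShardFilterStage(std::shared_ptr<const CollectionMetadata> meta,
                         std::unique_ptr<Stage> child)
            : _metadata(std::move(meta)), _child(std::move(child)) {}
        bool next(Doc* out) override {
            Doc d;
            while (_child->next(&d)) {
                if (_metadata->keyBelongsToMe(d.shardKey)) { *out = d; return true; }
                // Orphaned document: owned by another shard, so drop it here.
            }
            return false;
        }
        std::shared_ptr<const CollectionMetadata> _metadata;
        std::unique_ptr<Stage> _child;
    };

    int main() {
        auto meta = std::make_shared<const CollectionMetadata>(CollectionMetadata{0, 100});
        auto plan = std::make_unique<ShardFilterStage>(
            meta, std::make_unique<VectorScan>(std::vector<Doc>{{5}, {150}, {42}}));
        for (Doc d; plan->next(&d);) std::cout << d.shardKey << "\n";  // prints 5 and 42
    }

Because the metadata travels with the plan, pausing and resuming the plan across getMore calls (saveState in the real code) needs no extra bookkeeping on the ClientCursor.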

View File

@@ -131,13 +131,6 @@ namespace mongo {
_leftoverMaxTimeMicros = leftoverMaxTimeMicros;
}
//
// Sharding-specific data. TODO: Document.
//
void setCollMetadata( CollectionMetadataPtr metadata ){ _collMetadata = metadata; }
CollectionMetadataPtr getCollMetadata(){ return _collMetadata; }
//
// Replication-related stuff. TODO: Document and clean.
//

View File

@@ -140,8 +140,6 @@ namespace mongo {
}
else {
size_t options = QueryPlannerParams::DEFAULT;
// TODO: The version attached to the TLS cannot be relied upon, the shard
// version should be passed as part of the command parameter.
if (shardingState.needCollectionMetadata(cq->getParsed().ns())) {
options |= QueryPlannerParams::INCLUDE_SHARD_FILTER;
}
@@ -175,6 +173,10 @@ namespace mongo {
* 5) Generate the first batch.
* 6) Save state for getMore.
* 7) Generate response to send to the client.
*
* TODO: Rather than using the sharding version available in thread-local storage
* (i.e. call to shardingState.needCollectionMetadata() below), shard version
* information should be passed as part of the command parameter.
*/
virtual bool run(OperationContext* txn,
const std::string& dbname,
@@ -231,12 +233,12 @@ namespace mongo {
{
PlanExecutor* rawExec;
size_t options = QueryPlannerParams::DEFAULT;
// TODO: The version attached to the TLS cannot be relied upon, the shard
// version should be passed as part of the command parameter.
if (shardingState.needCollectionMetadata(cq->getParsed().ns())) {
options |= QueryPlannerParams::INCLUDE_SHARD_FILTER;
}
// TODO (SERVER-17284): This can yield before creating a ClientCursor, which means
// that we have to throw an error if the shard version changes during yield.
Status execStatus = getExecutor(txn,
collection,
cq.release(),
@@ -318,7 +320,6 @@ namespace mongo {
// State will be restored on getMore.
exec->saveState();
// TODO: Do we also need to set collection metadata here?
cursor->setLeftoverMaxTimeMicros(txn->getCurOp()->getRemainingMaxTimeMicros());
cursor->setPos(numResults);
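
Taken together, the hunks in this file leave the find command with a single sharding decision, made while the executor is built; nothing sharding-related is attached to the ClientCursor when state is saved for getMore. A condensed, lightly commented view (illustrative, not a verbatim excerpt; the getExecutor argument list is abbreviated as in the hunk above):

    // Decide once, at plan-construction time, whether shard filtering is needed.
    // (Per the TODO above, the shard version should eventually come from the
    // command itself rather than from thread-local state.)
    size_t options = QueryPlannerParams::DEFAULT;
    if (shardingState.needCollectionMetadata(cq->getParsed().ns())) {
        options |= QueryPlannerParams::INCLUDE_SHARD_FILTER;
    }

    PlanExecutor* rawExec;
    Status execStatus = getExecutor(txn, collection, cq.release(), &rawExec, options);  // arguments abbreviated

    // Saving state for getMore no longer touches collection metadata: the
    // shard-filter stage inside the executor already owns the snapshot it needs.
    exec->saveState();
    cursor->setLeftoverMaxTimeMicros(txn->getCurOp()->getRemainingMaxTimeMicros());
    cursor->setPos(numResults);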

View File

@@ -398,8 +398,6 @@ namespace mongo {
ctx.reset(); // unlocks
}
CollectionMetadataPtr collMetadata = cc->getCollMetadata();
// If we're replaying the oplog, we save the last time that we read.
OpTime slaveReadTill;
@@ -803,18 +801,6 @@ namespace mongo {
slaveOK);
uassertStatusOK(status);
// If this exists, the collection is sharded.
// If it doesn't exist, we can assume we're not sharded.
// If we're sharded, we might encounter data that is not consistent with our sharding state.
// We must ignore this data.
CollectionMetadataPtr collMetadata;
if (!shardingState.needCollectionMetadata(nss.ns())) {
collMetadata = CollectionMetadataPtr();
}
else {
collMetadata = shardingState.getCollectionMetadata(nss.ns());
}
// Run the query.
// bb is used to hold query results
// this buffer should contain either requested documents per query or
@@ -931,8 +917,6 @@ namespace mongo {
curop.debug().exhaust = true;
}
// Set attributes for getMore.
cc->setCollMetadata(collMetadata);
cc->setPos(numResults);
// If the query had a time limit, remaining time is "rolled over" to the cursor (for