// gridfs.cpp
/* Copyright 2009 10gen
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#include "../stdafx.h"
#include <fcntl.h>
#include <utility>
#include "gridfs.h"
#include <boost/smart_ptr.hpp>
#if defined(_WIN32)
#include <io.h>
#endif
#ifndef MIN
#define MIN(a,b) ( (a) < (b) ? (a) : (b) )
#endif
namespace mongo {
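
    // GridFS stores a file as a metadata document in <db>.<prefix>.files and its
    // contents split into chunk documents in <db>.<prefix>.chunks.
    //
    // Rough usage sketch (illustrative only; assumes an already connected
    // DBClientConnection named "conn"):
    //
    //     GridFS gfs( conn , "mydb" , "fs" );
    //     gfs.storeFile( "/tmp/data.bin" , "data.bin" , "" );
    //     GridFile f = gfs.findFile( "data.bin" );
    //     if ( f.exists() )
    //         f.write( "/tmp/copy.bin" );
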
    const unsigned DEFAULT_CHUNK_SIZE = 256 * 1024;

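    // A single chunk document: { files_id : <_id of the owning file> , n : <chunk number> , data : <binary payload> }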
    Chunk::Chunk( BSONObj o ){
        _data = o;
    }

    Chunk::Chunk( BSONObj fileObject , int chunkNumber , const char * data , int len ){
        BSONObjBuilder b;
        b.appendAs( fileObject["_id"] , "files_id" );
        b.append( "n" , chunkNumber );
        b.appendBinDataArray( "data" , data , len );
        _data = b.obj();
    }

    GridFS::GridFS( DBClientBase& client , const string& dbName , const string& prefix ) : _client( client ) , _dbName( dbName ) , _prefix( prefix ){
        _filesNS = dbName + "." + prefix + ".files";
        _chunksNS = dbName + "." + prefix + ".chunks";

        client.ensureIndex( _filesNS , BSON( "filename" << 1 ) );
        client.ensureIndex( _chunksNS , BSON( "files_id" << 1 << "n" << 1 ) );
    }

    GridFS::~GridFS(){
    }

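    // Stores an in-memory buffer: the data is split into DEFAULT_CHUNK_SIZE pieces,
    // each inserted as a chunk document, then the .files metadata document is written.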
    BSONObj GridFS::storeFile( const char* data , size_t length , const string& remoteName , const string& contentType){
        massert( 10279 , "large files not yet implemented", length <= 0xffffffff);
        char const * const end = data + length;

        OID id;
        id.init();
        BSONObj idObj = BSON("_id" << id);

        int chunkNumber = 0;
        while (data < end){
            int chunkLen = MIN(DEFAULT_CHUNK_SIZE, (unsigned)(end-data));
            Chunk c(idObj, chunkNumber, data, chunkLen);
            _client.insert( _chunksNS.c_str() , c._data );
            chunkNumber++;
            data += chunkLen;
        }

        return insertFile(remoteName, id, length, contentType);
    }

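    // Stores a file from disk ("-" reads from stdin), streaming it one
    // DEFAULT_CHUNK_SIZE buffer at a time so the whole file is never held in memory.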
    BSONObj GridFS::storeFile( const string& fileName , const string& remoteName , const string& contentType){
        uassert( 10012 , "file doesn't exist" , fileName == "-" || boost::filesystem::exists( fileName ) );

        FILE* fd;
        if (fileName == "-")
            fd = stdin;
        else
            fd = fopen( fileName.c_str() , "rb" );
        uassert( 10013 , "error opening file", fd);

        OID id;
        id.init();
        BSONObj idObj = BSON("_id" << id);

        int chunkNumber = 0;
        gridfs_offset length = 0;
        while (!feof(fd)){
            boost::scoped_array<char> buf (new char[DEFAULT_CHUNK_SIZE]);
            char* bufPos = buf.get();
            unsigned int chunkLen = 0; // how much in the chunk now
            while(chunkLen != DEFAULT_CHUNK_SIZE && !feof(fd)){
                int readLen = fread(bufPos, 1, DEFAULT_CHUNK_SIZE - chunkLen, fd);
                chunkLen += readLen;
                bufPos += readLen;
                assert(chunkLen <= DEFAULT_CHUNK_SIZE);
            }

            Chunk c(idObj, chunkNumber, buf.get(), chunkLen);
            _client.insert( _chunksNS.c_str() , c._data );

            length += chunkLen;
            chunkNumber++;
        }

        if (fd != stdin)
            fclose( fd );

        massert( 10280 , "large files not yet implemented", length <= 0xffffffff);

        return insertFile((remoteName.empty() ? fileName : remoteName), id, length, contentType);
    }

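    // Writes the .files metadata document once all chunks are in place; the md5 comes
    // from the server-side "filemd5" command, so it reflects the chunks actually stored.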
    BSONObj GridFS::insertFile(const string& name, const OID& id, unsigned length, const string& contentType){
        BSONObj res;
        if ( ! _client.runCommand( _dbName.c_str() , BSON( "filemd5" << id << "root" << _prefix ) , res ) )
            throw UserException( "filemd5 failed" );

        BSONObjBuilder file;
        file << "_id" << id
             << "filename" << name
             << "length" << (unsigned) length
             << "chunkSize" << DEFAULT_CHUNK_SIZE
             << "uploadDate" << DATENOW
             << "md5" << res["md5"]
             ;

        if (!contentType.empty())
            file << "contentType" << contentType;

        BSONObj ret = file.obj();
        _client.insert(_filesNS.c_str(), ret);
        return ret;
    }

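    // Removes every file matching the given filename, along with all of its chunks.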
    void GridFS::removeFile( const string& fileName ){
        auto_ptr<DBClientCursor> files = _client.query( _filesNS , BSON( "filename" << fileName ) );
        while (files->more()){
            BSONObj file = files->next();
            BSONElement id = file["_id"];
            _client.remove( _filesNS.c_str() , BSON( "_id" << id ) );
            _client.remove( _chunksNS.c_str() , BSON( "files_id" << id ) );
        }
    }

    GridFile::GridFile( GridFS * grid , BSONObj obj ){
        _grid = grid;
        _obj = obj;
    }

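    // Returns the most recently uploaded file matching the criteria (sorted by uploadDate descending).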
    GridFile GridFS::findFile( const string& fileName ){
        return findFile( BSON( "filename" << fileName ) );
    }

    GridFile GridFS::findFile( BSONObj query ){
        query = BSON("query" << query << "orderby" << BSON("uploadDate" << -1));
        return GridFile( this , _client.findOne( _filesNS.c_str() , query ) );
    }

    auto_ptr<DBClientCursor> GridFS::list(){
        return _client.query( _filesNS.c_str() , BSONObj() );
    }

    auto_ptr<DBClientCursor> GridFS::list( BSONObj o ){
        return _client.query( _filesNS.c_str() , o );
    }

    BSONObj GridFile::getMetadata(){
        BSONElement meta_element = _obj["metadata"];
        if( meta_element.eoo() ){
            return BSONObj();
        }

        return meta_element.embeddedObject();
    }

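    // Fetches chunk number n of this file by { files_id , n }; fails a uassert if it is missing.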
    Chunk GridFile::getChunk( int n ){
        _exists();
        BSONObjBuilder b;
        b.appendAs( _obj["_id"] , "files_id" );
        b.append( "n" , n );

        BSONObj o = _grid->_client.findOne( _grid->_chunksNS.c_str() , b.obj() );
        uassert( 10014 , "chunk is empty!" , ! o.isEmpty() );
        return Chunk(o);
    }

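    // Streams every chunk, in order, to the given output stream and returns the file's content length.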
    gridfs_offset GridFile::write( ostream & out ){
        _exists();

        const int num = getNumChunks();

        for ( int i=0; i<num; i++ ){
            Chunk c = getChunk( i );
            int len;
            const char * data = c.data( len );
            out.write( data , len );
        }

        return getContentLength();
    }

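    // Writes the file to the named path, or to stdout when "-" is given.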
    gridfs_offset GridFile::write( const string& where ){
        if (where == "-"){
            return write( cout );
        } else {
            ofstream out(where.c_str() , ios::out | ios::binary );
            return write( out );
        }
    }

    void GridFile::_exists(){
        uassert( 10015 , "doesn't exist" , exists() );
    }

}