Chromium Code Reviews
chromiumcodereview-hr@appspot.gserviceaccount.com (chromiumcodereview-hr) | Please choose your nickname with Settings | Help | Chromium Project | Gerrit Changes | Sign out
(95)

Unified Diff: content/browser/indexed_db/indexed_db_backing_store.cc

Issue 18023022: Blob support for IDB [Chromium] (Closed) Base URL: svn://svn.chromium.org/chrome/trunk/src
Patch Set: Use ScopedVector and stl_utils for BlobDataHandles. Created 7 years ago
Use n/p to move between diff chunks; N/P to move between comments. Draft comments are only viewable by you.
Jump to:
View side-by-side diff with in-line comments
Download patch
Index: content/browser/indexed_db/indexed_db_backing_store.cc
diff --git a/content/browser/indexed_db/indexed_db_backing_store.cc b/content/browser/indexed_db/indexed_db_backing_store.cc
index ae40d22846ece0d25f9a18e89f9eb9aaa39390cf..821148c9cde11f6ecdb85f3881658fb061ec92b9 100644
--- a/content/browser/indexed_db/indexed_db_backing_store.cc
+++ b/content/browser/indexed_db/indexed_db_backing_store.cc
@@ -9,10 +9,14 @@
#include "base/metrics/histogram.h"
#include "base/strings/string_piece.h"
#include "base/strings/string_util.h"
+#include "base/strings/stringprintf.h"
#include "base/strings/utf_string_conversions.h"
+#include "content/browser/child_process_security_policy_impl.h"
+#include "content/browser/indexed_db/indexed_db_blob_info.h"
#include "content/browser/indexed_db/indexed_db_leveldb_coding.h"
#include "content/browser/indexed_db/indexed_db_metadata.h"
#include "content/browser/indexed_db/indexed_db_tracing.h"
+#include "content/browser/indexed_db/indexed_db_value.h"
#include "content/browser/indexed_db/leveldb/leveldb_comparator.h"
#include "content/browser/indexed_db/leveldb/leveldb_database.h"
#include "content/browser/indexed_db/leveldb/leveldb_iterator.h"
@@ -20,17 +24,46 @@
#include "content/common/indexed_db/indexed_db_key.h"
#include "content/common/indexed_db/indexed_db_key_path.h"
#include "content/common/indexed_db/indexed_db_key_range.h"
+#include "content/public/browser/browser_thread.h"
+#include "net/url_request/url_request_context.h"
#include "third_party/WebKit/public/platform/WebIDBTypes.h"
#include "third_party/WebKit/public/web/WebSerializedScriptValueVersion.h"
#include "third_party/leveldatabase/env_chromium.h"
+#include "webkit/browser/blob/blob_data_handle.h"
+#include "webkit/browser/fileapi/file_stream_writer.h"
+#include "webkit/browser/fileapi/file_writer_delegate.h"
+#include "webkit/browser/fileapi/local_file_stream_writer.h"
#include "webkit/common/database/database_identifier.h"
+using base::FilePath;
using base::StringPiece;
+using fileapi::FileWriterDelegate;
namespace content {
namespace {
+FilePath GetIDBBlobDirectoryName(const FilePath& pathBase,
jsbell 2013/12/18 23:04:40 Suggestion: Drop the 'IDB' in function names here
ericu 2013/12/19 05:19:11 Done.
+ int64 database_id) {
jsbell 2013/12/18 23:04:40 Indentation... although I'm going to assume you'll run git cl format on this before landing.
ericu 2013/12/19 05:19:11 Just tried git cl format for the first time. Awes
+ return pathBase.AppendASCII(base::StringPrintf("%lx", database_id));
+}
+
+FilePath GetIDBBlobDirectoryNameForKey(const FilePath& pathBase,
+ int64 database_id, int64 key) {
+ FilePath path = GetIDBBlobDirectoryName(pathBase, database_id);
+ path = path.AppendASCII(
+ base::StringPrintf("%x", static_cast<int>(key & 0x0000ff00) >> 8));
jsbell 2013/12/18 23:04:40 Would there be any disadvantage to using %02x so the names are fixed width?
ericu 2013/12/19 05:19:11 Well, the first few will be bigger than they need to be.
ericu 2014/02/25 21:19:44 Note that I haven't done this for the filenames or directory names.
+ return path;
+}
+
+// This assumes a file path of dbId/3rd-byte-of-counter/counter.
+bool MakeIDBBlobDirectory(
+ const FilePath& pathBase, int64 database_id, int64 key) {
+ FilePath path =
+ GetIDBBlobDirectoryNameForKey(pathBase, database_id, key);
+ return file_util::CreateDirectory(path);
+}
+
static std::string ComputeOriginIdentifier(const GURL& origin_url) {
return webkit_database::GetIdentifierFromOrigin(origin_url) + "@1";
}
@@ -41,6 +74,12 @@ static base::FilePath ComputeFileName(const GURL& origin_url) {
.AddExtension(FILE_PATH_LITERAL(".indexeddb.leveldb"));
}
+static base::FilePath ComputeBlobPath(const GURL& origin_url) {
+ return base::FilePath()
+ .AppendASCII(webkit_database::GetIdentifierFromOrigin(origin_url))
+ .AddExtension(FILE_PATH_LITERAL(".indexeddb.blob"));
+}
+
} // namespace
static const int64 kKeyGeneratorInitialNumber =
@@ -68,6 +107,9 @@ enum IndexedDBBackingStoreErrorSource {
CREATE_IDBDATABASE_METADATA,
DELETE_DATABASE,
TRANSACTION_COMMIT_METHOD, // TRANSACTION_COMMIT is a WinNT.h macro
+ GET_BLOB_KEY_GENERATOR_CURRENT_NUMBER,
jsbell 2013/12/18 23:04:40 Need to append (after GET_DATABASE_NAMES) to retain the values of the existing histogram entries.
ericu 2013/12/19 05:19:11 Done.
+ GET_BLOB_INFO_FOR_RECORD,
+ DECODE_BLOB_JOURNAL,
GET_DATABASE_NAMES,
INTERNAL_ERROR_MAX,
};
@@ -242,15 +284,14 @@ WARN_UNUSED_RESULT static bool IsSchemaKnown(LevelDBDatabase* db, bool* known) {
return true;
}
-WARN_UNUSED_RESULT static bool SetUpMetadata(
- LevelDBDatabase* db,
- const std::string& origin_identifier) {
+WARN_UNUSED_RESULT bool IndexedDBBackingStore::SetUpMetadata() {
jsbell 2013/12/18 23:04:40 Land separately?
ericu 2013/12/19 05:19:11 Sure; that's just there to help the unit test, so it can land separately.
const uint32 latest_known_data_version =
blink::kSerializedScriptValueVersion;
const std::string schema_version_key = SchemaVersionKey::Encode();
const std::string data_version_key = DataVersionKey::Encode();
- scoped_refptr<LevelDBTransaction> transaction = new LevelDBTransaction(db);
+ scoped_refptr<LevelDBTransaction> transaction =
+ new LevelDBTransaction(db_.get());
int64 db_schema_version = 0;
int64 db_data_version = 0;
@@ -274,10 +315,10 @@ WARN_UNUSED_RESULT static bool SetUpMetadata(
db_schema_version = 1;
PutInt(transaction.get(), schema_version_key, db_schema_version);
const std::string start_key =
- DatabaseNameKey::EncodeMinKeyForOrigin(origin_identifier);
+ DatabaseNameKey::EncodeMinKeyForOrigin(origin_identifier_);
const std::string stop_key =
- DatabaseNameKey::EncodeStopKeyForOrigin(origin_identifier);
- scoped_ptr<LevelDBIterator> it = db->CreateIterator();
+ DatabaseNameKey::EncodeStopKeyForOrigin(origin_identifier_);
+ scoped_ptr<LevelDBIterator> it = db_->CreateIterator();
for (it->Seek(start_key);
it->IsValid() && CompareKeys(it->Key(), stop_key) < 0;
it->Next()) {
@@ -369,21 +410,281 @@ class DefaultLevelDBFactory : public LevelDBFactory {
bool* is_disk_full) OVERRIDE {
return LevelDBDatabase::Open(file_name, comparator, db, is_disk_full);
}
- virtual bool DestroyLevelDB(const base::FilePath& file_name) OVERRIDE {
+ virtual bool DestroyLevelDB(const FilePath& file_name) OVERRIDE {
return LevelDBDatabase::Destroy(file_name);
}
};
+static bool GetBlobKeyGeneratorCurrentNumber(
+ LevelDBTransaction* leveldb_transaction, int64 database_id,
+ int64& blob_key_generator_current_number) {
jsbell 2013/12/18 23:04:40 Non-const ref; use pointer instead.
ericu 2013/12/19 05:19:11 Done.
+ const std::string key_gen_key =
+ DatabaseMetaDataKey::Encode(
+ database_id, DatabaseMetaDataKey::BLOB_KEY_GENERATOR_CURRENT_NUMBER);
+
+ // Default to initial number if not found.
+ int64 cur_number = DatabaseMetaDataKey::kBlobKeyGeneratorInitialNumber;
+ std::string data;
+
+ bool found = false;
+ bool ok = leveldb_transaction->Get(key_gen_key, &data, &found);
+ if (!ok) {
+ INTERNAL_READ_ERROR(GET_BLOB_KEY_GENERATOR_CURRENT_NUMBER);
+ return false;
+ }
+ if (found) {
+ StringPiece slice(data);
+ if (!DecodeVarInt(&slice, &cur_number) ||
jsbell 2013/12/18 23:04:40 Also test to see if the slice was completely consumed.
ericu 2013/12/19 05:19:11 Done. In GetDatabaseNames, CheckObjectStoreAndMetaDataType, and GetObjectStores as well?
jsbell 2013/12/20 00:59:28 Yes, yes, yes (although GetObjectStores is just testing internal consistency).
ericu 2014/02/20 00:50:29 There's a weird mix of DCHECK, continue, and break
+ !DatabaseMetaDataKey::IsValidBlobKey(cur_number)) {
+ INTERNAL_READ_ERROR(GET_BLOB_KEY_GENERATOR_CURRENT_NUMBER);
+ return false;
+ }
+ }
+ blob_key_generator_current_number = cur_number;
+ return true;
+}
+
+static bool UpdateBlobKeyGeneratorCurrentNumber(
+ LevelDBTransaction* leveldb_transaction, int64 database_id,
+ int64 blob_key_generator_current_number) {
+#ifndef NDEBUG
+ int64 old_number;
+ if (!GetBlobKeyGeneratorCurrentNumber(leveldb_transaction, database_id,
+ old_number))
+ return false;
+ DCHECK_LT(old_number, blob_key_generator_current_number);
+#endif
+ DCHECK(DatabaseMetaDataKey::IsValidBlobKey(
+ blob_key_generator_current_number));
+ const std::string key =
+ DatabaseMetaDataKey::Encode(
+ database_id, DatabaseMetaDataKey::BLOB_KEY_GENERATOR_CURRENT_NUMBER);
+
+ PutInt(leveldb_transaction, key, blob_key_generator_current_number);
+ return true;
+}
+
+static bool DecodeBlobJournal(const std::string& data,
+ IndexedDBBackingStore::Transaction::BlobJournalType& journal) {
jsbell 2013/12/18 23:04:40 non-const ref; should be a pointer
ericu 2013/12/19 05:19:11 Done.
+ // TODO(ericu): Yell something on errors. If we persistently can't read the
+ // blob journal, the safe thing to do is to clear it and leak the blobs,
+ // though that may be costly. Still, database/directory deletion should always
+ // clean things up, and we can write an fsck that will do a full correction if
+ // need be.
+ IndexedDBBackingStore::Transaction::BlobJournalType output;
+ StringPiece slice(data);
+ while (!slice.empty()) {
+ int64 database_id = -1;
+ int64 blob_key = -1;
+ if (!DecodeVarInt(&slice, &database_id))
+ return false;
+ else if (!KeyPrefix::IsValidDatabaseId(database_id))
jsbell 2013/12/18 23:04:40 No need for 'else' here
ericu 2013/12/19 05:19:11 Done.
+ return false;
+ if (!DecodeVarInt(&slice, &blob_key)) {
+ return false;
+ } else if (!DatabaseMetaDataKey::IsValidBlobKey(blob_key) &&
+ (blob_key != DatabaseMetaDataKey::kAllBlobsKey)) {
+ return false;
+ }
+ output.push_back(std::make_pair(database_id, blob_key));
+ }
+ journal.swap(output);
+ return true;
+}
+
+static bool GetBlobJournalHelper(
+ bool ok, bool found,
+ const std::string& data,
+ IndexedDBBackingStore::Transaction::BlobJournalType& journal) {
jsbell 2013/12/18 23:04:40 non-const ref
ericu 2013/12/19 05:19:11 Done.
+ if (!ok) {
+ INTERNAL_READ_ERROR(KEY_EXISTS_IN_OBJECT_STORE);
+ return false;
+ }
+ journal.clear();
+ if (!found)
+ return true;
+ if (!data.size())
+ return true;
+ if (!DecodeBlobJournal(data, journal)) {
+ INTERNAL_READ_ERROR(DECODE_BLOB_JOURNAL);
+ return false;
+ }
+ return true;
+}
+
+static bool GetBlobJournal(
+ const StringPiece& leveldb_key,
+ LevelDBTransaction* leveldb_transaction,
+ IndexedDBBackingStore::Transaction::BlobJournalType& journal) {
jsbell 2013/12/18 23:04:40 non-const ref
ericu 2013/12/19 05:19:11 Done.
+ std::string data;
+ bool found = false;
+ bool ok = leveldb_transaction->Get(leveldb_key, &data, &found);
+ return GetBlobJournalHelper(ok, found, data, journal);
+}
+
+static bool GetBlobJournal(
jsbell 2013/12/18 23:04:40 Can GetBlobJournal be a template function? Looks l
ericu 2013/12/19 05:19:11 Done.
+ const StringPiece& leveldb_key,
+ LevelDBUncachedTransaction* leveldb_transaction,
+ IndexedDBBackingStore::Transaction::BlobJournalType& journal) {
+ std::string data;
+ bool found = false;
+ bool ok = leveldb_transaction->Get(leveldb_key, &data, &found);
+ return GetBlobJournalHelper(ok, found, data, journal);
+}
+
+static std::string EncodeBlobJournalWithBlobList(
jsbell 2013/12/18 23:04:40 Does "WithBlobList" add anything meaningful to the name?
ericu 2013/12/19 05:19:11 Not any more. Removed. Below it's as opposed to "WithDatabase".
+ const IndexedDBBackingStore::Transaction::BlobJournalType& journal) {
+ std::string data;
+ if (journal.size()) {
jsbell 2013/12/18 23:04:40 Nit: Probably not worth the clutter to have this t
ericu 2013/12/19 05:19:11 Done.
+ IndexedDBBackingStore::Transaction::BlobJournalType::const_iterator iter;
+ for (iter = journal.begin(); iter != journal.end(); ++iter) {
+ EncodeVarInt(iter->first, &data);
+ EncodeVarInt(iter->second, &data);
+ }
+ }
+ return data;
+}
+
+static void ClearBlobJournal(LevelDBTransaction* leveldb_transaction,
+ const std::string& level_db_key) {
+ leveldb_transaction->Remove(level_db_key);
+}
+
+static void UpdatePrimaryJournalWithBlobList(
+ LevelDBTransaction* leveldb_transaction,
+ const IndexedDBBackingStore::Transaction::BlobJournalType& journal) {
+ const std::string leveldbKey = BlobJournalKey::Encode();
jsbell 2013/12/18 23:04:40 Nit: Don't use camelCase for locals/members.
ericu 2013/12/19 05:19:11 Done. This, like many of your other finds, is leftover from the Blink code.
jsbell 2013/12/20 00:59:28 I hear you. As you can see from my other patches, I've been cleaning these up as I go.
+ std::string data = EncodeBlobJournalWithBlobList(journal);
+ leveldb_transaction->Put(leveldbKey, &data);
+}
+
+static void UpdateLiveBlobJournalWithBlobList(
+ LevelDBTransaction* leveldb_transaction,
+ const IndexedDBBackingStore::Transaction::BlobJournalType& journal) {
+ const std::string leveldbKey = LiveBlobJournalKey::Encode();
jsbell 2013/12/18 23:04:40 Ditto.
ericu 2013/12/19 05:19:11 Done.
+ std::string data = EncodeBlobJournalWithBlobList(journal);
+ leveldb_transaction->Put(leveldbKey, &data);
+}
+
+static bool MergeBlobsIntoLiveBlobJournal(
+ LevelDBTransaction* leveldb_transaction,
+ const IndexedDBBackingStore::Transaction::BlobJournalType& journal) {
+ IndexedDBBackingStore::Transaction::BlobJournalType old_journal;
+ std::string key = LiveBlobJournalKey::Encode();
+ if (!GetBlobJournal(key, leveldb_transaction, old_journal))
+ return false;
+
+ old_journal.insert(old_journal.end(), journal.begin(), journal.end());
+
+ UpdateLiveBlobJournalWithBlobList(leveldb_transaction, old_journal);
+ return true;
+}
+
+static void UpdateBlobJournalWithDatabase(
+ LevelDBUncachedTransaction* leveldb_transaction, int64 database_id) {
+ IndexedDBBackingStore::Transaction::BlobJournalType journal;
+ journal.push_back(
+ std::make_pair(database_id, DatabaseMetaDataKey::kAllBlobsKey));
+ const std::string key = BlobJournalKey::Encode();
+ std::string data = EncodeBlobJournalWithBlobList(journal);
+ leveldb_transaction->Put(key, &data);
+}
+
+static bool MergeDatabaseIntoLiveBlobJournal(
+ LevelDBUncachedTransaction* leveldb_transaction, int64 database_id) {
+ IndexedDBBackingStore::Transaction::BlobJournalType journal;
+ std::string key = LiveBlobJournalKey::Encode();
+ if (!GetBlobJournal(key, leveldb_transaction, journal))
+ return false;
+ journal.push_back(
+ std::make_pair(database_id, DatabaseMetaDataKey::kAllBlobsKey));
+ std::string data = EncodeBlobJournalWithBlobList(journal);
+ leveldb_transaction->Put(key, &data);
+ return true;
+}
+
+// Blob Data is encoded as { is_file [bool], key [int64 as varInt],
jsbell 2013/12/18 23:04:40 Huh... I'm surprised that we need to distinguish files from blobs here.
ericu 2013/12/19 05:19:11 It's partly for spec correctness in a way I haven't implemented yet.
jsbell 2013/12/20 00:59:28 Thanks I hadn't considered that things other than
+// type [string-with-length, may be empty], then [for Blobs] size
+// [int64 as varInt] or [for Files] fileName [string-with-length] }
+static std::string EncodeBlobData(
+ const std::vector<IndexedDBBlobInfo*>& blob_info) {
+ std::string ret;
+ std::vector<IndexedDBBlobInfo*>::const_iterator iter;
+ for (iter = blob_info.begin(); iter != blob_info.end(); ++iter) {
+ const IndexedDBBlobInfo& info = **iter;
+ EncodeBool(info.is_file(), &ret);
+ EncodeVarInt(info.key(), &ret);
+ EncodeStringWithLength(info.type(), &ret);
+ if (info.is_file())
+ EncodeStringWithLength(info.file_name(), &ret);
+ else
+ EncodeVarInt(info.size(), &ret);
+ }
+ return ret;
+}
+
+static bool DecodeBlobData(
+ const std::string& data,
+ std::vector<IndexedDBBlobInfo>* output) {
+ std::vector<IndexedDBBlobInfo> ret;
+ output->clear();
+ StringPiece slice(data);
+ while (!slice.empty()) {
+ bool is_file;
+ int64 key;
+ string16 type;
+ int64 size;
+ string16 file_name;
+
+ if (!DecodeBool(&slice, &is_file))
+ return false;
+ if (!DecodeVarInt(&slice, &key) ||
+ !DatabaseMetaDataKey::IsValidBlobKey(key))
+ return false;
+ if (!DecodeStringWithLength(&slice, &type))
+ return false;
+ if (is_file) {
+ if (!DecodeStringWithLength(&slice, &file_name))
+ return false;
+ ret.push_back(IndexedDBBlobInfo(key, type, file_name));
+ } else {
+ if (!DecodeVarInt(&slice, &size) || size < 0)
+ return false;
+ ret.push_back(IndexedDBBlobInfo(type, static_cast<uint64>(size), key));
+ }
+ }
+ output->swap(ret);
+
+ return true;
+}
+
IndexedDBBackingStore::IndexedDBBackingStore(
+ IndexedDBFactory* indexed_db_factory,
const GURL& origin_url,
+ const FilePath& blob_path,
+ net::URLRequestContext* request_context,
scoped_ptr<LevelDBDatabase> db,
- scoped_ptr<LevelDBComparator> comparator)
- : origin_url_(origin_url),
+ scoped_ptr<LevelDBComparator> comparator,
+ base::TaskRunner* task_runner)
+ : indexed_db_factory_(indexed_db_factory),
+ origin_url_(origin_url),
origin_identifier_(ComputeOriginIdentifier(origin_url)),
+ blob_path_(blob_path),
+ request_context_(request_context),
+ task_runner_(task_runner),
db_(db.Pass()),
- comparator_(comparator.Pass()) {}
+ comparator_(comparator.Pass()),
+ active_blob_registry_(this) {}
IndexedDBBackingStore::~IndexedDBBackingStore() {
+ if (!blob_path_.empty() && !child_process_ids_granted_.empty()) {
+ ChildProcessSecurityPolicyImpl* policy =
+ ChildProcessSecurityPolicyImpl::GetInstance();
+ for (std::set<int>::iterator iter = child_process_ids_granted_.begin();
+ iter != child_process_ids_granted_.end(); ++iter) {
+ policy->RevokeAllPermissionsForFile(*iter, blob_path_);
+ }
+ }
// db_'s destructor uses comparator_. The order of destruction is important.
db_.reset();
comparator_.reset();
@@ -422,19 +723,27 @@ enum IndexedDBBackingStoreOpenResult {
// static
scoped_refptr<IndexedDBBackingStore> IndexedDBBackingStore::Open(
+ IndexedDBFactory* indexed_db_factory,
const GURL& origin_url,
const base::FilePath& path_base,
+ net::URLRequestContext* request_context,
blink::WebIDBDataLoss* data_loss,
std::string* data_loss_message,
- bool* disk_full) {
+ bool* disk_full,
+ base::TaskRunner* task_runner,
+ bool clean_journal) {
*data_loss = blink::WebIDBDataLossNone;
DefaultLevelDBFactory leveldb_factory;
- return IndexedDBBackingStore::Open(origin_url,
+ return IndexedDBBackingStore::Open(indexed_db_factory,
+ origin_url,
path_base,
+ request_context,
data_loss,
data_loss_message,
disk_full,
- &leveldb_factory);
+ &leveldb_factory,
+ task_runner,
+ clean_journal);
}
static std::string OriginToCustomHistogramSuffix(const GURL& origin_url) {
@@ -495,12 +804,16 @@ static bool IsPathTooLong(const base::FilePath& leveldb_dir) {
// static
scoped_refptr<IndexedDBBackingStore> IndexedDBBackingStore::Open(
+ IndexedDBFactory* indexed_db_factory,
const GURL& origin_url,
const base::FilePath& path_base,
+ net::URLRequestContext* request_context,
blink::WebIDBDataLoss* data_loss,
std::string* data_loss_message,
bool* is_disk_full,
- LevelDBFactory* leveldb_factory) {
+ LevelDBFactory* leveldb_factory,
+ base::TaskRunner* task_runner,
+ bool clean_journal) {
IDB_TRACE("IndexedDBBackingStore::Open");
DCHECK(!path_base.empty());
*data_loss = blink::WebIDBDataLossNone;
@@ -523,6 +836,8 @@ scoped_refptr<IndexedDBBackingStore> IndexedDBBackingStore::Open(
const base::FilePath file_path =
path_base.Append(ComputeFileName(origin_url));
+ const base::FilePath blob_path =
+ path_base.Append(ComputeBlobPath(origin_url));
if (IsPathTooLong(file_path)) {
HistogramOpenStatus(INDEXED_DB_BACKING_STORE_OPEN_ORIGIN_TOO_LONG,
@@ -607,20 +922,29 @@ scoped_refptr<IndexedDBBackingStore> IndexedDBBackingStore::Open(
return scoped_refptr<IndexedDBBackingStore>();
}
- return Create(origin_url, db.Pass(), comparator.Pass());
+ scoped_refptr<IndexedDBBackingStore> backing_store = Create(
+ indexed_db_factory, origin_url, blob_path, request_context, db.Pass(),
+ comparator.Pass(), task_runner);
+
+ if (clean_journal && !backing_store->CleanUpBlobJournal(
+ LiveBlobJournalKey::Encode()))
+ return scoped_refptr<IndexedDBBackingStore>();
+ return backing_store;
}
// static
scoped_refptr<IndexedDBBackingStore> IndexedDBBackingStore::OpenInMemory(
- const GURL& origin_url) {
+ const GURL& origin_url, base::TaskRunner* task_runner) {
DefaultLevelDBFactory leveldb_factory;
- return IndexedDBBackingStore::OpenInMemory(origin_url, &leveldb_factory);
+ return IndexedDBBackingStore::OpenInMemory(origin_url, &leveldb_factory,
+ task_runner);
}
// static
scoped_refptr<IndexedDBBackingStore> IndexedDBBackingStore::OpenInMemory(
const GURL& origin_url,
- LevelDBFactory* leveldb_factory) {
+ LevelDBFactory* leveldb_factory,
+ base::TaskRunner* task_runner) {
IDB_TRACE("IndexedDBBackingStore::OpenInMemory");
scoped_ptr<LevelDBComparator> comparator(new Comparator());
@@ -634,25 +958,37 @@ scoped_refptr<IndexedDBBackingStore> IndexedDBBackingStore::OpenInMemory(
}
HistogramOpenStatus(INDEXED_DB_BACKING_STORE_OPEN_MEMORY_SUCCESS, origin_url);
- return Create(origin_url, db.Pass(), comparator.Pass());
+ return Create(NULL, origin_url, FilePath(), NULL, db.Pass(),
+ comparator.Pass(), task_runner);
}
// static
scoped_refptr<IndexedDBBackingStore> IndexedDBBackingStore::Create(
+ IndexedDBFactory* indexed_db_factory,
const GURL& origin_url,
+ const FilePath& blob_path,
+ net::URLRequestContext* request_context,
scoped_ptr<LevelDBDatabase> db,
- scoped_ptr<LevelDBComparator> comparator) {
+ scoped_ptr<LevelDBComparator> comparator,
+ base::TaskRunner* task_runner) {
// TODO(jsbell): Handle comparator name changes.
-
scoped_refptr<IndexedDBBackingStore> backing_store(
- new IndexedDBBackingStore(origin_url, db.Pass(), comparator.Pass()));
- if (!SetUpMetadata(backing_store->db_.get(),
- backing_store->origin_identifier_))
+ new IndexedDBBackingStore(indexed_db_factory, origin_url, blob_path,
+ request_context, db.Pass(), comparator.Pass(), task_runner));
+ if (!backing_store->SetUpMetadata())
return scoped_refptr<IndexedDBBackingStore>();
return backing_store;
}
+void IndexedDBBackingStore::GrantChildProcessPermissions(int child_process_id) {
+ if (!child_process_ids_granted_.count(child_process_id)) {
+ child_process_ids_granted_.insert(child_process_id);
+ ChildProcessSecurityPolicyImpl::GetInstance()->GrantReadFile(
+ child_process_id, blob_path_);
+ }
+}
+
std::vector<string16> IndexedDBBackingStore::GetDatabaseNames() {
std::vector<string16> found_names;
const std::string start_key =
@@ -730,6 +1066,28 @@ bool IndexedDBBackingStore::GetIDBDatabaseMetaData(
return false;
}
+ int64 blob_key_generator_current_number =
+ DatabaseMetaDataKey::kInvalidBlobKey;
+
+ ok = GetVarInt(db_.get(),
+ DatabaseMetaDataKey::Encode(
+ metadata->id,
+ DatabaseMetaDataKey::BLOB_KEY_GENERATOR_CURRENT_NUMBER),
+ &blob_key_generator_current_number,
+ found);
+ if (!ok) {
+ INTERNAL_READ_ERROR(GET_IDBDATABASE_METADATA);
+ return false;
+ }
+ if (!*found) {
+ // This database predates blob support.
+ *found = true;
+ } else if (!DatabaseMetaDataKey::IsValidBlobKey(
+ blob_key_generator_current_number)) {
+ INTERNAL_CONSISTENCY_ERROR(GET_IDBDATABASE_METADATA);
+ return false;
+ }
+
return true;
}
@@ -781,6 +1139,11 @@ bool IndexedDBBackingStore::CreateIDBDatabaseMetaData(const string16& name,
DatabaseMetaDataKey::Encode(*row_id,
DatabaseMetaDataKey::USER_INT_VERSION),
int_version);
+ PutVarInt(
+ transaction.get(),
+ DatabaseMetaDataKey::Encode(*row_id,
+ DatabaseMetaDataKey::BLOB_KEY_GENERATOR_CURRENT_NUMBER),
+ DatabaseMetaDataKey::kBlobKeyGeneratorInitialNumber);
if (!transaction->Commit()) {
INTERNAL_WRITE_ERROR(CREATE_IDBDATABASE_METADATA);
return false;
@@ -802,19 +1165,100 @@ bool IndexedDBBackingStore::UpdateIDBDatabaseIntVersion(
return true;
}
-static void DeleteRange(LevelDBTransaction* transaction,
- const std::string& begin,
- const std::string& end) {
+// Note that if you're deleting a range that contains user keys that have blob
+// info, this won't clean up the blobs.
+static void DeleteRangeByKeys(LevelDBTransaction* transaction,
+ const std::string& begin,
+ const std::string& end) {
scoped_ptr<LevelDBIterator> it = transaction->CreateIterator();
for (it->Seek(begin); it->IsValid() && CompareKeys(it->Key(), end) < 0;
it->Next())
transaction->Remove(it->Key());
}
+// For a whole-object-store deletion, we still use the one-blob-record-at-a-time
+// deletion mechanism designed for normal transactions. We could go with the
+// nuke-the-whole-directory method used for deleteDatabase if we structured the
+// directories accordingly, but that would complicate the kind of info we store
+// in the LevelDBTransaction and lengthen paths.
+static void DeleteBlobsInObjectStore(
+ IndexedDBBackingStore::Transaction* transaction,
+ int64 database_id, int64 object_store_id) {
+ std::string start_key, end_key;
+ start_key =
+ BlobEntryKey::EncodeMinForObjectStore(database_id, object_store_id);
+ end_key =
+ BlobEntryKey::EncodeMaxForObjectStore(database_id, object_store_id);
+
+ scoped_ptr<LevelDBIterator> it =
+ transaction->transaction()->CreateIterator();
+ for (it->Seek(start_key);
+ it->IsValid() && CompareKeys(it->Key(), end_key) < 0; it->Next()) {
+ StringPiece key_piece(it->Key());
+ std::string user_key =
+ BlobEntryKey::ReencodeToObjectStoreDataKey(&key_piece);
+ if (user_key.size())
+ transaction->PutBlobInfo(database_id, object_store_id, user_key, NULL,
+ NULL);
+ else
+ INTERNAL_CONSISTENCY_ERROR(GET_IDBDATABASE_METADATA);
jsbell 2013/12/18 23:04:40 Intentionally keep going in case of error here? Maybe worth a comment.
ericu 2013/12/19 05:19:11 Actually, we should stop on error. Fixed.
+ }
+}
+
+static bool GetBlobInfoForRecord(
+ IndexedDBBackingStore* backing_store,
+ LevelDBTransaction* leveldb_transaction,
+ int64 database_id,
+ const std::string& leveldb_key,
+ IndexedDBValue* value) {
+
+ BlobEntryKey blob_entry_key;
+ StringPiece leveldb_key_piece(leveldb_key);
+ if (!BlobEntryKey::FromObjectStoreDataKey(
+ &leveldb_key_piece, &blob_entry_key)) {
+ NOTREACHED();
+ return false;
+ }
+ scoped_ptr<LevelDBIterator> it = leveldb_transaction->CreateIterator();
+ std::string encoded_key = blob_entry_key.Encode();
+ it->Seek(encoded_key);
+ if (it->IsValid() && CompareKeys(it->Key(), encoded_key) == 0) {
+ if (!DecodeBlobData(it->Value().as_string(), &value->blob_info)) {
+ INTERNAL_READ_ERROR(GET_BLOB_INFO_FOR_RECORD);
+ return false;
+ }
+ std::vector<IndexedDBBlobInfo>::iterator iter;
+ for (iter = value->blob_info.begin(); iter != value->blob_info.end();
+ ++iter) {
+ iter->set_file_path(
+ backing_store->GetIDBBlobFileName(database_id, iter->key()));
+ iter->set_mark_used_callback(
+ backing_store->active_blob_registry()->GetAddBlobRefCallback(
+ database_id, iter->key()));
+ iter->set_release_callback(
+ backing_store->active_blob_registry()->GetFinalReleaseCallback(
+ database_id, iter->key()));
+ if (iter->is_file()) {
+ base::PlatformFileInfo info;
+ if (file_util::GetFileInfo(iter->file_path(), &info)) {
+ // This should always work, but it isn't fatal if it doesn't; it just
+ // means a potential slow synchronous call from the renderer later.
+ iter->set_last_modified(info.last_modified);
+ iter->set_size(info.size);
+ }
+ }
+ }
+ }
+ return true;
+}
+
bool IndexedDBBackingStore::DeleteDatabase(const string16& name) {
IDB_TRACE("IndexedDBBackingStore::DeleteDatabase");
- scoped_ptr<LevelDBWriteOnlyTransaction> transaction =
- LevelDBWriteOnlyTransaction::Create(db_.get());
+ scoped_ptr<LevelDBUncachedTransaction> transaction =
+ LevelDBUncachedTransaction::Create(db_.get());
+
+ if (!CleanUpBlobJournal(BlobJournalKey::Encode()))
+ return false;
IndexedDBDatabaseMetadata metadata;
bool success = false;
@@ -837,10 +1281,24 @@ bool IndexedDBBackingStore::DeleteDatabase(const string16& name) {
const std::string key = DatabaseNameKey::Encode(origin_identifier_, name);
transaction->Remove(key);
+ bool need_cleanup = false;
+ if (active_blob_registry()->MarkDeletedCheckIfUsed(
+ metadata.id, DatabaseMetaDataKey::kAllBlobsKey)) {
+ if (!MergeDatabaseIntoLiveBlobJournal(transaction.get(), metadata.id))
+ return false;
+ } else {
+ UpdateBlobJournalWithDatabase(transaction.get(), metadata.id);
+ need_cleanup = true;
+ }
+
if (!transaction->Commit()) {
INTERNAL_WRITE_ERROR(DELETE_DATABASE);
return false;
}
+
+ if (need_cleanup)
+ CleanUpBlobJournal(BlobJournalKey::Encode());
+
return true;
}
@@ -933,7 +1391,7 @@ bool IndexedDBBackingStore::GetObjectStores(
INTERNAL_CONSISTENCY_ERROR(GET_OBJECT_STORES);
}
- it->Next(); // Is evicatble.
+ it->Next(); // Is evictable.
if (!CheckObjectStoreAndMetaDataType(it.get(),
stop_key,
object_store_id,
@@ -1120,7 +1578,8 @@ bool IndexedDBBackingStore::DeleteObjectStore(
return false;
}
- DeleteRange(
+ DeleteBlobsInObjectStore(transaction, database_id, object_store_id);
+ DeleteRangeByKeys(
leveldb_transaction,
ObjectStoreMetaDataKey::Encode(database_id, object_store_id, 0),
ObjectStoreMetaDataKey::EncodeMaxKey(database_id, object_store_id));
@@ -1128,12 +1587,14 @@ bool IndexedDBBackingStore::DeleteObjectStore(
leveldb_transaction->Remove(
ObjectStoreNamesKey::Encode(database_id, object_store_name));
- DeleteRange(leveldb_transaction,
- IndexFreeListKey::Encode(database_id, object_store_id, 0),
- IndexFreeListKey::EncodeMaxKey(database_id, object_store_id));
- DeleteRange(leveldb_transaction,
- IndexMetaDataKey::Encode(database_id, object_store_id, 0, 0),
- IndexMetaDataKey::EncodeMaxKey(database_id, object_store_id));
+ DeleteRangeByKeys(
+ leveldb_transaction,
+ IndexFreeListKey::Encode(database_id, object_store_id, 0),
+ IndexFreeListKey::EncodeMaxKey(database_id, object_store_id));
+ DeleteRangeByKeys(
+ leveldb_transaction,
+ IndexMetaDataKey::Encode(database_id, object_store_id, 0, 0),
+ IndexMetaDataKey::EncodeMaxKey(database_id, object_store_id));
return ClearObjectStore(transaction, database_id, object_store_id);
}
@@ -1143,7 +1604,7 @@ bool IndexedDBBackingStore::GetRecord(
int64 database_id,
int64 object_store_id,
const IndexedDBKey& key,
- std::string* record) {
+ IndexedDBValue* record) {
IDB_TRACE("IndexedDBBackingStore::GetRecord");
if (!KeyPrefix::ValidIds(database_id, object_store_id))
return false;
@@ -1175,8 +1636,9 @@ bool IndexedDBBackingStore::GetRecord(
return false;
}
- *record = slice.as_string();
- return true;
+ record->bits = slice.as_string();
+ return GetBlobInfoForRecord(
+ this, leveldb_transaction, database_id, leveldb_key, record);
}
WARN_UNUSED_RESULT static bool GetNewVersionNumber(
@@ -1215,7 +1677,8 @@ bool IndexedDBBackingStore::PutRecord(
int64 database_id,
int64 object_store_id,
const IndexedDBKey& key,
- const std::string& value,
+ IndexedDBValue& value,
+ ScopedVector<webkit_blob::BlobDataHandle>* handles,
RecordIdentifier* record_identifier) {
IDB_TRACE("IndexedDBBackingStore::PutRecord");
if (!KeyPrefix::ValidIds(database_id, object_store_id))
@@ -1229,14 +1692,17 @@ bool IndexedDBBackingStore::PutRecord(
if (!ok)
return false;
DCHECK_GE(version, 0);
- const std::string object_storedata_key =
+ const std::string object_store_data_key =
ObjectStoreDataKey::Encode(database_id, object_store_id, key);
std::string v;
EncodeVarInt(version, &v);
- v.append(value);
+ v.append(value.bits);
- leveldb_transaction->Put(object_storedata_key, &v);
+ leveldb_transaction->Put(object_store_data_key, &v);
+ transaction->PutBlobInfo(database_id, object_store_id, object_store_data_key,
+ &value.blob_info, handles);
+ DCHECK(!handles->size());
const std::string exists_entry_key =
ExistsEntryKey::Encode(database_id, object_store_id, key);
@@ -1262,7 +1728,8 @@ bool IndexedDBBackingStore::ClearObjectStore(
const std::string stop_key =
KeyPrefix(database_id, object_store_id + 1).Encode();
- DeleteRange(transaction->transaction(), start_key, stop_key);
+ DeleteRangeByKeys(transaction->transaction(), start_key, stop_key);
+ DeleteBlobsInObjectStore(transaction, database_id, object_store_id);
return true;
}
@@ -1279,6 +1746,8 @@ bool IndexedDBBackingStore::DeleteRecord(
const std::string object_store_data_key = ObjectStoreDataKey::Encode(
database_id, object_store_id, record_identifier.primary_key());
leveldb_transaction->Remove(object_store_data_key);
+ transaction->PutBlobInfo(database_id, object_store_id, object_store_data_key,
+ NULL, NULL);
const std::string exists_entry_key = ExistsEntryKey::Encode(
database_id, object_store_id, record_identifier.primary_key());
@@ -1286,6 +1755,31 @@ bool IndexedDBBackingStore::DeleteRecord(
return true;
}
+// Deletes every record in |key_range| from the given object store by walking
+// an object-store cursor and calling DeleteRecord() per row, which also queues
+// the blob-info removal for each record. Returns false if any single delete
+// fails.
+// NOTE(review): a null cursor is treated as "empty range" and returns true;
+// OpenObjectStoreCursor may also return null on internal error — confirm
+// callers are OK with that being silently swallowed.
+bool IndexedDBBackingStore::DeleteRange(
+ IndexedDBBackingStore::Transaction* transaction,
+ int64 database_id,
+ int64 object_store_id,
+ const IndexedDBKeyRange& key_range) {
+ scoped_ptr<IndexedDBBackingStore::Cursor> backing_store_cursor =
+ OpenObjectStoreCursor(transaction, database_id, object_store_id,
+ key_range, indexed_db::CURSOR_NEXT);
+ // TODO(ericu): This does a PutBlobInfo for every record, even if it doesn't
+ // have any blobs associated with it. We could skip that, and just scan
+ // through the blob table to see which ones we need to remove. That might
+ // take a little more time here, but would also potentially allocate a lot
+ // fewer BlobChangeRecords, shrink the eventual WriteBatch, and do a lot fewer
+ // seeks in CollectBlobFilesToRemove.
+ if (backing_store_cursor) {
+ do {
+ if (!DeleteRecord(
+ transaction, database_id, object_store_id,
+ backing_store_cursor->record_identifier()))
+ return false;
+ } while (backing_store_cursor->Continue());
+ }
+ return true;
+}
+
bool IndexedDBBackingStore::GetKeyGeneratorCurrentNumber(
IndexedDBBackingStore::Transaction* transaction,
int64 database_id,
@@ -1421,6 +1915,270 @@ bool IndexedDBBackingStore::KeyExistsInObjectStore(
return true;
}
+// Writes a list of blob files one at a time: the constructor kicks off the
+// first write via WriteNextFile(), and each ReportWriteCompletion() advances
+// to the next descriptor until the list is exhausted (didSucceed) or a write
+// fails (didFail). Abort() does not cancel an in-flight write; it sets
+// |self_ref_| so the object stays alive until the pending completion arrives,
+// at which point the result is dropped.
+class IndexedDBBackingStore::Transaction::ChainedBlobWriterImpl : public
+ IndexedDBBackingStore::Transaction::ChainedBlobWriter {
+public:
+ typedef IndexedDBBackingStore::Transaction::WriteDescriptorVec
+ WriteDescriptorVec;
+ // Takes the contents of |blobs| (swapped out, leaving the caller's vector
+ // empty) and immediately starts writing the first one.
+ ChainedBlobWriterImpl(
+ int64 database_id,
+ IndexedDBBackingStore* backingStore,
+ WriteDescriptorVec& blobs,
+ scoped_refptr<IndexedDBBackingStore::BlobWriteCallback> callback)
+ : waiting_for_callback_(false),
+ database_id_(database_id),
+ backing_store_(backingStore),
+ callback_(callback),
+ aborted_(false) {
+ blobs_.swap(blobs);
+ iter_ = blobs_.begin();
+ WriteNextFile();
+ }
+
+ // Takes ownership of the delegate driving the current write; it is freed
+ // on the IO thread in ReportWriteCompletion.
+ void set_delegate(scoped_ptr<FileWriterDelegate> delegate) {
+ delegate_.reset(delegate.release());
+ }
+
+ // Called once per descriptor when its write finishes or fails.
+ void ReportWriteCompletion(bool succeeded, int64 bytes_written) {
+ // TODO(ericu): Check bytes_written against the blob's snapshot value.
+ DCHECK(waiting_for_callback_);
+ DCHECK(!succeeded || bytes_written >= 0);
+ waiting_for_callback_ = false;
+ content::BrowserThread::DeleteSoon(
+ content::BrowserThread::IO, FROM_HERE,
+ delegate_.release());
+ if (aborted_) {
+ // Drop the self-reference taken by Abort(); this may destroy |this|.
+ self_ref_ = NULL;
+ return;
+ }
+ if (succeeded)
+ WriteNextFile();
+ else
+ callback_->didFail();
+ }
+
+ // Stops the chain after the current write (if any) completes. No-op when
+ // nothing is in flight.
+ void Abort() {
+ if (!waiting_for_callback_)
+ return;
+ self_ref_ = this;
+ aborted_ = true;
+ }
+
+private:
+ // Starts the write for the next descriptor, or reports overall success
+ // when none remain.
+ void WriteNextFile() {
+ DCHECK(!waiting_for_callback_);
+ DCHECK(!aborted_);
+ if (iter_ == blobs_.end()) {
+ DCHECK(!self_ref_);
+ callback_->didSucceed();
+ return;
+ } else {
+ if (!backing_store_->WriteBlobFile(database_id_, *iter_, this)) {
+ callback_->didFail();
+ return;
+ }
+ waiting_for_callback_ = true;
+ ++iter_;
+ }
+ }
+
+ bool waiting_for_callback_;
+ // Non-null only between Abort() and the next ReportWriteCompletion().
+ scoped_refptr<ChainedBlobWriterImpl> self_ref_;
+ WriteDescriptorVec blobs_;
+ WriteDescriptorVec::const_iterator iter_;
+ int64 database_id_;
+ IndexedDBBackingStore* backing_store_;
+ scoped_refptr<IndexedDBBackingStore::BlobWriteCallback> callback_;
+ scoped_ptr<FileWriterDelegate> delegate_;
+ bool aborted_;
+};
+
+// Bridges a single blob-URL-to-file write between threads: the write itself
+// runs on the IO thread (writeBlobToFileOnIOThread), progress/completion
+// arrives via Run(), and the final result is bounced back to the IDB task
+// runner where ReportWriteCompletion is invoked.
+class LocalWriteClosure : public FileWriterDelegate::DelegateWriteCallback,
+ public base::RefCounted<LocalWriteClosure> {
+ public:
+ LocalWriteClosure(
+ IndexedDBBackingStore::Transaction::ChainedBlobWriter*
+ chained_blob_writer_,
+ base::TaskRunner* task_runner)
+ : chained_blob_writer_(chained_blob_writer_),
+ task_runner_(task_runner),
+ bytes_written_(-1) {
+ }
+
+ // FileWriterDelegate write callback; runs on the IO thread. Ignores
+ // intermediate progress and forwards the terminal status to the IDB
+ // task runner.
+ void Run(
+ base::PlatformFileError rv,
+ int64 bytes,
+ FileWriterDelegate::WriteProgressStatus write_status) {
+ if (write_status == FileWriterDelegate::SUCCESS_IO_PENDING)
+ return; // We don't care about progress events.
+ if (rv == base::PLATFORM_FILE_OK) {
+ DCHECK(bytes >= 0);
+ DCHECK(write_status == FileWriterDelegate::SUCCESS_COMPLETED);
+ bytes_written_ = bytes;
+ } else {
+ DCHECK(write_status == FileWriterDelegate::ERROR_WRITE_STARTED ||
+ write_status == FileWriterDelegate::ERROR_WRITE_NOT_STARTED);
+ }
+ task_runner_->PostTask(
+ FROM_HERE,
+ base::Bind(
+ &LocalWriteClosure::callBlobCallbackOnIDBTaskRunner, this,
+ write_status == FileWriterDelegate::SUCCESS_COMPLETED));
+ }
+
+ // Starts streaming |blob_url| into |file_path|. Must run on the IO thread.
+ // Ownership of the FileWriterDelegate is handed to the chained writer so
+ // it outlives this call.
+ void writeBlobToFileOnIOThread(
+ const FilePath& file_path, const GURL& blob_url,
+ net::URLRequestContext* request_context) {
+ DCHECK(content::BrowserThread::CurrentlyOn(content::BrowserThread::IO));
+ scoped_ptr<fileapi::FileStreamWriter> writer(
+ fileapi::FileStreamWriter::CreateForLocalFile(
+ task_runner_, file_path, 0, false));
+ scoped_ptr<FileWriterDelegate> delegate(
+ new FileWriterDelegate(writer.Pass()));
+
+ DCHECK(blob_url.is_valid());
+ scoped_ptr<net::URLRequest> blob_request(request_context->CreateRequest(
+ blob_url, net::DEFAULT_PRIORITY, delegate.get()));
+
+ delegate->Start(blob_request.Pass(),
+ base::Bind(&LocalWriteClosure::Run, this));
+ chained_blob_writer_->set_delegate(delegate.Pass());
+ }
+
+ private:
+ // Runs on the IDB task runner; relays the outcome (and byte count, -1 if
+ // the write failed) to the chained writer.
+ void callBlobCallbackOnIDBTaskRunner(bool succeeded) {
+ DCHECK(task_runner_->RunsTasksOnCurrentThread());
+ chained_blob_writer_->ReportWriteCompletion(succeeded, bytes_written_);
+ }
+
+ // Not owned. NOTE(review): raw pointer; assumes the chained writer outlives
+ // this closure — confirm against ChainedBlobWriterImpl's lifetime.
+ IndexedDBBackingStore::Transaction::ChainedBlobWriter* chained_blob_writer_;
+ base::TaskRunner* task_runner_;
+ int64 bytes_written_;
+};
+
+// Writes one blob described by |descriptor| into this store's blob directory.
+// File-backed blobs are copied synchronously and completion is posted to the
+// task runner; URL-backed blobs are streamed asynchronously on the IO thread
+// via LocalWriteClosure. Returns false only for synchronous failures
+// (directory creation or file copy); async failures surface through
+// |chained_blob_writer|.
+bool IndexedDBBackingStore::WriteBlobFile(
+ int64 database_id,
+ const Transaction::WriteDescriptor& descriptor,
+ Transaction::ChainedBlobWriter* chained_blob_writer) {
+
+ if (!MakeIDBBlobDirectory(blob_path_, database_id, descriptor.key()))
+ return false;
+
+ FilePath path = GetIDBBlobFileName(database_id, descriptor.key());
+
+ if (descriptor.is_file()) {
+ DCHECK(!descriptor.file_path().empty());
+ if (!base::CopyFile(descriptor.file_path(), path))
+ return false;
+
+ base::PlatformFileInfo info;
+ if (file_util::GetFileInfo(descriptor.file_path(), &info)) {
+ // TODO(ericu): Validate the snapshot date here. Expand WriteDescriptor
+ // to include snapshot date and file size, and check both.
+ if (!file_util::TouchFile(path, info.last_accessed, info.last_modified))
+ ; // TODO(ericu): Complain quietly; timestamp's probably not vital.
+ } else {
+ ; // TODO(ericu): Complain quietly; timestamp's probably not vital.
+ }
+
+ // NOTE(review): if GetFileInfo failed above, |info.size| is reported
+ // without having been filled in from the source file — confirm this is
+ // benign given the TODO about size validation.
+ task_runner_->PostTask(
+ FROM_HERE,
+ base::Bind(
+ &Transaction::ChainedBlobWriter::ReportWriteCompletion,
+ chained_blob_writer, true, info.size));
+ } else {
+ DCHECK(descriptor.url().is_valid());
+ scoped_refptr<LocalWriteClosure> write_closure(
+ new LocalWriteClosure(chained_blob_writer, task_runner_));
+ content::BrowserThread::PostTask(
+ content::BrowserThread::IO, FROM_HERE,
+ base::Bind(
+ &LocalWriteClosure::writeBlobToFileOnIOThread, write_closure.get(),
+ path, descriptor.url(), request_context_));
+ }
+ return true;
+}
+
+// Called when a previously-live blob (or, with kAllBlobsKey, a whole
+// database's blobs) is no longer referenced. Moves the matching entries from
+// the live-blob journal to the primary (deletion) journal, commits, and arms
+// the cleanup timer so the files are removed later in a batch.
+void IndexedDBBackingStore::ReportBlobUnused(
+ int64 database_id,
+ int64 blob_key) {
+ DCHECK(KeyPrefix::IsValidDatabaseId(database_id));
+ bool all_blobs = blob_key == DatabaseMetaDataKey::kAllBlobsKey;
+ DCHECK(all_blobs || DatabaseMetaDataKey::IsValidBlobKey(blob_key));
+ scoped_refptr<LevelDBTransaction> transaction =
+ new LevelDBTransaction(db_.get());
+
+ std::string live_blob_key = LiveBlobJournalKey::Encode();
+ IndexedDBBackingStore::Transaction::BlobJournalType live_blob_journal;
+ if (!GetBlobJournal(live_blob_key, transaction.get(), live_blob_journal))
+ return;
+ DCHECK(live_blob_journal.size());
+
+ std::string primary_key = BlobJournalKey::Encode();
+ IndexedDBBackingStore::Transaction::BlobJournalType primary_journal;
+ if (!GetBlobJournal(primary_key, transaction.get(), primary_journal))
+ return;
+
+ IndexedDBBackingStore::Transaction::BlobJournalType::iterator journal_iter;
+ // There are several cases to handle. If blob_key is kAllBlobsKey, we want to
+ // remove all entries with database_id from the live_blob journal and add only
+ // kAllBlobsKey to the primary journal. Otherwise if IsValidBlobKey(blob_key)
+ // and we hit kAllBlobsKey for the right database_id in the journal, we leave
+ // the kAllBlobsKey entry in the live_blob journal but add the specific blob
+ // to the primary. Otherwise if IsValidBlobKey(blob_key) and we find a
+ // matching (database_id, blob_key) tuple, we should move it to the primary
+ // journal.
+ IndexedDBBackingStore::Transaction::BlobJournalType new_live_blob_journal;
+ for (journal_iter = live_blob_journal.begin();
+ journal_iter != live_blob_journal.end(); ++journal_iter) {
+ int64 current_database_id = journal_iter->first;
+ int64 current_blob_key = journal_iter->second;
+ bool current_all_blobs =
+ current_blob_key == DatabaseMetaDataKey::kAllBlobsKey;
+ DCHECK(KeyPrefix::IsValidDatabaseId(current_database_id) ||
+ current_all_blobs);
+ if (current_database_id == database_id && (all_blobs ||
+ current_all_blobs || blob_key == current_blob_key)) {
+ if (!all_blobs) {
+ // Single-blob case: at most one entry can match, so copy the rest of
+ // the journal wholesale and stop scanning.
+ primary_journal.push_back(
+ std::make_pair(database_id, current_blob_key));
+ if (current_all_blobs)
+ new_live_blob_journal.push_back(*journal_iter);
+ new_live_blob_journal.insert(new_live_blob_journal.end(),
+ ++journal_iter, live_blob_journal.end()); // All the rest.
+ break;
+ }
+ // all_blobs: drop every entry for this database from the live journal.
+ } else {
+ new_live_blob_journal.push_back(*journal_iter);
+ }
+ }
+ if (all_blobs) {
+ primary_journal.push_back(std::make_pair(
+ database_id, DatabaseMetaDataKey::kAllBlobsKey));
+ }
+ UpdatePrimaryJournalWithBlobList(transaction.get(), primary_journal);
+ UpdateLiveBlobJournalWithBlobList(transaction.get(), new_live_blob_journal);
+ transaction->Commit();
+ // We could just do the deletions/cleaning here, but if there are a lot of
+ // blobs about to be garbage collected, it'd be better to wait and do them all
+ // at once.
+ StartJournalCleaningTimer();
+}
+
+// (Re)arms the one-shot cleanup that deletes files listed in the primary
+// journal; the 5-second delay batches multiple unref events together.
+void IndexedDBBackingStore::StartJournalCleaningTimer() {
+ journal_cleaning_timer_.Start(
+ FROM_HERE, base::TimeDelta::FromSeconds(5), this,
jsbell 2013/12/18 23:04:40 Need to rationalize this time with the "lazy close
ericu 2013/12/19 05:19:11 Yeah, this is kind of arbitrary here. Also, if th
+ &IndexedDBBackingStore::CleanPrimaryJournalIgnoreReturn);
+}
+
+// Returns the on-disk path for blob |key| of |database_id|.
+// This assumes a file path of dbId/3rd-byte-of-counter/counter.
+FilePath IndexedDBBackingStore::GetIDBBlobFileName(
+ int64 database_id, int64 key) {
+ FilePath path = GetIDBBlobDirectoryNameForKey(blob_path_, database_id, key);
+ // "%lx" would truncate a 64-bit |key| on platforms where long is 32 bits
+ // (e.g. Windows); print the full 64-bit value so file names stay unique.
+ path = path.AppendASCII(
+ base::StringPrintf("%llx", static_cast<unsigned long long>(key)));
+ return path;
+}
+
static bool CheckIndexAndMetaDataKey(const LevelDBIterator* it,
const std::string& stop_key,
int64 index_id,
@@ -1523,6 +2281,51 @@ bool IndexedDBBackingStore::GetIndexes(
return true;
}
+// Deletes the single on-disk file backing blob |key| of |database_id|.
+bool IndexedDBBackingStore::RemoveBlobFile(int64 database_id, int64 key) {
+ const FilePath file_name = GetIDBBlobFileName(database_id, key);
+ return base::DeleteFile(file_name, false /* not recursive */);
+}
+
+// Recursively deletes the whole blob directory for |database_id|.
+bool IndexedDBBackingStore::RemoveBlobDirectory(int64 database_id) {
+ const FilePath dir_name = GetIDBBlobDirectoryName(blob_path_, database_id);
+ return base::DeleteFile(dir_name, true /* recursive */);
+}
+
+// Reads the journal stored at |level_db_key|, deletes every blob file (or,
+// for kAllBlobsKey entries, the database's whole blob directory) it lists,
+// then clears the journal. Returns false if the journal can't be read or the
+// clearing transaction fails to commit; an empty journal is trivially true.
+bool IndexedDBBackingStore::CleanUpBlobJournal(
+ const std::string& level_db_key) {
+ scoped_refptr<LevelDBTransaction> journal_transaction =
+ new LevelDBTransaction(db_.get());
+ IndexedDBBackingStore::Transaction::BlobJournalType journal;
+ if (!GetBlobJournal(level_db_key, journal_transaction.get(), journal))
+ return false;
+ if (journal.empty())
+ return true;
+ IndexedDBBackingStore::Transaction::BlobJournalType::iterator journal_iter;
+ for (journal_iter = journal.begin(); journal_iter != journal.end();
+ ++journal_iter) {
+ int64 database_id = journal_iter->first;
+ int64 blob_key = journal_iter->second;
+ DCHECK(KeyPrefix::IsValidDatabaseId(database_id));
+ if (blob_key == DatabaseMetaDataKey::kAllBlobsKey) {
+ // kAllBlobsKey: the entire database's blob directory is obsolete.
+ RemoveBlobDirectory(database_id);
+ } else {
+ DCHECK(DatabaseMetaDataKey::IsValidBlobKey(blob_key));
+ RemoveBlobFile(database_id, blob_key);
+ }
+ }
+ ClearBlobJournal(journal_transaction.get(), level_db_key);
+ return journal_transaction->Commit();
+}
+
+// Timer callback: best-effort cleanup of the primary journal. The return
+// value is deliberately ignored (hence the name) — a failed pass will be
+// retried the next time the journal is cleaned.
+void IndexedDBBackingStore::CleanPrimaryJournalIgnoreReturn() {
+ CleanUpBlobJournal(BlobJournalKey::Encode());
+}
+
WARN_UNUSED_RESULT static bool SetMaxIndexId(LevelDBTransaction* transaction,
int64 database_id,
int64 object_store_id,
@@ -1595,13 +2398,14 @@ bool IndexedDBBackingStore::DeleteIndex(
IndexMetaDataKey::Encode(database_id, object_store_id, index_id, 0);
const std::string index_meta_data_end =
IndexMetaDataKey::EncodeMaxKey(database_id, object_store_id, index_id);
- DeleteRange(leveldb_transaction, index_meta_data_start, index_meta_data_end);
+ DeleteRangeByKeys(leveldb_transaction, index_meta_data_start,
+ index_meta_data_end);
const std::string index_data_start =
IndexDataKey::EncodeMinKey(database_id, object_store_id, index_id);
const std::string index_data_end =
IndexDataKey::EncodeMaxKey(database_id, object_store_id, index_id);
- DeleteRange(leveldb_transaction, index_data_start, index_data_end);
+ DeleteRangeByKeys(leveldb_transaction, index_data_start, index_data_end);
return true;
}
@@ -1818,7 +2622,9 @@ bool IndexedDBBackingStore::KeyExistsInIndex(
IndexedDBBackingStore::Cursor::Cursor(
const IndexedDBBackingStore::Cursor* other)
- : transaction_(other->transaction_),
+ : backing_store_(other->backing_store_),
+ transaction_(other->transaction_),
+ database_id_(other->database_id_),
cursor_options_(other->cursor_options_),
current_key_(new IndexedDBKey(*other->current_key_)) {
if (other->iterator_) {
@@ -1831,9 +2637,16 @@ IndexedDBBackingStore::Cursor::Cursor(
}
}
-IndexedDBBackingStore::Cursor::Cursor(LevelDBTransaction* transaction,
- const CursorOptions& cursor_options)
- : transaction_(transaction), cursor_options_(cursor_options) {}
+IndexedDBBackingStore::Cursor::Cursor(
+ scoped_refptr<IndexedDBBackingStore> backing_store,
jsbell 2013/12/18 23:04:40 This is an interesting new arc in the ownership di
ericu 2013/12/19 05:19:11 OK.
+ LevelDBTransaction* transaction,
+ int64 database_id,
+ const CursorOptions& cursor_options)
+ : backing_store_(backing_store),
+ transaction_(transaction),
+ database_id_(database_id),
+ cursor_options_(cursor_options) {
+}
IndexedDBBackingStore::Cursor::~Cursor() {}
bool IndexedDBBackingStore::Cursor::FirstSeek() {
@@ -2013,16 +2826,19 @@ IndexedDBBackingStore::Cursor::record_identifier() const {
class ObjectStoreKeyCursorImpl : public IndexedDBBackingStore::Cursor {
public:
ObjectStoreKeyCursorImpl(
+ scoped_refptr<IndexedDBBackingStore> backing_store,
LevelDBTransaction* transaction,
+ int64 database_id,
const IndexedDBBackingStore::Cursor::CursorOptions& cursor_options)
- : IndexedDBBackingStore::Cursor(transaction, cursor_options) {}
+ : IndexedDBBackingStore::Cursor(
+ backing_store, transaction, database_id, cursor_options) {}
virtual Cursor* Clone() OVERRIDE {
return new ObjectStoreKeyCursorImpl(this);
}
// IndexedDBBackingStore::Cursor
- virtual std::string* Value() OVERRIDE {
+ virtual IndexedDBValue* Value() OVERRIDE {
NOTREACHED();
return NULL;
}
@@ -2072,14 +2888,17 @@ bool ObjectStoreKeyCursorImpl::LoadCurrentRow() {
class ObjectStoreCursorImpl : public IndexedDBBackingStore::Cursor {
public:
ObjectStoreCursorImpl(
+ scoped_refptr<IndexedDBBackingStore> backing_store,
LevelDBTransaction* transaction,
+ int64 database_id,
const IndexedDBBackingStore::Cursor::CursorOptions& cursor_options)
- : IndexedDBBackingStore::Cursor(transaction, cursor_options) {}
+ : IndexedDBBackingStore::Cursor(
+ backing_store, transaction, database_id, cursor_options) {}
virtual Cursor* Clone() OVERRIDE { return new ObjectStoreCursorImpl(this); }
// IndexedDBBackingStore::Cursor
- virtual std::string* Value() OVERRIDE { return &current_value_; }
+ virtual IndexedDBValue* Value() OVERRIDE { return &current_value_; }
virtual bool LoadCurrentRow() OVERRIDE;
protected:
@@ -2098,13 +2917,13 @@ class ObjectStoreCursorImpl : public IndexedDBBackingStore::Cursor {
: IndexedDBBackingStore::Cursor(other),
current_value_(other->current_value_) {}
- std::string current_value_;
+ IndexedDBValue current_value_;
};
bool ObjectStoreCursorImpl::LoadCurrentRow() {
- StringPiece slice(iterator_->Key());
+ StringPiece key_slice(iterator_->Key());
ObjectStoreDataKey object_store_data_key;
- if (!ObjectStoreDataKey::Decode(&slice, &object_store_data_key)) {
+ if (!ObjectStoreDataKey::Decode(&key_slice, &object_store_data_key)) {
INTERNAL_READ_ERROR(LOAD_CURRENT_ROW);
return false;
}
@@ -2112,8 +2931,8 @@ bool ObjectStoreCursorImpl::LoadCurrentRow() {
current_key_ = object_store_data_key.user_key();
int64 version;
- slice = StringPiece(iterator_->Value());
- if (!DecodeVarInt(&slice, &version)) {
+ StringPiece value_slice = StringPiece(iterator_->Value());
+ if (!DecodeVarInt(&value_slice, &version)) {
INTERNAL_READ_ERROR(LOAD_CURRENT_ROW);
return false;
}
@@ -2123,29 +2942,36 @@ bool ObjectStoreCursorImpl::LoadCurrentRow() {
EncodeIDBKey(*current_key_, &encoded_key);
record_identifier_.Reset(encoded_key, version);
- current_value_ = slice.as_string();
+ if (!GetBlobInfoForRecord(backing_store_, transaction_, database_id_,
+ iterator_->Key().as_string(), &current_value_)) {
+ return false;
+ }
+ current_value_.bits = value_slice.as_string();
return true;
}
class IndexKeyCursorImpl : public IndexedDBBackingStore::Cursor {
public:
IndexKeyCursorImpl(
+ scoped_refptr<IndexedDBBackingStore> backing_store,
LevelDBTransaction* transaction,
+ int64 database_id,
const IndexedDBBackingStore::Cursor::CursorOptions& cursor_options)
- : IndexedDBBackingStore::Cursor(transaction, cursor_options) {}
+ : IndexedDBBackingStore::Cursor(
+ backing_store, transaction, database_id, cursor_options) {}
virtual Cursor* Clone() OVERRIDE { return new IndexKeyCursorImpl(this); }
// IndexedDBBackingStore::Cursor
- virtual std::string* Value() OVERRIDE {
+ virtual IndexedDBValue* Value() OVERRIDE {
NOTREACHED();
return NULL;
}
virtual const IndexedDBKey& primary_key() const OVERRIDE {
return *primary_key_;
}
- virtual const IndexedDBBackingStore::RecordIdentifier& RecordIdentifier()
- const {
+ virtual const IndexedDBBackingStore::RecordIdentifier& record_identifier()
+ const OVERRIDE {
NOTREACHED();
return record_identifier_;
}
@@ -2237,19 +3063,22 @@ bool IndexKeyCursorImpl::LoadCurrentRow() {
class IndexCursorImpl : public IndexedDBBackingStore::Cursor {
public:
IndexCursorImpl(
+ scoped_refptr<IndexedDBBackingStore> backing_store,
LevelDBTransaction* transaction,
+ int64 database_id,
const IndexedDBBackingStore::Cursor::CursorOptions& cursor_options)
- : IndexedDBBackingStore::Cursor(transaction, cursor_options) {}
+ : IndexedDBBackingStore::Cursor(
+ backing_store, transaction, database_id, cursor_options) {}
virtual Cursor* Clone() OVERRIDE { return new IndexCursorImpl(this); }
// IndexedDBBackingStore::Cursor
- virtual std::string* Value() OVERRIDE { return &current_value_; }
+ virtual IndexedDBValue* Value() OVERRIDE { return &current_value_; }
virtual const IndexedDBKey& primary_key() const OVERRIDE {
return *primary_key_;
}
- virtual const IndexedDBBackingStore::RecordIdentifier& RecordIdentifier()
- const {
+ virtual const IndexedDBBackingStore::RecordIdentifier& record_identifier()
+ const OVERRIDE {
NOTREACHED();
return record_identifier_;
}
@@ -2279,7 +3108,7 @@ class IndexCursorImpl : public IndexedDBBackingStore::Cursor {
primary_leveldb_key_(other->primary_leveldb_key_) {}
scoped_ptr<IndexedDBKey> primary_key_;
- std::string current_value_;
+ IndexedDBValue current_value_;
std::string primary_leveldb_key_;
};
@@ -2305,6 +3134,7 @@ bool IndexCursorImpl::LoadCurrentRow() {
return false;
}
+ DCHECK_EQ(index_data_key.DatabaseId(), database_id_);
primary_leveldb_key_ =
ObjectStoreDataKey::Encode(index_data_key.DatabaseId(),
index_data_key.ObjectStoreId(),
@@ -2338,8 +3168,9 @@ bool IndexCursorImpl::LoadCurrentRow() {
return false;
}
- current_value_ = slice.as_string();
- return true;
+ current_value_.bits = slice.as_string();
+ return GetBlobInfoForRecord(backing_store_, transaction_, database_id_,
+ primary_leveldb_key_, &current_value_);
}
bool ObjectStoreCursorOptions(
@@ -2496,7 +3327,8 @@ IndexedDBBackingStore::OpenObjectStoreCursor(
&cursor_options))
return scoped_ptr<IndexedDBBackingStore::Cursor>();
scoped_ptr<ObjectStoreCursorImpl> cursor(
- new ObjectStoreCursorImpl(leveldb_transaction, cursor_options));
+ new ObjectStoreCursorImpl(
+ this, leveldb_transaction, database_id, cursor_options));
if (!cursor->FirstSeek())
return scoped_ptr<IndexedDBBackingStore::Cursor>();
@@ -2521,7 +3353,8 @@ IndexedDBBackingStore::OpenObjectStoreKeyCursor(
&cursor_options))
return scoped_ptr<IndexedDBBackingStore::Cursor>();
scoped_ptr<ObjectStoreKeyCursorImpl> cursor(
- new ObjectStoreKeyCursorImpl(leveldb_transaction, cursor_options));
+ new ObjectStoreKeyCursorImpl(
+ this, leveldb_transaction, database_id, cursor_options));
if (!cursor->FirstSeek())
return scoped_ptr<IndexedDBBackingStore::Cursor>();
@@ -2548,7 +3381,8 @@ IndexedDBBackingStore::OpenIndexKeyCursor(
&cursor_options))
return scoped_ptr<IndexedDBBackingStore::Cursor>();
scoped_ptr<IndexKeyCursorImpl> cursor(
- new IndexKeyCursorImpl(leveldb_transaction, cursor_options));
+ new IndexKeyCursorImpl(
+ this, leveldb_transaction, database_id, cursor_options));
if (!cursor->FirstSeek())
return scoped_ptr<IndexedDBBackingStore::Cursor>();
@@ -2575,7 +3409,8 @@ IndexedDBBackingStore::OpenIndexCursor(
&cursor_options))
return scoped_ptr<IndexedDBBackingStore::Cursor>();
scoped_ptr<IndexCursorImpl> cursor(
- new IndexCursorImpl(leveldb_transaction, cursor_options));
+ new IndexCursorImpl(
+ this, leveldb_transaction, database_id, cursor_options));
if (!cursor->FirstSeek())
return scoped_ptr<IndexedDBBackingStore::Cursor>();
@@ -2584,9 +3419,16 @@ IndexedDBBackingStore::OpenIndexCursor(
IndexedDBBackingStore::Transaction::Transaction(
IndexedDBBackingStore* backing_store)
- : backing_store_(backing_store) {}
+ : backing_store_(backing_store),
+ database_id_(-1) {
+}
-IndexedDBBackingStore::Transaction::~Transaction() {}
+// |blob_change_map_| owns its BlobChangeRecord* values; free them here.
+// NOTE(review): STLDeleteValues (base/stl_util.h) would express this intent
+// directly — confirm it's already included, per this patch set's description.
+IndexedDBBackingStore::Transaction::~Transaction() {
+ BlobChangeMap::iterator iter = blob_change_map_.begin();
+ for (; iter != blob_change_map_.end(); ++iter) {
+ delete iter->second;
+ }
+}
void IndexedDBBackingStore::Transaction::Begin() {
IDB_TRACE("IndexedDBBackingStore::Transaction::Begin");
@@ -2594,21 +3436,302 @@ void IndexedDBBackingStore::Transaction::Begin() {
transaction_ = new LevelDBTransaction(backing_store_->db_.get());
}
-bool IndexedDBBackingStore::Transaction::Commit() {
- IDB_TRACE("IndexedDBBackingStore::Transaction::Commit");
- DCHECK(transaction_.get());
+// Builds the canonical blob URL ("blob:uuid/<uuid>") for a blob UUID.
+// Note: parameter type qualified as std::string — the unqualified "string"
+// does not name a type in this translation unit.
+static GURL getURLFromUUID(const std::string& uuid) {
+ return GURL("blob:uuid/" + uuid);
+}
+
+// Takes the contents of |blob_info| (swapped, leaving the caller's vector
+// empty); a NULL pointer clears the record's blob info entirely.
+void IndexedDBBackingStore::Transaction::BlobChangeRecord::SetBlobInfo(
+ std::vector<IndexedDBBlobInfo>* blob_info) {
+ blob_info_.clear();
+ if (blob_info)
+ blob_info_.swap(*blob_info);
+}
+
+// Takes ownership of the handles in |handles| (swapped out, leaving the
+// caller's vector empty); a NULL pointer clears any held handles.
+void IndexedDBBackingStore::Transaction::BlobChangeRecord::SetHandles(
+ ScopedVector<webkit_blob::BlobDataHandle>* handles) {
+ handles_.clear();
+ if (handles)
+ handles_.swap(*handles);
+}
+
+// Pre-commit pass over the pending blob changes: allocates a blob key for
+// every new blob (via the key generator, in its own committed-before-return
+// LevelDB transaction), records those keys in the primary journal so a crash
+// can't orphan the files, and fills in the output vectors — the encoded
+// blob-table entries to write (|new_blob_entries|) and the file/URL writes to
+// perform (|new_files_to_write|). Returns false on any key-generator or
+// commit failure. No-op (still true) when there are no blob changes.
+bool IndexedDBBackingStore::Transaction::HandleBlobPreTransaction(
+ BlobEntryKeyValuePairVec* new_blob_entries,
+ WriteDescriptorVec* new_files_to_write) {
+ BlobChangeMap::iterator iter = blob_change_map_.begin();
+ new_blob_entries->clear();
+ new_files_to_write->clear();
+ if (iter != blob_change_map_.end()) {
+ // Create LevelDBTransaction for the name generator seed and add-journal.
+ scoped_refptr<LevelDBTransaction> pre_transaction =
+ new LevelDBTransaction(backing_store_->db_.get());
+ BlobJournalType journal;
+ for (; iter != blob_change_map_.end(); ++iter) {
+ std::vector<IndexedDBBlobInfo>::iterator info_iter;
+ std::vector<IndexedDBBlobInfo*> new_blob_keys;
+ for (info_iter = iter->second->mutable_blob_info().begin();
+ info_iter != iter->second->mutable_blob_info().end(); ++info_iter) {
+ int64 next_blob_key = -1;
+ bool result = GetBlobKeyGeneratorCurrentNumber(
+ pre_transaction.get(), database_id_, next_blob_key);
+ if (!result || next_blob_key < 0)
+ return false;
+ // Journal the key first so the file can be cleaned up if we crash
+ // before the main transaction commits.
+ BlobJournalEntryType journal_entry =
+ std::make_pair(database_id_, next_blob_key);
+ journal.push_back(journal_entry);
+ if (info_iter->is_file()) {
+ new_files_to_write->push_back(
+ WriteDescriptor(info_iter->file_path(), next_blob_key));
+ } else {
+ new_files_to_write->push_back(
+ WriteDescriptor(
+ getURLFromUUID(info_iter->uuid()), next_blob_key));
+ }
+ info_iter->set_key(next_blob_key);
+ new_blob_keys.push_back(&*info_iter);
+ result = UpdateBlobKeyGeneratorCurrentNumber(
+ pre_transaction.get(), database_id_, next_blob_key + 1);
+ if (!result)
+ return result;
+ }
+ BlobEntryKey blob_entry_key;
+ StringPiece key_piece(iter->second->key());
+ if (!BlobEntryKey::FromObjectStoreDataKey(&key_piece, &blob_entry_key)) {
+ NOTREACHED();
+ return false;
+ }
+ new_blob_entries->push_back(std::make_pair(blob_entry_key,
+ EncodeBlobData(new_blob_keys)));
+ }
+ UpdatePrimaryJournalWithBlobList(pre_transaction.get(), journal);
+ if (!pre_transaction->Commit())
+ return false;
+ }
+ return true;
+}
+
+// For every record touched by this transaction, looks up its existing
+// blob-table entry (if any), queues those blob keys in |blobs_to_remove_|,
+// and removes the entry from the main transaction. Also derives/validates
+// |database_id_| from the keys. On decode failure the transaction is dropped
+// and false is returned.
+bool IndexedDBBackingStore::Transaction::CollectBlobFilesToRemove() {
+ BlobChangeMap::iterator iter = blob_change_map_.begin();
+ // Look up all old files to remove as part of the transaction, store their
+ // names in blobs_to_remove_, and remove their old blob data entries.
+ if (iter != blob_change_map_.end()) {
+ scoped_ptr<LevelDBIterator> db_iter = transaction_->CreateIterator();
+ for (; iter != blob_change_map_.end(); ++iter) {
+ BlobEntryKey blob_entry_key;
+ StringPiece key_piece(iter->second->key());
+ if (!BlobEntryKey::FromObjectStoreDataKey(&key_piece, &blob_entry_key)) {
+ NOTREACHED();
+ INTERNAL_WRITE_ERROR(TRANSACTION_COMMIT_METHOD);
+ transaction_ = NULL;
+ return false;
+ }
+ if (database_id_ < 0)
+ database_id_ = blob_entry_key.database_id();
+ else
+ DCHECK_EQ(database_id_, blob_entry_key.database_id());
+ std::string blob_entry_key_bytes = blob_entry_key.Encode();
+ db_iter->Seek(blob_entry_key_bytes);
+ // Seek lands at-or-after the key; only an exact match means this
+ // record previously had blobs.
+ if (db_iter->IsValid() &&
+ !CompareKeys(db_iter->Key(), blob_entry_key_bytes)) {
+ std::vector<IndexedDBBlobInfo> blob_info;
+ if (!DecodeBlobData(db_iter->Value().as_string(), &blob_info)) {
+ INTERNAL_READ_ERROR(TRANSACTION_COMMIT_METHOD);
+ transaction_ = NULL;
+ return false;
+ }
+ std::vector<IndexedDBBlobInfo>::iterator blob_info_iter;
+ for (blob_info_iter = blob_info.begin();
+ blob_info_iter != blob_info.end(); ++blob_info_iter)
+ blobs_to_remove_.push_back(
+ std::make_pair(database_id_, blob_info_iter->key()));
+ transaction_->Remove(blob_entry_key_bytes);
+ }
+ }
+ }
+ return true;
+}
+
+// Forwards blob-write completion to the caller's callback, then clears the
+// owning transaction's |chained_blob_writer_| so the writer isn't re-aborted
+// or reused after it finishes.
+class IndexedDBBackingStore::Transaction::BlobWriteCallbackWrapper :
+ public IndexedDBBackingStore::BlobWriteCallback {
+ public:
+ BlobWriteCallbackWrapper(
+ IndexedDBBackingStore::Transaction* transaction,
+ scoped_refptr<BlobWriteCallback> callback)
+ : transaction_(transaction),
+ callback_(callback) {
+ }
+ virtual void didSucceed() {
+ callback_->didSucceed();
+ transaction_->chained_blob_writer_ = NULL;
+ }
+ virtual void didFail() {
+ callback_->didFail();
+ transaction_->chained_blob_writer_ = NULL;
+ }
+ private:
+ // Not owned. NOTE(review): assumes the Transaction outlives this wrapper
+ // while writes are in flight — confirm.
+ IndexedDBBackingStore::Transaction* transaction_;
+ scoped_refptr<BlobWriteCallback> callback_;
+};
+
+// Stages the new blob-table entries in the main transaction and starts the
+// asynchronous chain of file writes. |new_files_to_write| is swapped empty by
+// the ChainedBlobWriterImpl constructor; |callback| fires (wrapped) when all
+// writes finish or one fails.
+void IndexedDBBackingStore::Transaction::WriteNewBlobs(
+ BlobEntryKeyValuePairVec& new_blob_entries,
+ WriteDescriptorVec& new_files_to_write,
+ scoped_refptr<BlobWriteCallback> callback) {
+ DCHECK_GT(new_files_to_write.size(), 0UL);
+ DCHECK_GT(database_id_, 0);
+ BlobEntryKeyValuePairVec::iterator blob_entry_iter;
+ for (blob_entry_iter = new_blob_entries.begin();
+ blob_entry_iter != new_blob_entries.end(); ++blob_entry_iter) {
+ // Add the new blob-table entry for each blob to the main transaction, or
+ // remove any entry that may exist if there's no new one.
+ if (!blob_entry_iter->second.size())
+ transaction_->Remove(blob_entry_iter->first.Encode());
+ else
+ transaction_->Put(blob_entry_iter->first.Encode(),
+ &blob_entry_iter->second);
+ }
+ // Creating the writer will start it going asynchronously.
+ chained_blob_writer_ = new ChainedBlobWriterImpl(database_id_, backing_store_,
+ new_files_to_write, new BlobWriteCallbackWrapper(this, callback));
+}
+
+// Partitions |blobs_to_remove_| by whether each blob is still referenced by
+// an open handle: in-use blobs go to the live-blob journal (deleted when the
+// last reference drops), unused ones to the primary journal (deleted at the
+// next cleanup pass). Afterwards |blobs_to_remove_| holds only the
+// immediately-deletable (primary) set.
+bool IndexedDBBackingStore::Transaction::SortBlobsToRemove() {
+ IndexedDBActiveBlobRegistry* registry =
+ backing_store_->active_blob_registry();
+ BlobJournalType::iterator iter;
+ BlobJournalType primary_journal, live_blob_journal;
+ for (iter = blobs_to_remove_.begin(); iter != blobs_to_remove_.end();
+ ++iter) {
+ if (registry->MarkDeletedCheckIfUsed(iter->first, iter->second))
+ live_blob_journal.push_back(*iter);
+ else
+ primary_journal.push_back(*iter);
+ }
+ UpdatePrimaryJournalWithBlobList(transaction_.get(), primary_journal);
+ if (!MergeBlobsIntoLiveBlobJournal(transaction_.get(), live_blob_journal))
+ return false;
+ // To signal how many blobs need attention right now.
+ blobs_to_remove_.swap(primary_journal);
+ return true;
+}
+
+// First half of the two-phase commit: flushes any stale journal, allocates
+// keys / journals the new blobs (HandleBlobPreTransaction), collects the old
+// blob files to delete, and kicks off the asynchronous blob-file writes.
+// |callback| fires when the writes finish (immediately if there are none);
+// CommitPhaseTwo then commits the LevelDB transaction. On any failure the
+// transaction is dropped and false is returned.
+bool IndexedDBBackingStore::Transaction::CommitPhaseOne(
+ scoped_refptr<BlobWriteCallback> callback) {
+ IDB_TRACE("IndexedDBBackingStore::Transaction::commit");
+ DCHECK(transaction_);
+ DCHECK(backing_store_->task_runner()->RunsTasksOnCurrentThread());
+
+ if (!backing_store_->CleanUpBlobJournal(BlobJournalKey::Encode())) {
+ INTERNAL_WRITE_ERROR(TRANSACTION_COMMIT_METHOD);
+ transaction_ = NULL;
+ return false;
+ }
+
+ BlobEntryKeyValuePairVec new_blob_entries;
+ WriteDescriptorVec new_files_to_write;
+ // This commits the journal of blob files we're about to add, if any.
+ if (!HandleBlobPreTransaction(&new_blob_entries, &new_files_to_write)) {
+ INTERNAL_WRITE_ERROR(TRANSACTION_COMMIT_METHOD);
+ transaction_ = NULL;
+ return false;
+ }
+
+ DCHECK(!new_files_to_write.size() ||
+ KeyPrefix::IsValidDatabaseId(database_id_));
+ if (!CollectBlobFilesToRemove()) {
+ INTERNAL_WRITE_ERROR(TRANSACTION_COMMIT_METHOD);
+ transaction_ = NULL;
+ return false;
+ }
+
+ if (new_files_to_write.size()) {
+ // This kicks off the writes of the new blobs, if any.
+ // This call will zero out new_blob_entries and new_files_to_write.
+ WriteNewBlobs(new_blob_entries, new_files_to_write, callback);
+ // Remove the add journal, if any; once the blobs are written, and we
+ // commit, this will do the cleanup.
+ ClearBlobJournal(transaction_.get(), BlobJournalKey::Encode());
+ } else {
+ callback->didSucceed();
+ }
+
+ return true;
+}
+
+// Second half of the commit, run after blob writes complete: routes doomed
+// blobs to the correct journal (SortBlobsToRemove), commits the LevelDB
+// transaction, and on success immediately cleans up any primary-journal
+// entries queued by this transaction.
+bool IndexedDBBackingStore::Transaction::CommitPhaseTwo() {
+ if (blobs_to_remove_.size())
+ if (!SortBlobsToRemove()) {
+ INTERNAL_WRITE_ERROR(TRANSACTION_COMMIT_METHOD);
+ transaction_ = NULL;
+ return false;
+ }
+
 bool result = transaction_->Commit();
 transaction_ = NULL;
+
 if (!result)
 INTERNAL_WRITE_ERROR(TRANSACTION_COMMIT_METHOD);
+ else if (blobs_to_remove_.size())
+ backing_store_->CleanUpBlobJournal(BlobJournalKey::Encode());
+
 return result;
 }
 void IndexedDBBackingStore::Transaction::Rollback() {
 IDB_TRACE("IndexedDBBackingStore::Transaction::Rollback");
 DCHECK(transaction_.get());
+ // Abandon any in-flight blob writes; Abort() keeps the writer alive until
+ // its pending IO completes, then drops the result.
+ if (chained_blob_writer_) {
+ chained_blob_writer_->Abort();
+ chained_blob_writer_ = NULL;
+ }
 transaction_->Rollback();
 transaction_ = NULL;
 }
+// Records the blob changes for |key| (creating or overwriting the record's
+// change entry) and takes ownership of |handles|; NULL |blob_info|/|handles|
+// mark the record's blobs as removed. Also pins |database_id_| on first use.
+// This is storing an info, even if empty, even if the previous key had no blob
+// info that we know of. It duplicates a bunch of information stored in the
+// leveldb transaction, but only w.r.t. the user keys altered--we don't keep the
+// changes to exists or index keys here.
+void IndexedDBBackingStore::Transaction::PutBlobInfo(
+ int64 database_id,
+ int64 object_store_id,
+ const std::string& key,
+ std::vector<IndexedDBBlobInfo>* blob_info,
+ ScopedVector<webkit_blob::BlobDataHandle>* handles) {
+ DCHECK_GT(key.size(), 0UL);
+ if (database_id_ < 0)
+ database_id_ = database_id;
+ DCHECK_EQ(database_id_, database_id);
+
+ BlobChangeMap::iterator it = blob_change_map_.find(key);
+ BlobChangeRecord *record = NULL;
+ if (it == blob_change_map_.end()) {
+ // First change for this key: the map owns the new record (freed in
+ // ~Transaction).
+ record = new BlobChangeRecord();
+ blob_change_map_[key] = record;
+ record->set_key(key);
+ record->set_object_store_id(object_store_id);
+ } else {
+ record = it->second;
+ }
+ DCHECK_EQ(record->object_store_id(), object_store_id);
+ record->SetBlobInfo(blob_info);
+ record->SetHandles(handles);
+ // SetHandles swaps the handles out, so the caller's vector must be empty.
+ DCHECK(!handles || !handles->size());
+}
+
+// Descriptor for a blob sourced from a blob URL.
+IndexedDBBackingStore::Transaction::WriteDescriptor::WriteDescriptor(
+ const GURL& url, int64_t key)
+ : is_file_(false), url_(url), key_(key) {}
+
+// Descriptor for a blob sourced from a file already on disk.
+IndexedDBBackingStore::Transaction::WriteDescriptor::WriteDescriptor(
+ const FilePath& file_path, int64_t key)
+ : is_file_(true), file_path_(file_path), key_(key) {}
+
+
} // namespace content

Powered by Google App Engine
This is Rietveld 408576698