Index: content/browser/indexed_db/indexed_db_backing_store.cc |
diff --git a/content/browser/indexed_db/indexed_db_backing_store.cc b/content/browser/indexed_db/indexed_db_backing_store.cc |
index 9b6651879570004c9216db0067924df0fb92ec3b..c31be9aae1afd7bbd2d97eb01717d735861d992a 100644 |
--- a/content/browser/indexed_db/indexed_db_backing_store.cc |
+++ b/content/browser/indexed_db/indexed_db_backing_store.cc |
@@ -9,10 +9,14 @@ |
#include "base/metrics/histogram.h" |
#include "base/strings/string_piece.h" |
#include "base/strings/string_util.h" |
+#include "base/strings/stringprintf.h" |
#include "base/strings/utf_string_conversions.h" |
+#include "content/browser/child_process_security_policy_impl.h" |
+#include "content/browser/indexed_db/indexed_db_blob_info.h" |
#include "content/browser/indexed_db/indexed_db_leveldb_coding.h" |
#include "content/browser/indexed_db/indexed_db_metadata.h" |
#include "content/browser/indexed_db/indexed_db_tracing.h" |
+#include "content/browser/indexed_db/indexed_db_value.h" |
#include "content/browser/indexed_db/leveldb/leveldb_comparator.h" |
#include "content/browser/indexed_db/leveldb/leveldb_database.h" |
#include "content/browser/indexed_db/leveldb/leveldb_iterator.h" |
@@ -20,16 +24,47 @@ |
#include "content/common/indexed_db/indexed_db_key.h" |
#include "content/common/indexed_db/indexed_db_key_path.h" |
#include "content/common/indexed_db/indexed_db_key_range.h" |
+#include "content/public/browser/browser_thread.h" |
+#include "net/url_request/url_request_context.h" |
#include "third_party/WebKit/public/platform/WebIDBTypes.h" |
#include "third_party/leveldatabase/env_chromium.h" |
+#include "webkit/browser/fileapi/file_writer_delegate.h" |
+#include "webkit/browser/fileapi/local_file_stream_writer.h" |
+using base::FilePath; |
using base::StringPiece; |
+using fileapi::FileWriterDelegate; |
// TODO(jsbell): Make blink push the version during the open() call. |
static const uint32 kWireVersion = 2; |
namespace content { |
+namespace { |
+ |
+FilePath GetIDBBlobDirectoryName(const FilePath& pathBase, |
+ int64 database_id) { |
+ return pathBase.AppendASCII(base::StringPrintf("%lx", database_id)); |
+} |
+ |
+FilePath GetIDBBlobDirectoryNameForKey(const FilePath& pathBase, |
+ int64 database_id, int64 key) { |
+ FilePath path = GetIDBBlobDirectoryName(pathBase, database_id); |
+ path = path.AppendASCII( |
+ base::StringPrintf("%x", static_cast<int>(key & 0x0000ff00) >> 8)); |
+ return path; |
+} |
+ |
+// This assumes a file path of dbId/second-lowest-byte-of-counter/counter |
+// (the middle path component is (key & 0xff00) >> 8). |
+bool MakeIDBBlobDirectory( |
+ const FilePath& pathBase, int64 database_id, int64 key) { |
+ FilePath path = |
+ GetIDBBlobDirectoryNameForKey(pathBase, database_id, key); |
+ return file_util::CreateDirectory(path); |
+} |
+ |
+} // anonymous namespace |
+ |
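For illustration (editorial sketch, not part of the patch): with the helpers above and the matching GetIDBBlobFileName further down, database_id 0x12 and blob key 0x3456 map to blob_path_/12/34/3456: the database id in hex, then the second-lowest byte of the key, then the key itself. A minimal standalone rendering of the same formatting, using plain snprintf in place of base::StringPrintf:

    // Editorial sketch only: mirrors GetIDBBlobDirectoryNameForKey plus the
    // hex file-name component used by GetIDBBlobFileName further down.
    #include <stdio.h>
    #include <string>

    std::string ExampleBlobFilePath(const std::string& blob_base,
                                    long long database_id,
                                    long long key) {
      char buf[64];
      snprintf(buf, sizeof(buf), "%llx/%x/%llx",
               static_cast<unsigned long long>(database_id),
               static_cast<int>(key & 0x0000ff00) >> 8,
               static_cast<unsigned long long>(key));
      return blob_base + "/" + buf;  // e.g. ".../origin.indexeddb.blob/12/34/3456"
    }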
static const int64 kKeyGeneratorInitialNumber = |
1; // From the IndexedDB specification. |
@@ -55,6 +90,9 @@ enum IndexedDBBackingStoreErrorSource { |
CREATE_IDBDATABASE_METADATA, |
DELETE_DATABASE, |
TRANSACTION_COMMIT_METHOD, // TRANSACTION_COMMIT is a WinNT.h macro |
+ GET_BLOB_KEY_GENERATOR_CURRENT_NUMBER, |
+ GET_BLOB_INFO_FOR_RECORD, |
+ DECODE_BLOB_JOURNAL, |
GET_DATABASE_NAMES, |
INTERNAL_ERROR_MAX, |
}; |
@@ -354,21 +392,283 @@ class DefaultLevelDBFactory : public LevelDBFactory { |
bool* is_disk_full) OVERRIDE { |
return LevelDBDatabase::Open(file_name, comparator, db, is_disk_full); |
} |
- virtual bool DestroyLevelDB(const base::FilePath& file_name) OVERRIDE { |
+ virtual bool DestroyLevelDB(const FilePath& file_name) OVERRIDE { |
return LevelDBDatabase::Destroy(file_name); |
} |
}; |
+static bool GetBlobKeyGeneratorCurrentNumber( |
+ LevelDBTransaction* leveldb_transaction, int64 database_id, |
+ int64& blob_key_generator_current_number) { |
+ const std::string key_gen_key = |
+ DatabaseMetaDataKey::Encode( |
+ database_id, DatabaseMetaDataKey::BLOB_KEY_GENERATOR_CURRENT_NUMBER); |
+ |
+ // Default to initial number if not found. |
+ int64 cur_number = DatabaseMetaDataKey::kBlobKeyGeneratorInitialNumber; |
+ std::string data; |
+ |
+ bool found = false; |
+ bool ok = leveldb_transaction->Get(key_gen_key, &data, &found); |
+ if (!ok) { |
+ INTERNAL_READ_ERROR(GET_BLOB_KEY_GENERATOR_CURRENT_NUMBER); |
+ return false; |
+ } |
+ if (found) { |
+ StringPiece slice(data); |
+ if (!DecodeVarInt(&slice, &cur_number) || |
+ !DatabaseMetaDataKey::IsValidBlobKey(cur_number)) { |
+ INTERNAL_READ_ERROR(GET_BLOB_KEY_GENERATOR_CURRENT_NUMBER); |
+ return false; |
+ } |
+ } |
+ blob_key_generator_current_number = cur_number; |
+ return true; |
+} |
+ |
+static bool UpdateBlobKeyGeneratorCurrentNumber( |
+ LevelDBTransaction* leveldb_transaction, int64 database_id, |
+ int64 blob_key_generator_current_number) { |
+#ifndef NDEBUG |
+ int64 old_number; |
+ if (!GetBlobKeyGeneratorCurrentNumber(leveldb_transaction, database_id, |
+ old_number)) |
+ return false; |
+ DCHECK_LT(old_number, blob_key_generator_current_number); |
+#endif |
+ DCHECK(DatabaseMetaDataKey::IsValidBlobKey( |
+ blob_key_generator_current_number)); |
+ const std::string key = |
+ DatabaseMetaDataKey::Encode( |
+ database_id, DatabaseMetaDataKey::BLOB_KEY_GENERATOR_CURRENT_NUMBER); |
+ |
+ PutInt(leveldb_transaction, key, blob_key_generator_current_number); |
+ return true; |
+} |
+ |
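The two helpers above are meant to be used as a pair: read the per-database counter, hand the value out as the new blob key, then write back counter + 1 in the same transaction. A sketch of that pattern (this is how HandleBlobPreTransaction further down uses them; leveldb_transaction and database_id are assumed to come from the caller):

    int64 next_blob_key = -1;
    if (!GetBlobKeyGeneratorCurrentNumber(
            leveldb_transaction, database_id, next_blob_key) ||
        next_blob_key < 0)
      return false;  // Caller abandons the pre-transaction.
    // ... use next_blob_key for the journal entry and the on-disk file name ...
    if (!UpdateBlobKeyGeneratorCurrentNumber(
            leveldb_transaction, database_id, next_blob_key + 1))
      return false;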
+static bool DecodeBlobJournal(const std::string& data, |
+ IndexedDBBackingStore::Transaction::BlobJournalType& journal) { |
+ // TODO(ericu): Yell something on errors. If we persistently can't read the |
+ // blob journal, the safe thing to do is to clear it and leak the blobs, |
+ // though that may be costly. Still, database/directory deletion should |
+ // always clean things up, and we can write an fsck that will do a full |
+ // correction if need be. |
+ IndexedDBBackingStore::Transaction::BlobJournalType output; |
+ StringPiece slice(data); |
+ while (!slice.empty()) { |
+ int64 database_id = -1; |
+ int64 blob_key = -1; |
+ if (!DecodeVarInt(&slice, &database_id)) |
+ return false; |
+ else if (!KeyPrefix::IsValidDatabaseId(database_id)) |
+ return false; |
+ if (!DecodeVarInt(&slice, &blob_key)) { |
+ return false; |
+ } else if (!DatabaseMetaDataKey::IsValidBlobKey(blob_key) && |
+ (blob_key != DatabaseMetaDataKey::kAllBlobsKey)) { |
+ return false; |
+ } |
+ output.push_back(std::make_pair(database_id, blob_key)); |
+ } |
+ journal.swap(output); |
+ return true; |
+} |
+ |
+static bool GetBlobJournalHelper( |
+ bool ok, bool found, |
+ const std::string& data, |
+ IndexedDBBackingStore::Transaction::BlobJournalType& journal) { |
+ if (!ok) { |
+ INTERNAL_READ_ERROR(KEY_EXISTS_IN_OBJECT_STORE); |
+ return false; |
+ } |
+ journal.clear(); |
+ if (!found) |
+ return true; |
+ if (!data.size()) |
+ return true; |
+ if (!DecodeBlobJournal(data, journal)) { |
+ INTERNAL_READ_ERROR(DECODE_BLOB_JOURNAL); |
+ return false; |
+ } |
+ return true; |
+} |
+ |
+static bool GetBlobJournal( |
+ const StringPiece& leveldb_key, |
+ LevelDBTransaction* leveldb_transaction, |
+ IndexedDBBackingStore::Transaction::BlobJournalType& journal) { |
+ std::string data; |
+ bool found = false; |
+ bool ok = leveldb_transaction->Get(leveldb_key, &data, &found); |
+ return GetBlobJournalHelper(ok, found, data, journal); |
+} |
+ |
+static bool GetBlobJournal( |
+ const StringPiece& leveldb_key, |
+ LevelDBWriteOnlyTransaction* leveldb_transaction, |
+ IndexedDBBackingStore::Transaction::BlobJournalType& journal) { |
+ std::string data; |
+ bool found = false; |
+ bool ok = leveldb_transaction->Get(leveldb_key, &data, &found); |
+ return GetBlobJournalHelper(ok, found, data, journal); |
+} |
+ |
+static std::string EncodeBlobJournalWithBlobList( |
+ const IndexedDBBackingStore::Transaction::BlobJournalType& journal) { |
+ std::string data; |
+ if (journal.size()) { |
+ IndexedDBBackingStore::Transaction::BlobJournalType::const_iterator iter; |
+ for (iter = journal.begin(); iter != journal.end(); ++iter) { |
+ EncodeVarInt(iter->first, &data); |
+ EncodeVarInt(iter->second, &data); |
+ } |
+ } |
+ return data; |
+} |
+ |
+static void ClearBlobJournal(LevelDBTransaction* leveldb_transaction, |
+ const std::string& level_db_key) { |
+ leveldb_transaction->Remove(level_db_key); |
+} |
+ |
+static void UpdatePrimaryJournalWithBlobList( |
+ LevelDBTransaction* leveldb_transaction, |
+ const IndexedDBBackingStore::Transaction::BlobJournalType& journal) { |
+ const std::string leveldbKey = BlobJournalKey::Encode(); |
+ std::string data = EncodeBlobJournalWithBlobList(journal); |
+ leveldb_transaction->Put(leveldbKey, &data); |
+} |
+ |
+static void UpdateSecondaryJournalWithBlobList( |
+ LevelDBTransaction* leveldb_transaction, |
+ const IndexedDBBackingStore::Transaction::BlobJournalType& journal) { |
+ const std::string leveldbKey = LiveBlobJournalKey::Encode(); |
+ std::string data = EncodeBlobJournalWithBlobList(journal); |
+ leveldb_transaction->Put(leveldbKey, &data); |
+} |
+ |
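For concreteness (editorial illustration, not part of the patch): a journal is just the concatenation of varint-encoded (database_id, blob_key) pairs, where blob_key may be DatabaseMetaDataKey::kAllBlobsKey to mean "every blob file belonging to that database". A minimal round trip through the helpers above, assuming 1 is a valid database id and 4 a valid blob key:

    IndexedDBBackingStore::Transaction::BlobJournalType journal;
    journal.push_back(std::make_pair(static_cast<int64>(1),
                                     static_cast<int64>(4)));
    journal.push_back(std::make_pair(static_cast<int64>(1),
                                     DatabaseMetaDataKey::kAllBlobsKey));
    std::string encoded = EncodeBlobJournalWithBlobList(journal);

    IndexedDBBackingStore::Transaction::BlobJournalType decoded;
    bool decoded_ok = DecodeBlobJournal(encoded, decoded);
    DCHECK(decoded_ok);
    DCHECK_EQ(2u, decoded.size());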
+static bool MergeBlobsIntoSecondaryJournal( |
+ LevelDBTransaction* leveldb_transaction, |
+ const IndexedDBBackingStore::Transaction::BlobJournalType& journal) { |
+ IndexedDBBackingStore::Transaction::BlobJournalType old_journal; |
+ std::string key = LiveBlobJournalKey::Encode(); |
+ if (!GetBlobJournal(key, leveldb_transaction, old_journal)) |
+ return false; |
+ |
+ fprintf(stderr, "ERICU: Secondary: old %lu entries, new %lu entries.\n", |
+ old_journal.size(), journal.size()); |
+ old_journal.insert(old_journal.end(), journal.begin(), journal.end()); |
+ fprintf(stderr, "ERICU: final %lu entries.\n", old_journal.size()); |
+ |
+ UpdateSecondaryJournalWithBlobList(leveldb_transaction, old_journal); |
+ return true; |
+} |
+ |
+static void UpdateBlobJournalWithDatabase( |
+ LevelDBWriteOnlyTransaction* leveldb_transaction, int64 database_id) { |
+ IndexedDBBackingStore::Transaction::BlobJournalType journal; |
+ journal.push_back( |
+ std::make_pair(database_id, DatabaseMetaDataKey::kAllBlobsKey)); |
+ const std::string key = BlobJournalKey::Encode(); |
+ std::string data = EncodeBlobJournalWithBlobList(journal); |
+ leveldb_transaction->Put(key, &data); |
+} |
+ |
+static bool MergeDatabaseIntoSecondaryJournal( |
+ LevelDBWriteOnlyTransaction* leveldb_transaction, int64 database_id) { |
+ IndexedDBBackingStore::Transaction::BlobJournalType journal; |
+ std::string key = LiveBlobJournalKey::Encode(); |
+ if (!GetBlobJournal(key, leveldb_transaction, journal)) |
+ return false; |
+ journal.push_back( |
+ std::make_pair(database_id, DatabaseMetaDataKey::kAllBlobsKey)); |
+ std::string data = EncodeBlobJournalWithBlobList(journal); |
+ leveldb_transaction->Put(key, &data); |
+ return true; |
+} |
+ |
+// Blob Data is encoded as { is_file [bool], key [int64 as varInt], |
+// type [string-with-length, may be empty], then [for Blobs] size |
+// [int64 as varInt] or [for Files] fileName [string-with-length] } |
+static std::string EncodeBlobData( |
+ const std::vector<IndexedDBBlobInfo*>& blob_info) { |
+ std::string ret; |
+ std::vector<IndexedDBBlobInfo*>::const_iterator iter; |
+ for (iter = blob_info.begin(); iter != blob_info.end(); ++iter) { |
+ const IndexedDBBlobInfo& info = **iter; |
+ EncodeBool(info.is_file(), &ret); |
+ EncodeVarInt(info.key(), &ret); |
+ EncodeStringWithLength(info.type(), &ret); |
+ if (info.is_file()) |
+ EncodeStringWithLength(info.file_name(), &ret); |
+ else |
+ EncodeVarInt(info.size(), &ret); |
+ } |
+ return ret; |
+} |
+ |
+static bool DecodeBlobData( |
+ const std::string& data, |
+ std::vector<IndexedDBBlobInfo>* output) { |
+ std::vector<IndexedDBBlobInfo> ret; |
+ output->clear(); |
+ StringPiece slice(data); |
+ while (!slice.empty()) { |
+ bool is_file; |
+ int64 key; |
+ string16 type; |
+ int64 size; |
+ string16 file_name; |
+ |
+ if (!DecodeBool(&slice, &is_file)) |
+ return false; |
+ if (!DecodeVarInt(&slice, &key) || |
+ !DatabaseMetaDataKey::IsValidBlobKey(key)) |
+ return false; |
+ if (!DecodeStringWithLength(&slice, &type)) |
+ return false; |
+ if (is_file) { |
+ if (!DecodeStringWithLength(&slice, &file_name)) |
+ return false; |
+ ret.push_back(IndexedDBBlobInfo(type, file_name, key)); |
+ } else { |
+ if (!DecodeVarInt(&slice, &size) || size < 0) |
+ return false; |
+ ret.push_back(IndexedDBBlobInfo(type, static_cast<uint64>(size), key)); |
+ } |
+ } |
+ output->swap(ret); |
+ |
+ return true; |
+} |
+ |
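For illustration (not part of the patch): each entry serializes as {is_file, key, type} followed by either a size (for Blobs) or a file name (for Files). A small round trip using the constructor forms that DecodeBlobData itself relies on, and assuming 1 and 2 are valid blob keys:

    IndexedDBBlobInfo blob(ASCIIToUTF16("text/plain"),
                           static_cast<uint64>(12), 1 /* key */);
    IndexedDBBlobInfo file(ASCIIToUTF16("image/png"),
                           ASCIIToUTF16("cat.png"), 2 /* key */);
    std::vector<IndexedDBBlobInfo*> infos;
    infos.push_back(&blob);
    infos.push_back(&file);
    std::string encoded = EncodeBlobData(infos);

    std::vector<IndexedDBBlobInfo> decoded;
    bool decoded_ok = DecodeBlobData(encoded, &decoded);
    DCHECK(decoded_ok);
    DCHECK_EQ(2u, decoded.size());  // [0] Blob of size 12, [1] File "cat.png".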
IndexedDBBackingStore::IndexedDBBackingStore( |
const std::string& identifier, |
+ const FilePath& blob_path, |
+ net::URLRequestContext* request_context, |
scoped_ptr<LevelDBDatabase> db, |
- scoped_ptr<LevelDBComparator> comparator) |
+ scoped_ptr<LevelDBComparator> comparator, |
+ base::TaskRunner* task_runner) |
: identifier_(identifier), |
+ blob_path_(blob_path), |
+ request_context_(request_context), |
+ task_runner_(task_runner), |
db_(db.Pass()), |
comparator_(comparator.Pass()), |
- weak_factory_(this) {} |
+ active_blob_registry_(this), |
+ weak_factory_(this) { |
+} |
IndexedDBBackingStore::~IndexedDBBackingStore() { |
+ if (!blob_path_.empty()) { |
+ ChildProcessSecurityPolicyImpl* policy = |
+ ChildProcessSecurityPolicyImpl::GetInstance(); |
+ for (std::set<int>::iterator iter = child_process_ids_granted_.begin(); |
+ iter != child_process_ids_granted_.end(); ++iter) { |
+ policy->RevokeAllPermissionsForFile(*iter, blob_path_); |
+ } |
+ } |
// db_'s destructor uses comparator_. The order of destruction is important. |
db_.reset(); |
comparator_.reset(); |
@@ -442,24 +742,33 @@ bool RecoveryCouldBeFruitful(leveldb::Status status) { |
scoped_refptr<IndexedDBBackingStore> IndexedDBBackingStore::Open( |
const std::string& origin_identifier, |
- const base::FilePath& path_base, |
+ const FilePath& path_base, |
const std::string& file_identifier, |
- WebKit::WebIDBCallbacks::DataLoss* data_loss) { |
+ net::URLRequestContext* request_context, |
+ WebKit::WebIDBCallbacks::DataLoss* data_loss, |
+ base::TaskRunner* task_runner, |
+ bool clean_journal) { |
*data_loss = WebKit::WebIDBCallbacks::DataLossNone; |
DefaultLevelDBFactory leveldb_factory; |
return IndexedDBBackingStore::Open(origin_identifier, |
path_base, |
file_identifier, |
+ request_context, |
data_loss, |
- &leveldb_factory); |
+ &leveldb_factory, |
+ task_runner, |
+ clean_journal); |
} |
scoped_refptr<IndexedDBBackingStore> IndexedDBBackingStore::Open( |
const std::string& origin_identifier, |
- const base::FilePath& path_base, |
+ const FilePath& path_base, |
const std::string& file_identifier, |
+ net::URLRequestContext* request_context, |
WebKit::WebIDBCallbacks::DataLoss* data_loss, |
- LevelDBFactory* leveldb_factory) { |
+ LevelDBFactory* leveldb_factory, |
+ base::TaskRunner* task_runner, |
+ bool clean_journal) { |
IDB_TRACE("IndexedDBBackingStore::Open"); |
DCHECK(!path_base.empty()); |
*data_loss = WebKit::WebIDBCallbacks::DataLossNone; |
@@ -486,9 +795,12 @@ scoped_refptr<IndexedDBBackingStore> IndexedDBBackingStore::Open( |
return scoped_refptr<IndexedDBBackingStore>(); |
} |
- base::FilePath identifier_path = |
- base::FilePath().AppendASCII(origin_identifier) |
- .AddExtension(FILE_PATH_LITERAL(".indexeddb.leveldb")); |
+ base::FilePath db_dir_name = |
+ FilePath().AppendASCII(origin_identifier). |
+ AddExtension(FILE_PATH_LITERAL(".indexeddb.leveldb")); |
+ base::FilePath blob_dir_name = |
+ FilePath().AppendASCII(origin_identifier). |
+ AddExtension(FILE_PATH_LITERAL(".indexeddb.blob")); |
int limit = file_util::GetMaximumPathComponentLength(path_base); |
if (limit == -1) { |
@@ -500,9 +812,9 @@ scoped_refptr<IndexedDBBackingStore> IndexedDBBackingStore::Open( |
limit = 255; |
#endif |
} |
- if (identifier_path.value().length() > static_cast<uint32_t>(limit)) { |
+ if (db_dir_name.value().length() > static_cast<uint32_t>(limit)) { |
DLOG(WARNING) << "Path component length (" |
- << identifier_path.value().length() << ") exceeds maximum (" |
+ << db_dir_name.value().length() << ") exceeds maximum (" |
<< limit << ") allowed by this filesystem."; |
const int min = 140; |
const int max = 300; |
@@ -510,7 +822,7 @@ scoped_refptr<IndexedDBBackingStore> IndexedDBBackingStore::Open( |
// TODO(dgrogan): Remove WebCore from these histogram names. |
UMA_HISTOGRAM_CUSTOM_COUNTS( |
"WebCore.IndexedDB.BackingStore.OverlyLargeOriginLength", |
- identifier_path.value().length(), |
+ db_dir_name.value().length(), |
min, |
max, |
num_buckets); |
@@ -526,7 +838,8 @@ scoped_refptr<IndexedDBBackingStore> IndexedDBBackingStore::Open( |
return scoped_refptr<IndexedDBBackingStore>(); |
} |
- base::FilePath file_path = path_base.Append(identifier_path); |
+ FilePath file_path = path_base.Append(db_dir_name); |
+ FilePath blob_path = path_base.Append(blob_dir_name); |
bool is_disk_full = false; |
scoped_ptr<LevelDBDatabase> db; |
@@ -635,7 +948,16 @@ scoped_refptr<IndexedDBBackingStore> IndexedDBBackingStore::Open( |
return scoped_refptr<IndexedDBBackingStore>(); |
} |
- return Create(file_identifier, db.Pass(), comparator.Pass()); |
+ scoped_refptr<IndexedDBBackingStore> backing_store = Create( |
+ file_identifier, blob_path, request_context, db.Pass(), comparator.Pass(), |
+ task_runner); |
+ |
+ if (clean_journal) |
+ fprintf(stderr, "ERICU: first open of DB since boot.\n"); |
+ if (clean_journal && !backing_store->CleanUpBlobJournal( |
+ LiveBlobJournalKey::Encode())) |
+ return scoped_refptr<IndexedDBBackingStore>(); |
+ return backing_store; |
} |
scoped_refptr<IndexedDBBackingStore> IndexedDBBackingStore::OpenInMemory( |
@@ -669,16 +991,21 @@ scoped_refptr<IndexedDBBackingStore> IndexedDBBackingStore::OpenInMemory( |
base::HistogramBase::kUmaTargetedHistogramFlag) |
->Add(INDEXED_DB_LEVEL_DB_BACKING_STORE_OPEN_MEMORY_SUCCESS); |
- return Create(file_identifier, db.Pass(), comparator.Pass()); |
+ return Create(file_identifier, FilePath(), NULL, db.Pass(), |
+ comparator.Pass(), NULL); |
} |
scoped_refptr<IndexedDBBackingStore> IndexedDBBackingStore::Create( |
const std::string& identifier, |
+ const FilePath& blob_path, |
+ net::URLRequestContext* request_context, |
scoped_ptr<LevelDBDatabase> db, |
- scoped_ptr<LevelDBComparator> comparator) { |
+ scoped_ptr<LevelDBComparator> comparator, |
+ base::TaskRunner* task_runner) { |
// TODO(jsbell): Handle comparator name changes. |
scoped_refptr<IndexedDBBackingStore> backing_store( |
- new IndexedDBBackingStore(identifier, db.Pass(), comparator.Pass())); |
+ new IndexedDBBackingStore(identifier, blob_path, request_context, |
+ db.Pass(), comparator.Pass(), task_runner)); |
if (!SetUpMetadata(backing_store->db_.get(), identifier)) |
return scoped_refptr<IndexedDBBackingStore>(); |
@@ -686,6 +1013,14 @@ scoped_refptr<IndexedDBBackingStore> IndexedDBBackingStore::Create( |
return backing_store; |
} |
+void IndexedDBBackingStore::GrantChildProcessPermissions(int child_process_id) { |
+ if (!child_process_ids_granted_.count(child_process_id)) { |
+ child_process_ids_granted_.insert(child_process_id); |
+ ChildProcessSecurityPolicyImpl::GetInstance()->GrantReadDirectory( |
+ child_process_id, blob_path_); |
+ } |
+} |
+ |
std::vector<string16> IndexedDBBackingStore::GetDatabaseNames() { |
std::vector<string16> found_names; |
const std::string start_key = |
@@ -763,6 +1098,32 @@ bool IndexedDBBackingStore::GetIDBDatabaseMetaData( |
return false; |
} |
+ int64 blob_key_generator_current_number = |
+ DatabaseMetaDataKey::kInvalidBlobKey; |
+ |
+ ok = GetVarInt(db_.get(), |
+ DatabaseMetaDataKey::Encode( |
+ metadata->id, |
+ DatabaseMetaDataKey::BLOB_KEY_GENERATOR_CURRENT_NUMBER), |
+ &blob_key_generator_current_number, |
+ found); |
+ if (!ok) { |
+ INTERNAL_READ_ERROR(GET_IDBDATABASE_METADATA); |
+ return false; |
+ } |
+ if (!*found || |
+ !DatabaseMetaDataKey::IsValidBlobKey(blob_key_generator_current_number)) { |
+ INTERNAL_CONSISTENCY_ERROR(GET_IDBDATABASE_METADATA); |
+ // TODO(ericu): If BLOB_KEY_GENERATOR_CURRENT_NUMBER isn't present, |
+ // initialize it to kBlobKeyGeneratorInitialNumber. We may also want to |
+ // verify that this object store predates blob support, or that there |
+ // aren't any blobs on disk. |
+ // This would be a read-modify-write, so we'd need a transaction, |
+ // and to double-check. It might just be easier to write it lazily |
+ // when we first try to increment it. |
+ return false; |
+ } |
+ |
return true; |
} |
@@ -818,6 +1179,11 @@ bool IndexedDBBackingStore::CreateIDBDatabaseMetaData(const string16& name, |
DatabaseMetaDataKey::Encode(*row_id, |
DatabaseMetaDataKey::USER_INT_VERSION), |
int_version); |
+ PutVarInt( |
+ transaction.get(), |
+ DatabaseMetaDataKey::Encode(*row_id, |
+ DatabaseMetaDataKey::BLOB_KEY_GENERATOR_CURRENT_NUMBER), |
+ DatabaseMetaDataKey::kBlobKeyGeneratorInitialNumber); |
if (!transaction->Commit()) { |
INTERNAL_WRITE_ERROR(CREATE_IDBDATABASE_METADATA); |
return false; |
@@ -850,20 +1216,100 @@ bool IndexedDBBackingStore::UpdateIDBDatabaseMetaData( |
return true; |
} |
-static void DeleteRange(LevelDBTransaction* transaction, |
- const std::string& begin, |
- const std::string& end) { |
+// Note that if you're deleting a range that contains user keys that have blob |
+// info, this won't clean up the blobs. |
+static void DeleteRangeHelper(LevelDBTransaction* transaction, |
+ const std::string& begin, |
+ const std::string& end) { |
scoped_ptr<LevelDBIterator> it = transaction->CreateIterator(); |
for (it->Seek(begin); it->IsValid() && CompareKeys(it->Key(), end) < 0; |
it->Next()) |
transaction->Remove(it->Key()); |
} |
+// For a whole-object-store deletion, we still use the one-blob-record-at-a-time |
+// deletion mechanism designed for normal transactions. We could go with the |
+// nuke-the-whole-directory method used for deleteDatabase, but that would |
+// complicate the kind of info we store in the LevelDBTransaction. |
+static void DeleteBlobsInObjectStore( |
+ IndexedDBBackingStore::Transaction* transaction, |
+ int64 database_id, int64 object_store_id) { |
+ fprintf(stderr, "ERICU: DeleteBlobsInObjectStore.\n"); |
+ std::string start_key, end_key; |
+ start_key = |
+ BlobEntryKey::EncodeMinForObjectStore(database_id, object_store_id); |
+ end_key = |
+ BlobEntryKey::EncodeMaxForObjectStore(database_id, object_store_id); |
+ |
+ scoped_ptr<LevelDBIterator> it = |
+ IndexedDBBackingStore::Transaction::LevelDBTransactionFrom( |
+ transaction)->CreateIterator(); |
+ for (it->Seek(start_key); |
+ it->IsValid() && CompareKeys(it->Key(), end_key) < 0; it->Next()) { |
+ StringPiece key_piece(it->Key()); |
+ std::string user_key = BlobEntryKey::EncodeToObjectStoreDataKey(&key_piece); |
+ if (user_key.size()) |
+ transaction->PutBlobInfo(database_id, object_store_id, user_key, NULL); |
+ else |
+ INTERNAL_CONSISTENCY_ERROR(GET_IDBDATABASE_METADATA); |
+ } |
+} |
+ |
+static bool GetBlobInfoForRecord( |
+ IndexedDBBackingStore* backing_store, |
+ LevelDBTransaction* leveldb_transaction, |
+ int64 database_id, |
+ const std::string& leveldb_key, |
+ IndexedDBValue* value) { |
+ |
+ BlobEntryKey blob_entry_key; |
+ StringPiece leveldb_key_piece(leveldb_key); |
+ if (!BlobEntryKey::FromObjectStoreDataKey( |
+ &leveldb_key_piece, &blob_entry_key)) { |
+ NOTREACHED(); |
+ return false; |
+ } |
+ scoped_ptr<LevelDBIterator> it = leveldb_transaction->CreateIterator(); |
+ std::string encoded_key = blob_entry_key.Encode(); |
+ it->Seek(encoded_key); |
+ if (it->IsValid() && CompareKeys(it->Key(), encoded_key) == 0) { |
+ if (!DecodeBlobData(it->Value().as_string(), &value->blob_info)) { |
+ INTERNAL_READ_ERROR(GET_BLOB_INFO_FOR_RECORD); |
+ return false; |
+ } |
+ std::vector<IndexedDBBlobInfo>::iterator iter; |
+ for (iter = value->blob_info.begin(); iter != value->blob_info.end(); |
+ ++iter) { |
+ iter->set_file_path( |
+ backing_store->GetIDBBlobFileName(database_id, iter->key())); |
+ iter->set_mark_used_callback( |
+ backing_store->active_blob_registry()->GetMarkUsedCallback( |
+ database_id, iter->key())); |
+ iter->set_release_callback( |
+ backing_store->active_blob_registry()->GetReleaseCallback( |
+ database_id, iter->key())); |
+ if (iter->is_file()) { |
+ base::PlatformFileInfo info; |
+ if (file_util::GetFileInfo(iter->file_path(), &info)) { |
+ // This should always work, but it isn't fatal if it doesn't; it just |
+ // means a potential slow synchronous call from the renderer later. |
+ iter->set_last_modified(info.last_modified); |
+ iter->set_size(info.size); |
+ } |
+ } |
+ } |
+ } |
+ return true; |
+} |
+ |
bool IndexedDBBackingStore::DeleteDatabase(const string16& name) { |
IDB_TRACE("IndexedDBBackingStore::DeleteDatabase"); |
scoped_ptr<LevelDBWriteOnlyTransaction> transaction = |
LevelDBWriteOnlyTransaction::Create(db_.get()); |
+ if (!CleanUpBlobJournal(BlobJournalKey::Encode())) |
+ return false; |
+ |
IndexedDBDatabaseMetadata metadata; |
bool success = false; |
bool ok = GetIDBDatabaseMetaData(name, &metadata, &success); |
@@ -885,10 +1331,24 @@ bool IndexedDBBackingStore::DeleteDatabase(const string16& name) { |
const std::string key = DatabaseNameKey::Encode(identifier_, name); |
transaction->Remove(key); |
+ bool need_cleanup = false; |
+ if (active_blob_registry()->MarkDeletedCheckIfUsed( |
+ metadata.id, DatabaseMetaDataKey::kAllBlobsKey)) { |
+ if (!MergeDatabaseIntoSecondaryJournal(transaction.get(), metadata.id)) |
+ return false; |
+ } else { |
+ UpdateBlobJournalWithDatabase(transaction.get(), metadata.id); |
+ need_cleanup = true; |
+ } |
+ |
if (!transaction->Commit()) { |
INTERNAL_WRITE_ERROR(DELETE_DATABASE); |
return false; |
} |
+ |
+ if (need_cleanup) |
+ CleanUpBlobJournal(BlobJournalKey::Encode()); |
+ |
return true; |
} |
@@ -981,7 +1441,7 @@ bool IndexedDBBackingStore::GetObjectStores( |
INTERNAL_CONSISTENCY_ERROR(GET_OBJECT_STORES); |
} |
- it->Next(); // Is evicatble. |
+ it->Next(); // Is evictable. |
if (!CheckObjectStoreAndMetaDataType(it.get(), |
stop_key, |
object_store_id, |
@@ -1170,7 +1630,8 @@ bool IndexedDBBackingStore::DeleteObjectStore( |
return false; |
} |
- DeleteRange( |
+ DeleteBlobsInObjectStore(transaction, database_id, object_store_id); |
+ DeleteRangeHelper( |
leveldb_transaction, |
ObjectStoreMetaDataKey::Encode(database_id, object_store_id, 0), |
ObjectStoreMetaDataKey::EncodeMaxKey(database_id, object_store_id)); |
@@ -1178,12 +1639,14 @@ bool IndexedDBBackingStore::DeleteObjectStore( |
leveldb_transaction->Remove( |
ObjectStoreNamesKey::Encode(database_id, object_store_name)); |
- DeleteRange(leveldb_transaction, |
- IndexFreeListKey::Encode(database_id, object_store_id, 0), |
- IndexFreeListKey::EncodeMaxKey(database_id, object_store_id)); |
- DeleteRange(leveldb_transaction, |
- IndexMetaDataKey::Encode(database_id, object_store_id, 0, 0), |
- IndexMetaDataKey::EncodeMaxKey(database_id, object_store_id)); |
+ DeleteRangeHelper( |
+ leveldb_transaction, |
+ IndexFreeListKey::Encode(database_id, object_store_id, 0), |
+ IndexFreeListKey::EncodeMaxKey(database_id, object_store_id)); |
+ DeleteRangeHelper( |
+ leveldb_transaction, |
+ IndexMetaDataKey::Encode(database_id, object_store_id, 0, 0), |
+ IndexMetaDataKey::EncodeMaxKey(database_id, object_store_id)); |
return ClearObjectStore(transaction, database_id, object_store_id); |
} |
@@ -1193,7 +1656,7 @@ bool IndexedDBBackingStore::GetRecord( |
int64 database_id, |
int64 object_store_id, |
const IndexedDBKey& key, |
- std::string* record) { |
+ IndexedDBValue* record) { |
IDB_TRACE("IndexedDBBackingStore::GetRecord"); |
if (!KeyPrefix::ValidIds(database_id, object_store_id)) |
return false; |
@@ -1226,8 +1689,9 @@ bool IndexedDBBackingStore::GetRecord( |
return false; |
} |
- *record = slice.as_string(); |
- return true; |
+ record->bits = slice.as_string(); |
+ return GetBlobInfoForRecord( |
+ this, leveldb_transaction, database_id, leveldb_key, record); |
} |
WARN_UNUSED_RESULT static bool GetNewVersionNumber( |
@@ -1266,9 +1730,10 @@ bool IndexedDBBackingStore::PutRecord( |
int64 database_id, |
int64 object_store_id, |
const IndexedDBKey& key, |
- const std::string& value, |
+ IndexedDBValue& value, |
RecordIdentifier* record_identifier) { |
IDB_TRACE("IndexedDBBackingStore::PutRecord"); |
+ fprintf(stderr, "ERICU: PutRecord.\n"); |
if (!KeyPrefix::ValidIds(database_id, object_store_id)) |
return false; |
DCHECK(key.IsValid()); |
@@ -1281,14 +1746,16 @@ bool IndexedDBBackingStore::PutRecord( |
if (!ok) |
return false; |
DCHECK_GE(version, 0); |
- const std::string object_storedata_key = |
+ const std::string object_store_data_key = |
ObjectStoreDataKey::Encode(database_id, object_store_id, key); |
std::string v; |
EncodeVarInt(version, &v); |
- v.append(value); |
+ v.append(value.bits); |
- leveldb_transaction->Put(object_storedata_key, &v); |
+ leveldb_transaction->Put(object_store_data_key, &v); |
+ transaction->PutBlobInfo(database_id, object_store_id, object_store_data_key, |
+ &value.blob_info); |
const std::string exists_entry_key = |
ExistsEntryKey::Encode(database_id, object_store_id, key); |
@@ -1316,7 +1783,8 @@ bool IndexedDBBackingStore::ClearObjectStore( |
const std::string stop_key = |
KeyPrefix(database_id, object_store_id + 1).Encode(); |
- DeleteRange(leveldb_transaction, start_key, stop_key); |
+ DeleteRangeHelper(leveldb_transaction, start_key, stop_key); |
+ DeleteBlobsInObjectStore(transaction, database_id, object_store_id); |
return true; |
} |
@@ -1325,6 +1793,7 @@ bool IndexedDBBackingStore::DeleteRecord( |
int64 database_id, |
int64 object_store_id, |
const RecordIdentifier& record_identifier) { |
+ fprintf(stderr, "ERICU: DeleteRecord.\n"); |
IDB_TRACE("IndexedDBBackingStore::DeleteRecord"); |
if (!KeyPrefix::ValidIds(database_id, object_store_id)) |
return false; |
@@ -1334,6 +1803,8 @@ bool IndexedDBBackingStore::DeleteRecord( |
const std::string object_store_data_key = ObjectStoreDataKey::Encode( |
database_id, object_store_id, record_identifier.primary_key()); |
leveldb_transaction->Remove(object_store_data_key); |
+ transaction->PutBlobInfo(database_id, object_store_id, object_store_data_key, |
+ NULL); |
const std::string exists_entry_key = ExistsEntryKey::Encode( |
database_id, object_store_id, record_identifier.primary_key()); |
@@ -1341,6 +1812,59 @@ bool IndexedDBBackingStore::DeleteRecord( |
return true; |
} |
+bool IndexedDBBackingStore::DeleteRange( |
+ IndexedDBBackingStore::Transaction* transaction, |
+ int64 database_id, |
+ int64 object_store_id, |
+ const IndexedDBKeyRange& key_range) { |
+ fprintf(stderr, "ERICU: DeleteRange.\n"); |
+ scoped_ptr<IndexedDBBackingStore::Cursor> backing_store_cursor = |
+ OpenObjectStoreCursor(transaction, database_id, object_store_id, |
+ key_range, indexed_db::CURSOR_NEXT); |
+ if (backing_store_cursor) { |
+ do { |
+ if (!DeleteRecord( |
+ transaction, database_id, object_store_id, |
+ backing_store_cursor->record_identifier())) |
+ return false; |
+ } while (backing_store_cursor->Continue()); |
+ } |
+ |
+ std::string blob_lower = |
+ BlobEntryKey::Encode(database_id, object_store_id, |
+ key_range.lower()); |
+ std::string blob_upper = |
+ BlobEntryKey::Encode(database_id, object_store_id, |
+ key_range.upper()); |
+ LevelDBTransaction* leveldb_transaction = |
+ IndexedDBBackingStore::Transaction::LevelDBTransactionFrom(transaction); |
+ scoped_ptr<LevelDBIterator> it = leveldb_transaction->CreateIterator(); |
+ it->Seek(blob_lower); |
+ while (it->IsValid() && |
+ ((key_range.lowerOpen() && |
+ CompareKeys(it->Key(), blob_lower) <= 0) || |
+ (!key_range.lowerOpen() && |
+ CompareKeys(it->Key(), blob_lower) < 0))) |
  [review] jsbell 2013/09/13 00:12:21: Given it->Seek(blob_lower), can CompareKeys(it->Ke…
  [review] ericu 2013/11/20 23:05:39: Nope; fixed.
+ it->Next(); |
+ |
+ while (it->IsValid() && |
+ ((key_range.upperOpen() && |
+ CompareKeys(it->Key(), blob_upper) < 0) || |
+ (!key_range.upperOpen() && |
+ CompareKeys(it->Key(), blob_upper) <= 0))) { |
+ StringPiece key_piece(it->Key()); |
+ std::string object_store_data_key = |
+ BlobEntryKey::EncodeToObjectStoreDataKey(&key_piece); |
+ if (!object_store_data_key.size()) |
+ return false; |
+ transaction->PutBlobInfo(database_id, object_store_id, |
+ object_store_data_key, NULL); |
+ it->Next(); |
+ } |
+ |
+ return true; |
+} |
+ |
bool IndexedDBBackingStore::GetKeyGeneratorCurrentNumber( |
IndexedDBBackingStore::Transaction* transaction, |
int64 database_id, |
@@ -1480,6 +2004,278 @@ bool IndexedDBBackingStore::KeyExistsInObjectStore( |
return true; |
} |
+class IndexedDBBackingStore::Transaction::ChainedBlobWriter : public |
+ base::RefCounted<IndexedDBBackingStore::Transaction::ChainedBlobWriter> { |
+ public: |
+ typedef IndexedDBBackingStore::Transaction::WriteDescriptorVec |
+ WriteDescriptorVec; |
+ ChainedBlobWriter( |
+ int64 database_id, |
+ IndexedDBBackingStore* backingStore, |
+ WriteDescriptorVec& blobs, |
+ scoped_refptr<IndexedDBBackingStore::BlobWriteCallback> callback) |
+ : waiting_for_callback_(false), |
+ database_id_(database_id), |
+ backing_store_(backingStore), |
+ callback_(callback), |
+ aborted_(false) { |
+ blobs_.swap(blobs); |
+ iter_ = blobs_.begin(); |
+ WriteNextFile(); |
+ } |
+ |
+ void set_delegate(scoped_ptr<FileWriterDelegate> delegate) { |
+ delegate_.reset(delegate.release()); |
+ } |
+ |
+ void ReportWriteCompletion(bool succeeded, int64 bytes_written) { |
+ // TODO(ericu): Is there any need to check bytes_written against what we |
+ // know, if we know it? |
+ DCHECK(waiting_for_callback_); |
+ DCHECK(bytes_written >= 0); |
+ waiting_for_callback_ = false; |
+ content::BrowserThread::DeleteSoon( |
+ content::BrowserThread::IO, FROM_HERE, |
+ delegate_.release()); |
+ if (aborted_) { |
+ self_ref_ = NULL; |
+ return; |
+ } |
+ if (succeeded) |
+ WriteNextFile(); |
+ else |
+ callback_->didFail(); |
+ } |
+ |
+ void Abort() { |
+ if (!waiting_for_callback_) |
+ return; |
+ self_ref_ = this; |
+ aborted_ = true; |
+ } |
+ |
+ private: |
+ void WriteNextFile() { |
+ DCHECK(!waiting_for_callback_); |
+ DCHECK(!aborted_); |
+ if (iter_ == blobs_.end()) { |
+ callback_->didSucceed(); |
+ self_ref_ = NULL; |
+ } else { |
+ if (!backing_store_->WriteBlobFile(database_id_, *iter_, this)) { |
+ callback_->didFail(); |
+ return; |
+ } |
+ waiting_for_callback_ = true; |
+ ++iter_; |
+ } |
+ } |
+ |
+ bool waiting_for_callback_; |
+ scoped_refptr<ChainedBlobWriter> self_ref_; |
+ WriteDescriptorVec blobs_; |
+ WriteDescriptorVec::const_iterator iter_; |
+ int64 database_id_; |
+ IndexedDBBackingStore* backing_store_; |
+ scoped_refptr<IndexedDBBackingStore::BlobWriteCallback> callback_; |
+ scoped_ptr<FileWriterDelegate> delegate_; |
+ bool aborted_; |
+}; |
+ |
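In brief (editorial summary of the class above, not part of the patch): constructing a ChainedBlobWriter immediately starts writing the first WriteDescriptor via IndexedDBBackingStore::WriteBlobFile. Each ReportWriteCompletion either advances to the next descriptor or calls callback_->didFail(), and callback_->didSucceed() fires once the vector is exhausted. Abort() only has an effect while a write is in flight; in that case self_ref_ keeps the writer alive until the outstanding completion callback lands, after which it drops the reference without starting further writes.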
+class LocalWriteClosure : public FileWriterDelegate::DelegateWriteCallback, |
+ public base::RefCounted<LocalWriteClosure> { |
+ public: |
+ LocalWriteClosure( |
+ IndexedDBBackingStore::Transaction::ChainedBlobWriter* |
+ chained_blob_writer_, |
+ base::TaskRunner* task_runner) |
+ : chained_blob_writer_(chained_blob_writer_), |
+ task_runner_(task_runner), |
+ bytes_written_(-1) { |
+ } |
+ |
+ void Run( |
+ base::PlatformFileError rv, |
+ int64 bytes, |
+ FileWriterDelegate::WriteProgressStatus write_status) { |
+ if (write_status == FileWriterDelegate::SUCCESS_IO_PENDING) |
+ return; // We don't care about progress events. |
+ if (rv == base::PLATFORM_FILE_OK) { |
+ DCHECK(bytes >= 0); |
+ DCHECK(write_status == FileWriterDelegate::SUCCESS_COMPLETED); |
+ bytes_written_ = bytes; |
+ } else { |
+ DCHECK(write_status == FileWriterDelegate::ERROR_WRITE_STARTED || |
+ write_status == FileWriterDelegate::ERROR_WRITE_NOT_STARTED); |
+ } |
+ task_runner_->PostTask( |
+ FROM_HERE, |
+ base::Bind( |
+ &LocalWriteClosure::callBlobCallbackOnIDBTaskRunner, this, |
+ write_status == FileWriterDelegate::SUCCESS_COMPLETED)); |
+ } |
+ |
+ void writeBlobToFileOnIOThread( |
+ const FilePath& file_path, const GURL& blob_url, |
+ net::URLRequestContext* request_context) { |
+ DCHECK(content::BrowserThread::CurrentlyOn(content::BrowserThread::IO)); |
+ scoped_ptr<fileapi::FileStreamWriter> writer( |
+ new fileapi::LocalFileStreamWriter( |
+ task_runner_, file_path, 0, false)); |
+ scoped_ptr<FileWriterDelegate> delegate( |
+ new FileWriterDelegate(writer.Pass())); |
+ |
+ DCHECK(blob_url.is_valid()); |
+ scoped_ptr<net::URLRequest> blob_request(request_context->CreateRequest( |
+ blob_url, delegate.get())); |
+ |
+ delegate->Start(blob_request.Pass(), |
+ base::Bind(&LocalWriteClosure::Run, this)); |
+ chained_blob_writer_->set_delegate(delegate.Pass()); |
+ } |
+ |
+ private: |
+ void callBlobCallbackOnIDBTaskRunner(bool succeeded) { |
+ DCHECK(task_runner_->RunsTasksOnCurrentThread()); |
+ chained_blob_writer_->ReportWriteCompletion(succeeded, bytes_written_); |
+ } |
+ |
+ IndexedDBBackingStore::Transaction::ChainedBlobWriter* chained_blob_writer_; |
+ base::TaskRunner* task_runner_; |
+ int64 bytes_written_; |
+}; |
+ |
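In brief (editorial summary, not part of the patch): WriteBlobFile below posts writeBlobToFileOnIOThread to the IO thread, which builds a LocalFileStreamWriter plus FileWriterDelegate, starts a URLRequest for the blob URL, and hands delegate ownership to the ChainedBlobWriter. Run() is then invoked on the IO thread as the write progresses; on a terminal status it posts callBlobCallbackOnIDBTaskRunner back to the IndexedDB task runner, which calls ReportWriteCompletion, and ReportWriteCompletion in turn deletes the delegate back on the IO thread via DeleteSoon.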
+bool IndexedDBBackingStore::WriteBlobFile( |
+ int64 database_id, |
+ const Transaction::WriteDescriptor& descriptor, |
+ Transaction::ChainedBlobWriter* chained_blob_writer) { |
+ |
+ if (!MakeIDBBlobDirectory(blob_path_, database_id, descriptor.key())) |
+ return false; |
+ |
+ FilePath path = GetIDBBlobFileName(database_id, descriptor.key()); |
+ |
+ if (descriptor.is_file()) { |
+ // TODO(ericu): Should we validate the snapshot date here? |
+ DCHECK(!descriptor.file_path().empty()); |
+ if (!base::CopyFile(descriptor.file_path(), path)) |
+ return false; |
+ |
+ base::PlatformFileInfo info; |
+ if (file_util::GetFileInfo(descriptor.file_path(), &info)) { |
+ if (!file_util::TouchFile(path, info.last_accessed, info.last_modified)) |
+ ; // TODO(ericu): Complain quietly; timestamp's probably not vital. |
+ } else { |
+ ; // TODO(ericu): Complain quietly; timestamp's probably not vital. |
+ } |
+ |
+ task_runner_->PostTask( |
+ FROM_HERE, |
+ base::Bind( |
+ &Transaction::ChainedBlobWriter::ReportWriteCompletion, |
+ chained_blob_writer, true, info.size)); |
+ } else { |
+ DCHECK(descriptor.url().is_valid()); |
+ scoped_refptr<LocalWriteClosure> write_closure( |
+ new LocalWriteClosure(chained_blob_writer, task_runner_)); |
+ content::BrowserThread::PostTask( |
+ content::BrowserThread::IO, FROM_HERE, |
+ base::Bind( |
+ &LocalWriteClosure::writeBlobToFileOnIOThread, write_closure.get(), |
+ path, descriptor.url(), request_context_)); |
+ } |
+ return true; |
+} |
+ |
+void IndexedDBBackingStore::ReportBlobUnused( |
+ int64 database_id, |
+ int64 blob_key) { |
+ fprintf(stderr, "ERICU: ReportBlobUnused(%ld, %ld).\n", database_id, |
+ blob_key); |
+ DCHECK(KeyPrefix::IsValidDatabaseId(database_id)); |
+ bool all_blobs = blob_key == DatabaseMetaDataKey::kAllBlobsKey; |
+ DCHECK(all_blobs || DatabaseMetaDataKey::IsValidBlobKey(blob_key)); |
+ scoped_refptr<LevelDBTransaction> transaction = |
+ new LevelDBTransaction(db_.get()); |
+ |
+ std::string secondary_key = LiveBlobJournalKey::Encode(); |
+ IndexedDBBackingStore::Transaction::BlobJournalType secondary_journal; |
+ if (!GetBlobJournal(secondary_key, transaction.get(), secondary_journal)) |
+ return; |
+ DCHECK(secondary_journal.size()); |
+ |
+ std::string primary_key = BlobJournalKey::Encode(); |
+ IndexedDBBackingStore::Transaction::BlobJournalType primary_journal; |
+ if (!GetBlobJournal(primary_key, transaction.get(), primary_journal)) |
+ return; |
+ |
+ IndexedDBBackingStore::Transaction::BlobJournalType::iterator journal_iter; |
+ // There are several cases to handle. If blob_key is kAllBlobsKey, we want to |
+ // remove all entries with database_id from the secondary journal and add only |
+ // kAllBlobsKey to the primary journal. Otherwise if IsValidBlobKey(blob_key) |
+ // and we hit kAllBlobsKey for the right database_id in the journal, we leave |
+ // the kAllBlobsKey entry in the secondary journal but add the specific blob |
+ // to the primary. Otherwise if IsValidBlobKey(blob_key) and we find a |
+ // matching (database_id, blob_key) tuple, we should move it to the primary |
+ // journal. |
+ fprintf(stderr, "ERICU: journal sizes are: %ld, %ld.\n", |
+ primary_journal.size(), secondary_journal.size()); |
+ IndexedDBBackingStore::Transaction::BlobJournalType new_secondary_journal; |
+ for (journal_iter = secondary_journal.begin(); |
+ journal_iter != secondary_journal.end(); ++journal_iter) { |
+ int64 current_database_id = journal_iter->first; |
+ int64 current_blob_key = journal_iter->second; |
+ bool current_all_blobs = |
+ current_blob_key == DatabaseMetaDataKey::kAllBlobsKey; |
+ DCHECK(KeyPrefix::IsValidDatabaseId(current_database_id) || |
+ current_all_blobs); |
+ if (current_database_id == database_id && (all_blobs || |
+ current_all_blobs || blob_key == current_blob_key)) { |
+ if (!all_blobs) { |
+ fprintf(stderr, "ERICU: moving to primary: %ld, %ld.\n", database_id, |
+ blob_key); |
+ primary_journal.push_back( |
+ std::make_pair(database_id, current_blob_key)); |
+ if (current_all_blobs) |
+ new_secondary_journal.push_back(*journal_iter); |
+ new_secondary_journal.insert(new_secondary_journal.end(), |
+ ++journal_iter, secondary_journal.end()); // All the rest. |
+ break; |
+ } else { |
+ fprintf(stderr, "ERICU: was all_blobs\n"); |
+ } |
+ } else { |
+ fprintf(stderr, "ERICU: leaving in secondary: %ld, %ld.\n", database_id, |
+ blob_key); |
+ new_secondary_journal.push_back(*journal_iter); |
+ } |
+ } |
+ if (all_blobs) { |
+ primary_journal.push_back(std::make_pair( |
+ database_id, DatabaseMetaDataKey::kAllBlobsKey)); |
+ } |
+ fprintf(stderr, "ERICU: journal sizes are now: %ld, %ld.\n", |
+ primary_journal.size(), new_secondary_journal.size()); |
+ UpdatePrimaryJournalWithBlobList(transaction.get(), primary_journal); |
+ UpdateSecondaryJournalWithBlobList(transaction.get(), new_secondary_journal); |
+ transaction->Commit(); |
+ fprintf(stderr, "ERICU: committed journal updates. Cleaning in 5 sec.\n"); |
+ // We could just do the deletions/cleaning here, but if there are a lot of |
+ // blobs about to be garbage collected, it'd be better to wait and do them all |
+ // at once. |
+ journal_cleaning_timer_.Start( |
+ FROM_HERE, base::TimeDelta::FromSeconds(5), this, |
  [review] jsbell 2013/09/13 00:12:21: I just landed a change which tweaks the lifetime r…
  [review] ericu 2013/11/20 23:05:39: OK, we should talk about what the right thing to d…
  [review] ericu 2013/12/02 23:35:19: Addressed in patch set 57.
+ &IndexedDBBackingStore::CleanPrimaryJournalIgnoreReturn); |
+} |
+ |
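A worked example of the case analysis above (editorial illustration, not part of the patch): if the live (secondary) journal holds (5, 10), (5, 11), (6, kAllBlobsKey) and ReportBlobUnused(5, 10) runs, then (5, 10) moves to the primary journal while (5, 11) and (6, kAllBlobsKey) are kept in the rewritten secondary journal. Calling ReportBlobUnused(5, kAllBlobsKey) instead drops every database-5 entry from the secondary journal and appends the single entry (5, kAllBlobsKey) to the primary journal, so the timer-driven sweep ends up removing the whole database-5 blob directory.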
+// This assumes a file path of dbId/second-lowest-byte-of-counter/counter |
+// (the middle path component is (key & 0xff00) >> 8). |
+FilePath IndexedDBBackingStore::GetIDBBlobFileName( |
+ int64 database_id, int64 key) { |
+ FilePath path = GetIDBBlobDirectoryNameForKey(blob_path_, database_id, key); |
+ path = path.AppendASCII(base::StringPrintf("%lx", key)); |
+ return path; |
+} |
+ |
static bool CheckIndexAndMetaDataKey(const LevelDBIterator* it, |
const std::string& stop_key, |
int64 index_id, |
@@ -1582,6 +2378,52 @@ bool IndexedDBBackingStore::GetIndexes( |
return true; |
} |
+bool IndexedDBBackingStore::RemoveBlobFile(int64 database_id, int64 key) { |
+ FilePath fileName = GetIDBBlobFileName(database_id, key); |
+ return base::DeleteFile(fileName, false); |
+} |
+ |
+bool IndexedDBBackingStore::RemoveBlobDirectory(int64 database_id) { |
+ FilePath dirName = GetIDBBlobDirectoryName(blob_path_, database_id); |
+ return base::DeleteFile(dirName, true); |
+} |
+ |
+bool IndexedDBBackingStore::CleanUpBlobJournal( |
+ const std::string& level_db_key) { |
+ scoped_refptr<LevelDBTransaction> journal_transaction = |
+ new LevelDBTransaction(db_.get()); |
+ IndexedDBBackingStore::Transaction::BlobJournalType journal; |
+ if (!GetBlobJournal(level_db_key, journal_transaction.get(), |
+ journal)) { |
+ return false; |
+ } |
+ if (!journal.size()) { |
+ return true; |
+ } |
+ IndexedDBBackingStore::Transaction::BlobJournalType::iterator journal_iter; |
+ for (journal_iter = journal.begin(); journal_iter != journal.end(); |
+ ++journal_iter) { |
+ int64 database_id = journal_iter->first; |
+ int64 blob_key = journal_iter->second; |
+ DCHECK(KeyPrefix::IsValidDatabaseId(database_id)); |
+ if (blob_key == DatabaseMetaDataKey::kAllBlobsKey) { |
+ RemoveBlobDirectory(database_id); |
+ } else { |
+ DCHECK(DatabaseMetaDataKey::IsValidBlobKey(blob_key)); |
+ RemoveBlobFile(database_id, blob_key); |
+ } |
+ } |
+ ClearBlobJournal(journal_transaction.get(), level_db_key); |
+ return journal_transaction->Commit(); |
+} |
+ |
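In other words (editorial note): replaying the journal turns an entry (database_id, kAllBlobsKey) into RemoveBlobDirectory(database_id) and an ordinary entry (database_id, blob_key) into RemoveBlobFile(database_id, blob_key); only after every entry has been processed is the journal key cleared and the transaction committed.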
+void IndexedDBBackingStore::CleanPrimaryJournalIgnoreReturn() { |
+ fprintf(stderr, "ERICU: Cleaning primary journal.\n"); |
+ CleanUpBlobJournal(BlobJournalKey::Encode()); |
+} |
+ |
WARN_UNUSED_RESULT static bool SetMaxIndexId(LevelDBTransaction* transaction, |
int64 database_id, |
int64 object_store_id, |
@@ -1656,13 +2498,14 @@ bool IndexedDBBackingStore::DeleteIndex( |
IndexMetaDataKey::Encode(database_id, object_store_id, index_id, 0); |
const std::string index_meta_data_end = |
IndexMetaDataKey::EncodeMaxKey(database_id, object_store_id, index_id); |
- DeleteRange(leveldb_transaction, index_meta_data_start, index_meta_data_end); |
+ DeleteRangeHelper(leveldb_transaction, index_meta_data_start, |
+ index_meta_data_end); |
const std::string index_data_start = |
IndexDataKey::EncodeMinKey(database_id, object_store_id, index_id); |
const std::string index_data_end = |
IndexDataKey::EncodeMaxKey(database_id, object_store_id, index_id); |
- DeleteRange(leveldb_transaction, index_data_start, index_data_end); |
+ DeleteRangeHelper(leveldb_transaction, index_data_start, index_data_end); |
return true; |
} |
@@ -1883,7 +2726,9 @@ bool IndexedDBBackingStore::KeyExistsInIndex( |
IndexedDBBackingStore::Cursor::Cursor( |
const IndexedDBBackingStore::Cursor* other) |
- : transaction_(other->transaction_), |
+ : backing_store_(other->backing_store_), |
+ transaction_(other->transaction_), |
+ database_id_(other->database_id_), |
cursor_options_(other->cursor_options_), |
current_key_(new IndexedDBKey(*other->current_key_)) { |
if (other->iterator_) { |
@@ -1896,9 +2741,16 @@ IndexedDBBackingStore::Cursor::Cursor( |
} |
} |
-IndexedDBBackingStore::Cursor::Cursor(LevelDBTransaction* transaction, |
- const CursorOptions& cursor_options) |
- : transaction_(transaction), cursor_options_(cursor_options) {} |
+IndexedDBBackingStore::Cursor::Cursor( |
+ scoped_refptr<IndexedDBBackingStore> backing_store, |
+ LevelDBTransaction* transaction, |
+ int64 database_id, |
+ const CursorOptions& cursor_options) |
+ : backing_store_(backing_store), |
+ transaction_(transaction), |
+ database_id_(database_id), |
+ cursor_options_(cursor_options) { |
+} |
IndexedDBBackingStore::Cursor::~Cursor() {} |
bool IndexedDBBackingStore::Cursor::FirstSeek() { |
@@ -2062,16 +2914,19 @@ IndexedDBBackingStore::Cursor::record_identifier() const { |
class ObjectStoreKeyCursorImpl : public IndexedDBBackingStore::Cursor { |
public: |
ObjectStoreKeyCursorImpl( |
+ scoped_refptr<IndexedDBBackingStore> backing_store, |
LevelDBTransaction* transaction, |
+ int64 database_id, |
const IndexedDBBackingStore::Cursor::CursorOptions& cursor_options) |
- : IndexedDBBackingStore::Cursor(transaction, cursor_options) {} |
+ : IndexedDBBackingStore::Cursor( |
+ backing_store, transaction, database_id, cursor_options) {} |
virtual Cursor* Clone() OVERRIDE { |
return new ObjectStoreKeyCursorImpl(this); |
} |
// IndexedDBBackingStore::Cursor |
- virtual std::string* Value() OVERRIDE { |
+ virtual IndexedDBValue* Value() OVERRIDE { |
NOTREACHED(); |
return NULL; |
} |
@@ -2116,14 +2971,17 @@ bool ObjectStoreKeyCursorImpl::LoadCurrentRow() { |
class ObjectStoreCursorImpl : public IndexedDBBackingStore::Cursor { |
public: |
ObjectStoreCursorImpl( |
+ scoped_refptr<IndexedDBBackingStore> backing_store, |
LevelDBTransaction* transaction, |
+ int64 database_id, |
const IndexedDBBackingStore::Cursor::CursorOptions& cursor_options) |
- : IndexedDBBackingStore::Cursor(transaction, cursor_options) {} |
+ : IndexedDBBackingStore::Cursor( |
+ backing_store, transaction, database_id, cursor_options) {} |
virtual Cursor* Clone() OVERRIDE { return new ObjectStoreCursorImpl(this); } |
// IndexedDBBackingStore::Cursor |
- virtual std::string* Value() OVERRIDE { return ¤t_value_; } |
+ virtual IndexedDBValue* Value() OVERRIDE { return ¤t_value_; } |
virtual bool LoadCurrentRow() OVERRIDE; |
protected: |
@@ -2137,13 +2995,13 @@ class ObjectStoreCursorImpl : public IndexedDBBackingStore::Cursor { |
: IndexedDBBackingStore::Cursor(other), |
current_value_(other->current_value_) {} |
- std::string current_value_; |
+ IndexedDBValue current_value_; |
}; |
bool ObjectStoreCursorImpl::LoadCurrentRow() { |
- StringPiece slice(iterator_->Key()); |
+ StringPiece key_slice(iterator_->Key()); |
ObjectStoreDataKey object_store_data_key; |
- if (!ObjectStoreDataKey::Decode(&slice, &object_store_data_key)) { |
+ if (!ObjectStoreDataKey::Decode(&key_slice, &object_store_data_key)) { |
INTERNAL_READ_ERROR(LOAD_CURRENT_ROW); |
return false; |
} |
@@ -2151,8 +3009,8 @@ bool ObjectStoreCursorImpl::LoadCurrentRow() { |
current_key_ = object_store_data_key.user_key(); |
int64 version; |
- slice = StringPiece(iterator_->Value()); |
- if (!DecodeVarInt(&slice, &version)) { |
+ StringPiece value_slice = StringPiece(iterator_->Value()); |
+ if (!DecodeVarInt(&value_slice, &version)) { |
INTERNAL_READ_ERROR(LOAD_CURRENT_ROW); |
return false; |
} |
@@ -2162,29 +3020,34 @@ bool ObjectStoreCursorImpl::LoadCurrentRow() { |
EncodeIDBKey(*current_key_, &encoded_key); |
record_identifier_.Reset(encoded_key, version); |
- current_value_ = slice.as_string(); |
- return true; |
+ // TODO(ericu): Don't set bits until we know we've succeeded? |
+ current_value_.bits = value_slice.as_string(); |
+ return GetBlobInfoForRecord(backing_store_, transaction_, database_id_, |
+ iterator_->Key().as_string(), ¤t_value_); |
} |
class IndexKeyCursorImpl : public IndexedDBBackingStore::Cursor { |
public: |
IndexKeyCursorImpl( |
+ scoped_refptr<IndexedDBBackingStore> backing_store, |
LevelDBTransaction* transaction, |
+ int64 database_id, |
const IndexedDBBackingStore::Cursor::CursorOptions& cursor_options) |
- : IndexedDBBackingStore::Cursor(transaction, cursor_options) {} |
+ : IndexedDBBackingStore::Cursor( |
+ backing_store, transaction, database_id, cursor_options) {} |
virtual Cursor* Clone() OVERRIDE { return new IndexKeyCursorImpl(this); } |
// IndexedDBBackingStore::Cursor |
- virtual std::string* Value() OVERRIDE { |
+ virtual IndexedDBValue* Value() OVERRIDE { |
NOTREACHED(); |
return NULL; |
} |
virtual const IndexedDBKey& primary_key() const OVERRIDE { |
return *primary_key_; |
} |
- virtual const IndexedDBBackingStore::RecordIdentifier& RecordIdentifier() |
- const { |
+ virtual const IndexedDBBackingStore::RecordIdentifier& record_identifier() |
+ const OVERRIDE { |
NOTREACHED(); |
return record_identifier_; |
} |
@@ -2268,19 +3131,22 @@ bool IndexKeyCursorImpl::LoadCurrentRow() { |
class IndexCursorImpl : public IndexedDBBackingStore::Cursor { |
public: |
IndexCursorImpl( |
+ scoped_refptr<IndexedDBBackingStore> backing_store, |
LevelDBTransaction* transaction, |
+ int64 database_id, |
const IndexedDBBackingStore::Cursor::CursorOptions& cursor_options) |
- : IndexedDBBackingStore::Cursor(transaction, cursor_options) {} |
+ : IndexedDBBackingStore::Cursor( |
+ backing_store, transaction, database_id, cursor_options) {} |
virtual Cursor* Clone() OVERRIDE { return new IndexCursorImpl(this); } |
// IndexedDBBackingStore::Cursor |
- virtual std::string* Value() OVERRIDE { return ¤t_value_; } |
+ virtual IndexedDBValue* Value() OVERRIDE { return ¤t_value_; } |
virtual const IndexedDBKey& primary_key() const OVERRIDE { |
return *primary_key_; |
} |
- virtual const IndexedDBBackingStore::RecordIdentifier& RecordIdentifier() |
- const { |
+ virtual const IndexedDBBackingStore::RecordIdentifier& record_identifier() |
+ const OVERRIDE { |
NOTREACHED(); |
return record_identifier_; |
} |
@@ -2302,7 +3168,7 @@ class IndexCursorImpl : public IndexedDBBackingStore::Cursor { |
primary_leveldb_key_(other->primary_leveldb_key_) {} |
scoped_ptr<IndexedDBKey> primary_key_; |
- std::string current_value_; |
+ IndexedDBValue current_value_; |
std::string primary_leveldb_key_; |
}; |
@@ -2328,6 +3194,7 @@ bool IndexCursorImpl::LoadCurrentRow() { |
return false; |
} |
+ DCHECK_EQ(index_data_key.DatabaseId(), database_id_); |
primary_leveldb_key_ = |
ObjectStoreDataKey::Encode(index_data_key.DatabaseId(), |
index_data_key.ObjectStoreId(), |
@@ -2361,8 +3228,9 @@ bool IndexCursorImpl::LoadCurrentRow() { |
return false; |
} |
- current_value_ = slice.as_string(); |
- return true; |
+ current_value_.bits = slice.as_string(); |
+ return GetBlobInfoForRecord(backing_store_, transaction_, database_id_, |
+ primary_leveldb_key_, ¤t_value_); |
} |
bool ObjectStoreCursorOptions( |
@@ -2520,7 +3388,8 @@ IndexedDBBackingStore::OpenObjectStoreCursor( |
&cursor_options)) |
return scoped_ptr<IndexedDBBackingStore::Cursor>(); |
scoped_ptr<ObjectStoreCursorImpl> cursor( |
- new ObjectStoreCursorImpl(leveldb_transaction, cursor_options)); |
+ new ObjectStoreCursorImpl( |
+ this, leveldb_transaction, database_id, cursor_options)); |
if (!cursor->FirstSeek()) |
return scoped_ptr<IndexedDBBackingStore::Cursor>(); |
@@ -2546,7 +3415,8 @@ IndexedDBBackingStore::OpenObjectStoreKeyCursor( |
&cursor_options)) |
return scoped_ptr<IndexedDBBackingStore::Cursor>(); |
scoped_ptr<ObjectStoreKeyCursorImpl> cursor( |
- new ObjectStoreKeyCursorImpl(leveldb_transaction, cursor_options)); |
+ new ObjectStoreKeyCursorImpl( |
+ this, leveldb_transaction, database_id, cursor_options)); |
if (!cursor->FirstSeek()) |
return scoped_ptr<IndexedDBBackingStore::Cursor>(); |
@@ -2574,7 +3444,8 @@ IndexedDBBackingStore::OpenIndexKeyCursor( |
&cursor_options)) |
return scoped_ptr<IndexedDBBackingStore::Cursor>(); |
scoped_ptr<IndexKeyCursorImpl> cursor( |
- new IndexKeyCursorImpl(leveldb_transaction, cursor_options)); |
+ new IndexKeyCursorImpl( |
+ this, leveldb_transaction, database_id, cursor_options)); |
if (!cursor->FirstSeek()) |
return scoped_ptr<IndexedDBBackingStore::Cursor>(); |
@@ -2602,7 +3473,8 @@ IndexedDBBackingStore::OpenIndexCursor( |
&cursor_options)) |
return scoped_ptr<IndexedDBBackingStore::Cursor>(); |
scoped_ptr<IndexCursorImpl> cursor( |
- new IndexCursorImpl(leveldb_transaction, cursor_options)); |
+ new IndexCursorImpl( |
+ this, leveldb_transaction, database_id, cursor_options)); |
if (!cursor->FirstSeek()) |
return scoped_ptr<IndexedDBBackingStore::Cursor>(); |
@@ -2611,7 +3483,10 @@ IndexedDBBackingStore::OpenIndexCursor( |
IndexedDBBackingStore::Transaction::Transaction( |
IndexedDBBackingStore* backing_store) |
- : backing_store_(backing_store) {} |
+ : backing_store_(backing_store), |
+ database_id_(-1) { |
+ blob_info_tree_.abstractor().comparator_ = backing_store->comparator(); |
+} |
IndexedDBBackingStore::Transaction::~Transaction() {} |
@@ -2619,23 +3494,269 @@ void IndexedDBBackingStore::Transaction::Begin() { |
IDB_TRACE("IndexedDBBackingStore::Transaction::Begin"); |
DCHECK(!transaction_.get()); |
transaction_ = new LevelDBTransaction(backing_store_->db_.get()); |
+ fprintf(stderr, "ERICU: %p::Begin(%p).\n", this, transaction_.get()); |
+} |
+ |
+bool IndexedDBBackingStore::Transaction::HandleBlobPreTransaction( |
+ BlobEntryKeyValuePairVec* new_blob_entries, |
+ WriteDescriptorVec* new_files_to_write) { |
+ TreeType::Iterator iter; |
+ iter.StartIterLeast(&blob_info_tree_); |
+ new_blob_entries->clear(); |
+ new_files_to_write->clear(); |
+ if (*iter) { |
+ // Create a LevelDBTransaction for the blob key generator and the add-journal. |
+ scoped_refptr<LevelDBTransaction> pre_transaction = |
+ new LevelDBTransaction(backing_store_->db_.get()); |
+ BlobJournalType journal; |
+ for (; *iter; ++iter) { |
+ std::vector<IndexedDBBlobInfo>::iterator info_iter; |
+ std::vector<IndexedDBBlobInfo*> new_blob_keys; |
+ for (info_iter = (*iter)->blob_info.begin(); |
+ info_iter != (*iter)->blob_info.end(); ++info_iter) { |
+ int64 next_blob_key = -1; |
+ bool result = GetBlobKeyGeneratorCurrentNumber( |
+ pre_transaction.get(), database_id_, next_blob_key); |
+ if (!result || next_blob_key < 0) |
+ return false; |
+ BlobJournalEntryType journal_entry = |
+ std::make_pair(database_id_, next_blob_key); |
+ journal.push_back(journal_entry); |
+ if (info_iter->is_file()) { |
+ new_files_to_write->push_back( |
+ WriteDescriptor(info_iter->file_path(), next_blob_key)); |
+ } else { |
+ new_files_to_write->push_back( |
+ WriteDescriptor(info_iter->url(), next_blob_key)); |
+ } |
+ info_iter->set_key(next_blob_key); |
+ new_blob_keys.push_back(&*info_iter); |
+ result = UpdateBlobKeyGeneratorCurrentNumber( |
+ pre_transaction.get(), database_id_, next_blob_key + 1); |
+ if (!result) |
+ return result; |
+ } |
+ BlobEntryKey blob_entry_key; |
+ StringPiece key_piece((*iter)->key); |
+ if (!BlobEntryKey::FromObjectStoreDataKey(&key_piece, &blob_entry_key)) { |
+ NOTREACHED(); |
+ return false; |
+ } |
+ new_blob_entries->push_back(std::make_pair(blob_entry_key, |
+ EncodeBlobData(new_blob_keys))); |
+ } |
+ UpdatePrimaryJournalWithBlobList(pre_transaction.get(), journal); |
+ if (!pre_transaction->Commit()) |
+ return false; |
+ } |
+ return true; |
} |
-bool IndexedDBBackingStore::Transaction::Commit() { |
- IDB_TRACE("IndexedDBBackingStore::Transaction::Commit"); |
- DCHECK(transaction_.get()); |
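+// Records touched by this transaction may overwrite earlier blob entries; |
+// their blob keys are collected here so the files can be deleted after the |
+// commit. database_id_ is inferred from the record keys if not yet known. |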
+bool IndexedDBBackingStore::Transaction::CollectBlobFilesToRemove() { |
+ TreeType::Iterator iter; |
+ iter.StartIterLeast(&blob_info_tree_); |
+ // Look up all old files to remove as part of the transaction, store their |
+ // names in blobs_to_remove_, and remove their old blob data entries. |
+ if (*iter) { |
+ scoped_ptr<LevelDBIterator> db_iter = transaction_->CreateIterator(); |
+ for (; *iter; ++iter) { |
+ BlobEntryKey blob_entry_key; |
+ StringPiece key_piece((*iter)->key); |
+ if (!BlobEntryKey::FromObjectStoreDataKey(&key_piece, &blob_entry_key)) { |
+ NOTREACHED(); |
+ INTERNAL_WRITE_ERROR(TRANSACTION_COMMIT_METHOD); |
+ transaction_ = NULL; |
+ return false; |
+ } |
+ if (database_id_ < 0) |
+ database_id_ = blob_entry_key.database_id(); |
+ else |
+ DCHECK_EQ(database_id_, blob_entry_key.database_id()); |
+ std::string blob_entry_key_bytes = blob_entry_key.Encode(); |
+ db_iter->Seek(blob_entry_key_bytes); |
+ if (db_iter->IsValid()) { |
+ std::vector<IndexedDBBlobInfo> blob_info; |
+ if (!DecodeBlobData(db_iter->Value().as_string(), &blob_info)) { |
+ INTERNAL_READ_ERROR(TRANSACTION_COMMIT_METHOD); |
+ transaction_ = NULL; |
+ return false; |
+ } |
+ std::vector<IndexedDBBlobInfo>::iterator blob_info_iter; |
+ for (blob_info_iter = blob_info.begin(); |
+ blob_info_iter != blob_info.end(); ++blob_info_iter) |
+ blobs_to_remove_.push_back( |
+ std::make_pair(database_id_, blob_info_iter->key())); |
+ transaction_->Remove(blob_entry_key_bytes); |
+      } |
+ } |
+ } |
+ return true; |
+} |
+ |
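+// Stages the new blob-table entries in the main transaction, then starts the |
+// file writes asynchronously through a ChainedBlobWriter, which reports back |
+// through |callback| once the descriptors have been written or a write fails. |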
+void IndexedDBBackingStore::Transaction::WriteNewBlobs( |
+ BlobEntryKeyValuePairVec& new_blob_entries, |
+ WriteDescriptorVec& new_files_to_write, |
+ scoped_refptr<BlobWriteCallback> callback) { |
+ DCHECK_GT(new_files_to_write.size(), 0UL); |
+ DCHECK_GT(database_id_, 0); |
+ BlobEntryKeyValuePairVec::iterator blob_entry_iter; |
+ for (blob_entry_iter = new_blob_entries.begin(); |
+ blob_entry_iter != new_blob_entries.end(); ++blob_entry_iter) { |
+ // Add the new blob-table entry for each blob to the main transaction, or |
+ // remove any entry that may exist if there's no new one. |
+ if (!blob_entry_iter->second.size()) |
+ transaction_->Remove(blob_entry_iter->first.Encode()); |
+ else |
+ transaction_->Put(blob_entry_iter->first.Encode(), |
+ &blob_entry_iter->second); |
+ } |
+ // Creating the writer will start it going asynchronously. |
+ chained_blob_writer_ = new ChainedBlobWriter(database_id_, backing_store_, |
+ new_files_to_write, callback); |
+} |
+ |
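+// Splits blobs_to_remove_ between the primary journal (files safe to delete |
+// as soon as this transaction commits) and the secondary journal (files the |
+// active blob registry reports as still referenced), then writes both |
+// journals as part of the main transaction. |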
+bool IndexedDBBackingStore::Transaction::SortBlobsToRemove() { |
+ IndexedDBActiveBlobRegistry* registry = |
+ backing_store_->active_blob_registry(); |
+ BlobJournalType::iterator iter; |
+ BlobJournalType primary_journal, secondary_journal; |
+ for (iter = blobs_to_remove_.begin(); iter != blobs_to_remove_.end(); |
+ ++iter) { |
+ if (registry->MarkDeletedCheckIfUsed(iter->first, iter->second)) |
+ secondary_journal.push_back(*iter); |
+ else |
+ primary_journal.push_back(*iter); |
+ } |
+  UpdatePrimaryJournalWithBlobList(transaction_.get(), primary_journal); |
+  if (!MergeBlobsIntoSecondaryJournal(transaction_.get(), secondary_journal)) |
+    return false; |
+  // Keep just the primary journal in blobs_to_remove_ so CommitPhaseTwo can |
+  // tell whether any blob files still need cleanup once the commit lands. |
+  blobs_to_remove_.swap(primary_journal); |
+ return true; |
+} |
+ |
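+// Phase one of a two-phase commit: clean up any stale primary journal, |
+// allocate keys and journal entries for the new blobs, collect the old blob |
+// files to remove, and start the asynchronous blob writes. The LevelDB |
+// transaction itself is only committed in CommitPhaseTwo, which the caller |
+// is expected to trigger from |callback| once the writes have succeeded. |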
+bool IndexedDBBackingStore::Transaction::CommitPhaseOne( |
+ scoped_refptr<BlobWriteCallback> callback) { |
+  IDB_TRACE("IndexedDBBackingStore::Transaction::CommitPhaseOne"); |
+ DCHECK(transaction_); |
+ DCHECK(backing_store_->task_runner()->RunsTasksOnCurrentThread()); |
+ |
+ if (!backing_store_->CleanUpBlobJournal(BlobJournalKey::Encode())) { |
+ INTERNAL_WRITE_ERROR(TRANSACTION_COMMIT_METHOD); |
+ transaction_ = NULL; |
+ return false; |
+ } |
+ |
+ BlobEntryKeyValuePairVec new_blob_entries; |
+ WriteDescriptorVec new_files_to_write; |
+ // This commits the journal of blob files we're about to add, if any. |
+ if (!HandleBlobPreTransaction(&new_blob_entries, &new_files_to_write)) { |
+ INTERNAL_WRITE_ERROR(TRANSACTION_COMMIT_METHOD); |
+ transaction_ = NULL; |
+ return false; |
+ } |
+ |
+ DCHECK(!new_files_to_write.size() || |
+ KeyPrefix::IsValidDatabaseId(database_id_)); |
+ if (!CollectBlobFilesToRemove()) { |
+ INTERNAL_WRITE_ERROR(TRANSACTION_COMMIT_METHOD); |
+ transaction_ = NULL; |
+ return false; |
+ } |
+ |
+ if (new_files_to_write.size()) { |
+ // This kicks off the writes of the new blobs, if any. |
+ // This call will zero out new_blob_entries and new_files_to_write. |
+ WriteNewBlobs(new_blob_entries, new_files_to_write, callback); |
+    // Schedule removal of the "add" journal; once the blobs have been |
+    // written and the transaction commits, the journal entries are cleared. |
+ ClearBlobJournal(transaction_.get(), BlobJournalKey::Encode()); |
+ } else { |
+ callback->didSucceed(); |
+ } |
+ |
+ return true; |
+} |
+ |
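+// Phase two of commit: write out the removal journals for any old blob |
+// files, commit the LevelDB transaction, and then delete the files listed |
+// in the primary journal. |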
+bool IndexedDBBackingStore::Transaction::CommitPhaseTwo() { |
+  if (blobs_to_remove_.size() && !SortBlobsToRemove()) { |
+    INTERNAL_WRITE_ERROR(TRANSACTION_COMMIT_METHOD); |
+    transaction_ = NULL; |
+    return false; |
+  } |
+ |
bool result = transaction_->Commit(); |
transaction_ = NULL; |
+ |
if (!result) |
INTERNAL_WRITE_ERROR(TRANSACTION_COMMIT_METHOD); |
+ else if (blobs_to_remove_.size()) |
+ backing_store_->CleanUpBlobJournal(BlobJournalKey::Encode()); |
+ |
return result; |
} |
void IndexedDBBackingStore::Transaction::Rollback() { |
IDB_TRACE("IndexedDBBackingStore::Transaction::Rollback"); |
DCHECK(transaction_.get()); |
+ if (chained_blob_writer_) |
+ chained_blob_writer_->Abort(); |
transaction_->Rollback(); |
transaction_ = NULL; |
} |
+// This stores a blob-info record for |key| even if the list is empty, and |
+// even if the previous value for that key had no blob info that we know of. |
+// It duplicates some of the data held in the leveldb transaction, but only |
+// for the user keys that were altered; changes to exists or index keys are |
+// not tracked here. |
+void IndexedDBBackingStore::Transaction::PutBlobInfo( |
+ int64 database_id, |
+ int64 object_store_id, |
+    const std::string& key, |
+    std::vector<IndexedDBBlobInfo>* blob_info) { |
+ DCHECK_GT(key.size(), 0UL); |
+ if (database_id_ < 0) |
+ database_id_ = database_id; |
+ DCHECK_EQ(database_id_, database_id); |
+ AVLTreeNode* node = blob_info_tree_.Search(key); |
+ if (!node) { |
+ node = new AVLTreeNode; |
+ node->key.insert(node->key.end(), key.begin(), key.end()); |
+ blob_info_tree_.Insert(node); |
+ node->object_store_id = object_store_id; |
+ } |
+ DCHECK_EQ(node->object_store_id, object_store_id); |
+ node->blob_info.clear(); |
+ if (blob_info) |
+ node->blob_info.swap(*blob_info); |
+} |
+ |
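+// A WriteDescriptor names one blob to write for this transaction: either a |
+// local file or a URL whose contents are to be written, together with the |
+// blob key assigned to it in HandleBlobPreTransaction. |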
+IndexedDBBackingStore::Transaction::WriteDescriptor::WriteDescriptor( |
+    const GURL& url, int64_t key) |
+    : is_file_(false), url_(url), key_(key) {} |
+ |
+IndexedDBBackingStore::Transaction::WriteDescriptor::WriteDescriptor( |
+    const FilePath& file_path, int64_t key) |
+    : is_file_(true), file_path_(file_path), key_(key) {} |
+ |
} // namespace content |