Chromium Code Reviews| Index: ios/chrome/browser/reading_list/reading_list_store.cc |
| diff --git a/ios/chrome/browser/reading_list/reading_list_store.cc b/ios/chrome/browser/reading_list/reading_list_store.cc |
| new file mode 100644 |
| index 0000000000000000000000000000000000000000..7e1f6a682925cf1df961b7f7440ab43fb9e5b43a |
| --- /dev/null |
| +++ b/ios/chrome/browser/reading_list/reading_list_store.cc |
| @@ -0,0 +1,374 @@ |
| +// Copyright 2016 The Chromium Authors. All rights reserved. |
| +// Use of this source code is governed by a BSD-style license that can be |
| +// found in the LICENSE file. |
| + |
| +#include "ios/chrome/browser/reading_list/reading_list_store.h" |
| + |
| +#include "base/bind.h" |
| +#include "base/files/file_path.h" |
| +#include "base/logging.h" |
| +#include "base/memory/ptr_util.h" |
| +#include "components/sync/model/entity_change.h" |
| +#include "components/sync/model/metadata_batch.h" |
| +#include "components/sync/model/metadata_change_list.h" |
| +#include "components/sync/model/model_type_change_processor.h" |
| +#include "components/sync/model/mutable_data_batch.h" |
| +#include "components/sync/model/simple_metadata_change_list.h" |
| +#include "components/sync/protocol/model_type_state.pb.h" |
| +#include "ios/chrome/browser/reading_list/proto/reading_list.pb.h" |
| +#include "ios/chrome/browser/reading_list/reading_list_model_impl.h" |
| +#include "ios/web/public/web_thread.h" |
| + |
// Constructs the store and registers it as the READING_LIST model type
// service with the default change-processor factory.
// NOTE(review): |database| and |database_dir| are not stored or used here —
// the backing ModelTypeStore is created later through
// |create_store_callback| (see SetReadingListModel()); confirm the unused
// parameters are intentional.
ReadingListStore::ReadingListStore(std::unique_ptr<ReadingListDB> database,
                                   const base::FilePath& database_dir,
                                   StoreFactoryFunction create_store_callback)
    : ModelTypeService(base::Bind(&syncer::ModelTypeChangeProcessor::Create),
                       syncer::READING_LIST),
      database_loaded_(false),
      create_store_callback_(create_store_callback),
      pending_transaction_(0) {}
| + |
| +ReadingListStore::~ReadingListStore() { |
| + DCHECK(pending_transaction_ == 0); |
| +} |
| + |
// Attaches the model this store feeds, then kicks off asynchronous creation
// of the backing ModelTypeStore; initialization continues in
// OnStoreCreated() once the store exists.
void ReadingListStore::SetReadingListModel(ReadingListModelImpl* model) {
  DCHECK_CURRENTLY_ON(web::WebThread::UI);
  model_ = model;
  create_store_callback_.Run(
      base::Bind(&ReadingListStore::OnStoreCreated, base::AsWeakPtr(this)));
}
| + |
// Opens a write batch on the outermost call. Transactions nest: only the
// first BeginTransaction() creates |batch_|; inner calls just bump the
// counter, and the batch is committed by the matching outermost
// CommitTransaction().
void ReadingListStore::BeginTransaction() {
  DCHECK_CURRENTLY_ON(web::WebThread::UI);
  pending_transaction_++;
  if (pending_transaction_ == 1) {
    batch_ = store_->CreateWriteBatch();
  }
}
| + |
| +void ReadingListStore::CommitTransaction() { |
| + DCHECK_CURRENTLY_ON(web::WebThread::UI); |
| + pending_transaction_--; |
| + if (pending_transaction_ == 0) { |
| + store_->CommitWriteBatch( |
| + std::move(batch_), |
| + base::Bind(&ReadingListStore::OnDatabaseSave, base::AsWeakPtr(this))); |
| + batch_.reset(); |
| + } |
| +} |
| + |
| +void ReadingListStore::SaveEntry(const ReadingListEntry& entry, |
| + bool read, |
| + bool from_sync) { |
| + DCHECK_CURRENTLY_ON(web::WebThread::UI); |
| + BeginTransaction(); |
| + |
| + std::unique_ptr<reading_list::ReadingListLocal> pb_entry = |
| + entry.AsReadingListLocal(read); |
| + std::unique_ptr<sync_pb::ReadingListSpecifics> pb_entry_sync = |
| + entry.AsReadingListSpecifics(read); |
| + // Unref the URL before making asynchronous call. |
| + std::string local_key = entry.URL().spec(); |
| + |
| + store_->WriteData(batch_.get(), local_key, pb_entry->SerializeAsString()); |
| + |
| + if (!change_processor() || from_sync) { |
|
skym
2016/10/28 16:30:56
I'm sorry. This has changed recently, the processo
Olivier
2016/11/02 14:57:10
Done.
|
| + CommitTransaction(); |
| + return; |
| + } |
| + |
| + std::unique_ptr<syncer::MetadataChangeList> metadata_change_list = |
| + CreateMetadataChangeList(); |
| + |
| + std::unique_ptr<syncer::EntityData> entity_data(new syncer::EntityData()); |
| + *entity_data->specifics.mutable_reading_list() = *pb_entry_sync; |
| + entity_data->non_unique_name = pb_entry_sync->entry_id(); |
| + |
| + change_processor()->Put(entry.URL().spec(), std::move(entity_data), |
| + metadata_change_list.get()); |
| + |
| + static_cast<syncer::SimpleMetadataChangeList*>(metadata_change_list.get()) |
| + ->TransferChanges(store_.get(), batch_.get()); |
| + CommitTransaction(); |
| +} |
| + |
| +void ReadingListStore::RemoveEntry(const ReadingListEntry& entry, |
| + bool from_sync) { |
| + DCHECK_CURRENTLY_ON(web::WebThread::UI); |
| + BeginTransaction(); |
| + std::string local_key = entry.URL().spec(); |
| + store_->DeleteData(batch_.get(), local_key); |
| + if (!change_processor() || from_sync) { |
|
skym
2016/10/28 16:30:56
Same as above.
Olivier
2016/11/02 14:57:10
Done.
|
| + CommitTransaction(); |
| + return; |
| + } |
| + std::unique_ptr<syncer::MetadataChangeList> metadata_change_list = |
| + CreateMetadataChangeList(); |
| + |
| + change_processor()->Delete(local_key, metadata_change_list.get()); |
| + static_cast<syncer::SimpleMetadataChangeList*>(metadata_change_list.get()) |
| + ->TransferChanges(store_.get(), batch_.get()); |
| + |
| + CommitTransaction(); |
| +} |
| + |
| +void ReadingListStore::OnDatabaseLoad( |
| + syncer::ModelTypeStore::Result result, |
| + std::unique_ptr<syncer::ModelTypeStore::RecordList> entries) { |
| + DCHECK_CURRENTLY_ON(web::WebThread::UI); |
| + if (result != syncer::ModelTypeStore::Result::SUCCESS) { |
| + return; |
| + } |
| + database_loaded_ = true; |
| + auto read = base::MakeUnique<ReadingListEntries>(); |
| + auto unread = base::MakeUnique<ReadingListEntries>(); |
| + |
| + for (const syncer::ModelTypeStore::Record& r : *entries.get()) { |
| + // for (const reading_list::ReadingListLocal& pb_entry : *entries) { |
| + std::unique_ptr<reading_list::ReadingListLocal> proto = |
| + base::MakeUnique<reading_list::ReadingListLocal>(); |
| + if (!proto->ParseFromString(r.value)) { |
| + continue; |
| + // TODO(skym, crbug.com/582460): Handle unrecoverable initialization |
| + // failure. |
| + } |
| + |
| + std::unique_ptr<ReadingListEntry> entry( |
| + ReadingListEntry::FromReadingListLocal(*proto)); |
| + if (!entry) { |
| + continue; |
| + } |
| + if (proto->status() == reading_list::ReadingListLocal::READ) { |
| + read->push_back(std::move(*entry)); |
| + } else { |
| + unread->push_back(std::move(*entry)); |
| + } |
| + } |
| + |
| + model_->ModelLoaded(std::move(unread), std::move(read)); |
| + |
| + store_->ReadAllMetadata( |
| + base::Bind(&ReadingListStore::OnReadAllMetadata, base::AsWeakPtr(this))); |
| +} |
| + |
| +void ReadingListStore::OnReadAllMetadata( |
| + syncer::ModelTypeStore::Result result, |
| + std::unique_ptr<syncer::ModelTypeStore::RecordList> metadata_records, |
| + const std::string& global_metadata) { |
| + if (result != syncer::ModelTypeStore::Result::SUCCESS) { |
| + // Store has encountered some serious error. We should still be able to |
| + // continue as a read only service, since if we got this far we must have |
| + // loaded all data out succesfully. |
| + return; |
|
skym
2016/10/28 16:30:56
It looks like non of your initialization error cas
|
| + } |
| + |
| + std::unique_ptr<syncer::MetadataBatch> batch(new syncer::MetadataBatch()); |
| + sync_pb::ModelTypeState state; |
| + if (state.ParseFromString(global_metadata)) { |
| + batch->SetModelTypeState(state); |
| + } else { |
| + // TODO(skym): How bad is this scenario? We may be able to just give an |
|
skym
2016/10/28 16:30:56
Why did I leave TODOs in DeviceInfoService aaah X_
|
| + // empty batch to the processor and we'll treat corrupted data type state |
| + // as no data type state at all. The question is do we want to add any of |
| + // the entity metadata to the batch or completely skip that step? We're |
| + // going to have to perform a merge shortly. Does this decision/logic even |
| + // belong in this service? |
| + change_processor()->OnMetadataLoaded( |
| + change_processor()->CreateAndUploadError( |
| + FROM_HERE, "Failed to deserialize global metadata."), |
| + nullptr); |
| + } |
| + for (const syncer::ModelTypeStore::Record& r : *metadata_records.get()) { |
| + sync_pb::EntityMetadata entity_metadata; |
| + if (entity_metadata.ParseFromString(r.value)) { |
| + batch->AddMetadata(r.id, entity_metadata); |
| + } else { |
| + // TODO(skym): This really isn't too bad. We just want to regenerate |
| + // metadata for this particular entity. Unfortunately there isn't a |
| + // convenient way to tell the processor to do this. |
| + LOG(WARNING) << "Failed to deserialize entity metadata."; |
| + } |
| + } |
| + change_processor()->OnMetadataLoaded(syncer::SyncError(), std::move(batch)); |
| +} |
| + |
| +void ReadingListStore::OnDatabaseSave(syncer::ModelTypeStore::Result result) { |
| + return; |
| +} |
| + |
| +void ReadingListStore::OnStoreCreated( |
| + syncer::ModelTypeStore::Result result, |
| + std::unique_ptr<syncer::ModelTypeStore> store) { |
| + store_ = std::move(store); |
| + store_->ReadAllData( |
| + base::Bind(&ReadingListStore::OnDatabaseLoad, base::AsWeakPtr(this))); |
| + return; |
| +} |
| + |
// Returns the syncer::ModelTypeService sync should talk to; this class
// implements the service interface itself.
syncer::ModelTypeService* ReadingListStore::GetModelTypeService() {
  return this;
}
| + |
// Creates an object used to communicate changes in the sync metadata to the
// model type store. A SimpleMetadataChangeList is used so callers can later
// flush the accumulated changes into a write batch via TransferChanges()
// (see SaveEntry() / RemoveEntry()).
std::unique_ptr<syncer::MetadataChangeList>
ReadingListStore::CreateMetadataChangeList() {
  return base::MakeUnique<syncer::SimpleMetadataChangeList>();
}
| + |
| +// Perform the initial merge between local and sync data. This should only be |
| +// called when a data type is first enabled to start syncing, and there is no |
| +// sync metadata. Best effort should be made to match local and sync data. The |
| +// keys in the |entity_data_map| will have been created via GetClientTag(...), |
| +// and if a local and sync data should match/merge but disagree on tags, the |
| +// service should use the sync data's tag. Any local pieces of data that are |
| +// not present in sync should immediately be Put(...) to the processor before |
| +// returning. The same MetadataChangeList that was passed into this function |
| +// can be passed to Put(...) calls. Delete(...) can also be called but should |
| +// not be needed for most model types. Durable storage writes, if not able to |
| +// combine all change atomically, should save the metadata after the data |
| +// changes, so that this merge will be re-driven by sync if is not completely |
| +// saved during the current run. |
| +syncer::SyncError ReadingListStore::MergeSyncData( |
| + std::unique_ptr<syncer::MetadataChangeList> metadata_change_list, |
| + syncer::EntityDataMap entity_data_map) { |
|
skym
2016/10/28 16:30:55
Should there be a BeginTransaction() up here?
Olivier
2016/11/02 14:57:10
It would not hurt but the BeginBatchUpdates do it
|
| + // Keep track of the last update of each item. |
| + std::map<GURL, int64_t> last_update; |
| + std::unique_ptr<ReadingListModel::ScopedReadingListBatchUpdate> batch = |
| + model_->BeginBatchUpdates(); |
| + |
| + // Merge sync to local data. |
| + for (const auto& kv : entity_data_map) { |
| + const sync_pb::ReadingListSpecifics& specifics = |
| + kv.second.value().specifics.reading_list(); |
| + std::unique_ptr<ReadingListEntry> entry( |
| + ReadingListEntry::FromReadingListSpecifics(specifics)); |
| + DCHECK(entry->URL().spec() == kv.first); |
| + DCHECK(specifics.entry_id() == kv.first); |
| + last_update[entry->URL()] = entry->UpdateTime(); |
| + // This method will update the model and the local store. |
| + model_->SyncAddEntry( |
| + std::move(entry), |
| + specifics.status() == sync_pb::ReadingListSpecifics::READ); |
| + } |
| + |
| + int unread_count = model_->unread_size(); |
| + int read_count = model_->read_size(); |
| + for (int index = 0; index < unread_count + read_count; index++) { |
| + bool read = index >= unread_count; |
| + const ReadingListEntry& entry = |
| + read ? model_->GetReadEntryAtIndex(index - unread_count) |
| + : model_->GetUnreadEntryAtIndex(index); |
| + if (last_update.count(entry.URL()) && |
| + last_update[entry.URL()] >= entry.UpdateTime()) { |
|
skym
2016/10/28 16:30:55
What about the other direction?
skym
2016/11/01 18:27:25
Disregard this, I was confused. You're always send
|
| + // The synced entry is up to date. |
| + continue; |
| + } |
| + std::unique_ptr<sync_pb::ReadingListSpecifics> entry_pb = |
| + entry.AsReadingListSpecifics(read); |
| + |
| + std::unique_ptr<syncer::EntityData> entity_data(new syncer::EntityData()); |
| + *(entity_data->specifics.mutable_reading_list()) = *entry_pb; |
| + entity_data->non_unique_name = entry_pb->entry_id(); |
| + |
| + static_cast<syncer::SimpleMetadataChangeList*>(metadata_change_list.get()) |
| + ->TransferChanges(store_.get(), batch_.get()); |
|
skym
2016/10/28 16:30:56
This transfer change should get moved outside (and
Olivier
2016/11/02 14:57:10
Done.
|
| + change_processor()->Put(entry_pb->entry_id(), std::move(entity_data), |
| + metadata_change_list.get()); |
| + } |
|
skym
2016/10/28 16:30:55
I think there should be a commit here.
Olivier
2016/11/02 14:57:10
There is a hidden one when batch (renames to model
|
| + return syncer::SyncError(); |
| +} |
| + |
// Apply changes from the sync server locally.
// Please note that |entity_changes| might have fewer entries than
// |metadata_change_list| in case when some of the data changes are filtered
// out, or even be empty in case when a commit confirmation is processed and
// only the metadata needs to persisted.
syncer::SyncError ReadingListStore::ApplySyncChanges(
    std::unique_ptr<syncer::MetadataChangeList> metadata_change_list,
    syncer::EntityChangeList entity_changes) {
  // NOTE(review): unlike SaveEntry()/MergeSyncData(), the metadata changes
  // are never transferred to the store here — confirm metadata persistence
  // is handled elsewhere (e.g. inside SyncAddEntry/SyncRemoveEntry).
  // All model mutations below are scoped by one batch update.
  std::unique_ptr<ReadingListModel::ScopedReadingListBatchUpdate> batch =
      model_->BeginBatchUpdates();
  for (syncer::EntityChange& change : entity_changes) {
    const sync_pb::ReadingListSpecifics& specifics =
        change.data().specifics.reading_list();
    if (change.type() == syncer::EntityChange::ACTION_DELETE) {
      model_->SyncRemoveEntry(GURL(specifics.url()));
      continue;
    }

    // Additions and updates both go through SyncAddEntry.
    std::unique_ptr<ReadingListEntry> entry(
        ReadingListEntry::FromReadingListSpecifics(specifics));
    model_->SyncAddEntry(
        std::move(entry),
        specifics.status() == sync_pb::ReadingListSpecifics::READ);
  }
  return syncer::SyncError();
}
| + |
| +// Asynchronously retrieve the corresponding sync data for |storage_keys|. |
| +void ReadingListStore::GetData(StorageKeyList storage_keys, |
| + DataCallback callback) { |
| + auto batch = base::MakeUnique<syncer::MutableDataBatch>(); |
| + for (std::string url_string : storage_keys) { |
| + auto add_to_batch_callback = |
| + base::Bind(&ReadingListStore::AddEntryToBatch, base::Unretained(this), |
| + batch.get()); |
| + model_->CallbackEntryReadStatusURL(GURL(url_string), add_to_batch_callback); |
| + } |
| + |
| + callback.Run(syncer::SyncError(), std::move(batch)); |
|
skym
2016/11/01 18:27:25
All these syncer::SyncError() calls are pretty use
|
| +} |
| + |
| +// Asynchronously retrieve all of the local sync data. |
| +void ReadingListStore::GetAllData(DataCallback callback) { |
| + auto batch = base::MakeUnique<syncer::MutableDataBatch>(); |
| + int unread_count = model_->unread_size(); |
| + int read_count = model_->read_size(); |
| + for (int index = 0; index < unread_count + read_count; index++) { |
| + bool read = index >= unread_count; |
| + const ReadingListEntry& entry = |
| + read ? model_->GetReadEntryAtIndex(index - unread_count) |
| + : model_->GetUnreadEntryAtIndex(index); |
| + AddEntryToBatch(batch.get(), entry, read); |
| + } |
| + |
| + callback.Run(syncer::SyncError(), std::move(batch)); |
| +} |
| + |
| +void ReadingListStore::AddEntryToBatch(syncer::MutableDataBatch* batch, |
| + const ReadingListEntry& entry, |
| + bool read) { |
| + std::unique_ptr<sync_pb::ReadingListSpecifics> entry_pb = |
| + entry.AsReadingListSpecifics(read); |
| + |
| + std::unique_ptr<syncer::EntityData> entity_data(new syncer::EntityData()); |
| + *(entity_data->specifics.mutable_reading_list()) = *entry_pb; |
| + entity_data->non_unique_name = entry_pb->entry_id(); |
| + |
| + batch->Put(entry_pb->entry_id(), std::move(entity_data)); |
| +} |
| + |
| +// Get or generate a client tag for |entity_data|. This must be the same tag |
| +// that was/would have been generated in the SyncableService/Directory world |
| +// for backward compatibility with pre-USS clients. The only time this |
| +// theoretically needs to be called is on the creation of local data, however |
| +// it is also used to verify the hash of remote data. If a data type was never |
| +// launched pre-USS, then method does not need to be different from |
| +// GetStorageKey(). |
| +std::string ReadingListStore::GetClientTag( |
| + const syncer::EntityData& entity_data) { |
| + return entity_data.specifics.reading_list().url(); |
|
skym
2016/10/28 16:30:56
I'd call GetStorageKey(entity_data) here so you do
Olivier
2016/11/02 14:57:10
If new model don't need it, should this method be
|
| +} |
| + |
// Get or generate a storage key for |entity_data|. This will only ever be
// called once when first encountering a remote entity. Local changes will
// provide their storage keys directly to Put instead of using this method.
// Theoretically this function doesn't need to be stable across multiple calls
// on the same or different clients, but to keep things simple, it probably
// should be.
// The storage key is the entry's URL spec, matching the keys written by
// SaveEntry()/RemoveEntry().
std::string ReadingListStore::GetStorageKey(
    const syncer::EntityData& entity_data) {
  return entity_data.specifics.reading_list().url();
}