Chromium Code Reviews

Unified Diff: ios/chrome/browser/reading_list/reading_list_store.cc

Issue 2398233003: with sync (Closed)
Patch Set: done Created 4 years, 2 months ago
Index: ios/chrome/browser/reading_list/reading_list_store.cc
diff --git a/ios/chrome/browser/reading_list/reading_list_store.cc b/ios/chrome/browser/reading_list/reading_list_store.cc
index 88746456bd40fbeaca7dd06c6fedc618ca6c8d7f..592f80a7708ddd4621f758ef76f4078282ac4f04 100644
--- a/ios/chrome/browser/reading_list/reading_list_store.cc
+++ b/ios/chrome/browser/reading_list/reading_list_store.cc
@@ -8,42 +8,43 @@
#include "base/files/file_path.h"
#include "base/logging.h"
#include "base/memory/ptr_util.h"
+#include "components/sync/api/entity_change.h"
+#include "components/sync/api/metadata_change_list.h"
+#include "components/sync/api/metadata_batch.h"
+#include "components/sync/core/simple_metadata_change_list.h"
+#include "components/sync/core/shared_model_type_processor.h"
+#include "components/sync/protocol/model_type_state.pb.h"
#include "ios/chrome/browser/reading_list/proto/reading_list.pb.h"
#include "ios/chrome/browser/reading_list/reading_list_model_impl.h"
#include "ios/web/public/web_thread.h"
ReadingListStore::ReadingListStore(std::unique_ptr<ReadingListDB> database,
- const base::FilePath& database_dir)
- : database_(std::move(database)),
+ const base::FilePath& database_dir,
+ StoreFactoryFunction create_store_callback)
+ : ModelTypeService(
+ base::Bind(
+ &syncer::SharedModelTypeProcessor::CreateAsChangeProcessor),
+ syncer::READING_LIST),
database_loaded_(false),
- pending_transaction_(0),
- weak_ptr_factory_(this) {
- database_->Init("ReadingList", database_dir,
- base::Bind(&ReadingListStore::OnDatabaseInit,
- weak_ptr_factory_.GetWeakPtr()));
-}
+ create_store_callback_(create_store_callback),
+ pending_transaction_(0) {}
ReadingListStore::~ReadingListStore() {
DCHECK(pending_transaction_ == 0);
}
-void ReadingListStore::OnDatabaseInit(bool success) {
- DCHECK_CURRENTLY_ON(web::WebThread::UI);
- if (!success) {
- database_.reset();
- }
-}
-
void ReadingListStore::SetReadingListModel(ReadingListModelImpl* model) {
DCHECK_CURRENTLY_ON(web::WebThread::UI);
model_ = model;
+ create_store_callback_.Run(
+ base::Bind(&ReadingListStore::OnStoreCreated, base::AsWeakPtr(this)));
}
void ReadingListStore::LoadPersistentLists() {
DCHECK_CURRENTLY_ON(web::WebThread::UI);
DCHECK(model_);
- database_->LoadEntries(base::Bind(&ReadingListStore::OnDatabaseLoad,
- weak_ptr_factory_.GetWeakPtr()));
+ // database_->LoadEntries(
+ // base::Bind(&ReadingListStore::OnDatabaseLoad, base::AsWeakPtr(this)));
}
void ReadingListStore::BeginTransaction() {
@@ -59,10 +60,11 @@ void ReadingListStore::CommitTransaction() {
DCHECK_CURRENTLY_ON(web::WebThread::UI);
pending_transaction_--;
if (pending_transaction_ == 0) {
- database_->UpdateEntries(std::move(pending_keys_to_save_),
- std::move(pending_keys_to_remove_),
- base::Bind(&ReadingListStore::OnDatabaseSave,
- weak_ptr_factory_.GetWeakPtr()));
+ // database_->UpdateEntries(
+ // std::move(pending_keys_to_save_),
+ // std::move(pending_keys_to_remove_),
+ // base::Bind(&ReadingListStore::OnDatabaseSave,
+ // base::AsWeakPtr(this)));
pending_keys_to_save_ = nullptr;
pending_keys_to_remove_ = nullptr;
}
@@ -70,42 +72,84 @@ void ReadingListStore::CommitTransaction() {
void ReadingListStore::SaveEntry(const ReadingListEntry& entry, bool read) {
DCHECK_CURRENTLY_ON(web::WebThread::UI);
- BeginTransaction();
-
+ // BeginTransaction();
+ //
std::unique_ptr<reading_list::ReadingListLocal> pb_entry =
entry.AsReadingListLocal(read);
// Unref the URL before making asynchronous call.
std::string local_key = entry.URL().spec();
- pending_keys_to_save_->push_back(std::make_pair(local_key, *pb_entry));
- CommitTransaction();
+ std::unique_ptr<syncer::ModelTypeStore::WriteBatch> batch =
+ store_->CreateWriteBatch();
+ store_->WriteData(batch.get(), local_key, pb_entry->SerializeAsString());
+
+ if (!change_processor()) {
+ store_->CommitWriteBatch(
+ std::move(batch),
+ base::Bind(&ReadingListStore::OnDatabaseSave, base::AsWeakPtr(this)));
+ return;
+ }
+
+ std::unique_ptr<syncer::MetadataChangeList> metadata_change_list =
+ CreateMetadataChangeList();
+
+ std::unique_ptr<syncer::EntityData> entity_data(new syncer::EntityData());
+ *entity_data->specifics.mutable_reading_list() = pb_entry->entry();
+ entity_data->non_unique_name = pb_entry->entry().url();
+
+ if (read) {
+ change_processor()->Delete(pb_entry->entry().url(),
+ metadata_change_list.get());
+ } else {
+ change_processor()->Put(pb_entry->entry().url(), std::move(entity_data),
+ metadata_change_list.get());
+ }
+
+ static_cast<syncer::SimpleMetadataChangeList*>(metadata_change_list.get())
+ ->TransferChanges(store_.get(), batch.get());
+ store_->CommitWriteBatch(
+ std::move(batch),
+ base::Bind(&ReadingListStore::OnDatabaseSave, base::AsWeakPtr(this)));
}
void ReadingListStore::RemoveEntry(const ReadingListEntry& entry) {
DCHECK_CURRENTLY_ON(web::WebThread::UI);
- BeginTransaction();
- pending_keys_to_remove_->push_back(entry.URL().spec());
- CommitTransaction();
+ std::string local_key = entry.URL().spec();
+ std::unique_ptr<syncer::ModelTypeStore::WriteBatch> batch =
+ store_->CreateWriteBatch();
+ store_->DeleteData(batch.get(), local_key);
+ store_->CommitWriteBatch(
+ std::move(batch),
+ base::Bind(&ReadingListStore::OnDatabaseSave, base::AsWeakPtr(this)));
}
-void ReadingListStore::OnDatabaseLoad(bool success,
- std::unique_ptr<EntryVector> entries) {
+void ReadingListStore::OnDatabaseLoad(
+ syncer::ModelTypeStore::Result result,
+ std::unique_ptr<syncer::ModelTypeStore::RecordList> entries) {
DCHECK_CURRENTLY_ON(web::WebThread::UI);
- if (!success) {
- database_.reset();
+ if (result != syncer::ModelTypeStore::Result::SUCCESS) {
return;
}
database_loaded_ = true;
auto read = base::MakeUnique<ReadingListEntries>();
auto unread = base::MakeUnique<ReadingListEntries>();
- for (const reading_list::ReadingListLocal& pb_entry : *entries) {
+ for (const syncer::ModelTypeStore::Record& r : *entries.get()) {
+ // for (const reading_list::ReadingListLocal& pb_entry : *entries) {
+ std::unique_ptr<reading_list::ReadingListLocal> proto =
+ base::MakeUnique<reading_list::ReadingListLocal>();
+ if (!proto->ParseFromString(r.value)) {
+ // TODO(skym, crbug.com/582460): Handle unrecoverable initialization
+ // failure.
+ continue;
+ }
+
std::unique_ptr<ReadingListEntry> entry(
- ReadingListEntry::FromReadingListLocal(pb_entry));
+ ReadingListEntry::FromReadingListLocal(*proto));
if (!entry) {
continue;
}
- if (pb_entry.entry().status() == sync_pb::ReadingListSpecifics::READ) {
+ if (proto->entry().status() == sync_pb::ReadingListSpecifics::READ) {
read->push_back(std::move(*entry));
} else {
unread->push_back(std::move(*entry));
@@ -117,12 +161,232 @@ void ReadingListStore::OnDatabaseLoad(bool success,
ReadingListEntry::CompareEntryUpdateTime);
model_->ModelLoaded(std::move(unread), std::move(read));
+
+ store_->ReadAllMetadata(
+ base::Bind(&ReadingListStore::OnReadAllMetadata, base::AsWeakPtr(this)));
}
-void ReadingListStore::OnDatabaseSave(bool success) {
- DCHECK_CURRENTLY_ON(web::WebThread::UI);
- if (!success) {
- database_.reset();
- database_loaded_ = false;
+void ReadingListStore::OnReadAllMetadata(
+ syncer::ModelTypeStore::Result result,
+ std::unique_ptr<syncer::ModelTypeStore::RecordList> metadata_records,
+ const std::string& global_metadata) {
+ if (result != syncer::ModelTypeStore::Result::SUCCESS) {
+ // Store has encountered some serious error. We should still be able to
+ // continue as a read-only service, since if we got this far we must have
+ // loaded all data out successfully.
+ return;
+ }
+
+ // If we have no metadata then we don't want to create a processor. The idea
+ // is that by not having a processor, the services will suffer less of a
+ // performance hit. This isn't terribly applicable for this model type, but
+ // we want this class to be as similar to other services as possible, so we
+ // follow the convention.
+ if (metadata_records->size() > 0 || !global_metadata.empty()) {
+ CreateChangeProcessor();
+ }
+
+ // Set this after OnChangeProcessorSet so that we can correctly avoid giving
+ // the processor empty metadata. We always want to set |has_metadata_loaded_|
+ // at this point so that we'll know to give a processor empty metadata if it
+ // is created later.
+ has_metadata_loaded_ = true;
+
+ if (!change_processor()) {
+ // This means we haven't been told to start syncing and we don't have any
+ // local metadata.
+ return;
+ }
+
+ std::unique_ptr<syncer::MetadataBatch> batch(new syncer::MetadataBatch());
+ sync_pb::ModelTypeState state;
+ if (state.ParseFromString(global_metadata)) {
+ batch->SetModelTypeState(state);
+ } else {
+ // TODO(skym): How bad is this scenario? We may be able to just give an
+ // empty batch to the processor and we'll treat corrupted data type state
+ // as no data type state at all. The question is do we want to add any of
+ // the entity metadata to the batch or completely skip that step? We're
+ // going to have to perform a merge shortly. Does this decision/logic even
+ // belong in this service?
+ change_processor()->OnMetadataLoaded(
+ change_processor()->CreateAndUploadError(
+ FROM_HERE, "Failed to deserialize global metadata."),
+ nullptr);
+ }
+ for (const syncer::ModelTypeStore::Record& r : *metadata_records.get()) {
+ sync_pb::EntityMetadata entity_metadata;
+ if (entity_metadata.ParseFromString(r.value)) {
+ batch->AddMetadata(r.id, entity_metadata);
+ } else {
+ // TODO(skym): This really isn't too bad. We just want to regenerate
+ // metadata for this particular entity. Unfortunately there isn't a
+ // convenient way to tell the processor to do this.
+ LOG(WARNING) << "Failed to deserialize entity metadata.";
+ }
+ }
+ change_processor()->OnMetadataLoaded(syncer::SyncError(), std::move(batch));
+}
+
+void ReadingListStore::OnDatabaseSave(syncer::ModelTypeStore::Result result) {
+ return;
+}
+
+void ReadingListStore::OnStoreCreated(
+ syncer::ModelTypeStore::Result result,
+ std::unique_ptr<syncer::ModelTypeStore> store) {
+ store_ = std::move(store);
+ store_->ReadAllData(
+ base::Bind(&ReadingListStore::OnDatabaseLoad, base::AsWeakPtr(this)));
+ return;
+}
+
+syncer::ModelTypeService* ReadingListStore::GetModelTypeService() {
+ return this;
+}
+
+// Creates an object used to communicate changes in the sync metadata to the
+// model type store.
+std::unique_ptr<syncer::MetadataChangeList>
+ReadingListStore::CreateMetadataChangeList() {
+ return base::MakeUnique<syncer::SimpleMetadataChangeList>();
+}
+
+// Perform the initial merge between local and sync data. This should only be
+// called when a data type is first enabled to start syncing, and there is no
+// sync metadata. Best effort should be made to match local and sync data. The
+// keys in the |entity_data_map| will have been created via GetClientTag(...),
+// and if local and sync data should match/merge but disagree on tags, the
+// service should use the sync data's tag. Any local pieces of data that are
+// not present in sync should immediately be Put(...) to the processor before
+// returning. The same MetadataChangeList that was passed into this function
+// can be passed to Put(...) calls. Delete(...) can also be called but should
+// not be needed for most model types. Durable storage writes, if not able to
+// combine all changes atomically, should save the metadata after the data
+// changes, so that this merge will be re-driven by sync if it is not
+// completely saved during the current run.
+syncer::SyncError ReadingListStore::MergeSyncData(
+ std::unique_ptr<syncer::MetadataChangeList> metadata_change_list,
+ syncer::EntityDataMap entity_data_map) {
+ // std::unique_ptr<syncer::ModelTypeStore::WriteBatch> batch =
+ // store_->CreateWriteBatch();
+ for (const auto& kv : entity_data_map) {
+ const sync_pb::ReadingListSpecifics& specifics =
+ kv.second.value().specifics.reading_list();
+
+ if (!model_->CallbackEntryURL(
+ GURL(specifics.url()),
+ base::Bind(&ReadingListStore::NoopEntry, base::AsWeakPtr(this)))) {
+ model_->AddEntry(GURL(specifics.url()), specifics.title());
+ }
+
+ // std::unique_ptr<reading_list::ReadingListLocal> proto =
+ // base::MakeUnique<reading_list::ReadingListLocal>();
+ //
+ // proto->set_allocated_entry(new
+ // sync_pb::ReadingListSpecifics(specifics));
+ //
+ // // Unref the URL before making asynchronous call.
+ // std::string local_key = specifics.url();
+ //
+ //
+ // store_->WriteData(batch.get(), local_key, proto->SerializeAsString());
+ }
+
+ // store_->CommitWriteBatch(
+ // std::move(batch),
+ // base::Bind(&ReadingListStore::OnDatabaseSave,
+ // base::AsWeakPtr(this)));
+ return syncer::SyncError();
+}
+
+// Apply changes from the sync server locally.
+// Please note that |entity_changes| might have fewer entries than
+// |metadata_change_list| when some of the data changes are filtered out, or
+// may even be empty when a commit confirmation is processed and only the
+// metadata needs to be persisted.
+syncer::SyncError ReadingListStore::ApplySyncChanges(
+ std::unique_ptr<syncer::MetadataChangeList> metadata_change_list,
+ syncer::EntityChangeList entity_changes) {
+ // std::unique_ptr<syncer::ModelTypeStore::WriteBatch> batch =
+ // store_->CreateWriteBatch();
+ for (syncer::EntityChange& change : entity_changes) {
+ if (change.type() == syncer::EntityChange::ACTION_DELETE) {
+ // if (model_->CallbackEntryURL(GURL(change.storage_key()),
+ // base::Bind(&ReadingListStore::NoopEntry,
+ // base::AsWeakPtr(this)))) {
+ // model_->RemoveEntryByUrl(GURL(specifics.url()));
+ // }
+ continue;
+ } else {
+ const sync_pb::ReadingListSpecifics& specifics =
+ change.data().specifics.reading_list();
+
+ if (!model_->CallbackEntryURL(GURL(specifics.url()),
+ base::Bind(&ReadingListStore::NoopEntry,
+ base::AsWeakPtr(this)))) {
+ model_->AddEntry(GURL(specifics.url()), specifics.title());
+ }
+
+ // proto->set_allocated_entry(new
+ // sync_pb::ReadingListSpecifics(specifics));
+ //
+ // // Unref the URL before making asynchronous call.
+ // std::string local_key = specifics.url();
+ //
+ //
+ // store_->WriteData(batch.get(), local_key,
+ // proto->SerializeAsString());
+ }
+ }
+ // store_->CommitWriteBatch(
+ // std::move(batch),
+ // base::Bind(&ReadingListStore::OnDatabaseSave,
+ // base::AsWeakPtr(this)));
+
+ return syncer::SyncError();
+}
+
+void ReadingListStore::NoopEntry(const ReadingListEntry&) {}
+
+// Asynchronously retrieve the corresponding sync data for |storage_keys|.
+void ReadingListStore::GetData(StorageKeyList storage_keys,
+ DataCallback callback) {
+ return;
+}
+
+// Asynchronously retrieve all of the local sync data.
+void ReadingListStore::GetAllData(DataCallback callback) {}
+
+// Get or generate a client tag for |entity_data|. This must be the same tag
+// that was/would have been generated in the SyncableService/Directory world
+// for backward compatibility with pre-USS clients. The only time this
+// theoretically needs to be called is on the creation of local data; however,
+// it is also used to verify the hash of remote data. If a data type was never
+// launched pre-USS, then this method does not need to be different from
+// GetStorageKey().
+std::string ReadingListStore::GetClientTag(
+ const syncer::EntityData& entity_data) {
+ return entity_data.specifics.reading_list().url();
+}
+
+// Get or generate a storage key for |entity_data|. This will only ever be
+// called once when first encountering a remote entity. Local changes will
+// provide their storage keys directly to Put instead of using this method.
+// Theoretically this function doesn't need to be stable across multiple calls
+// on the same or different clients, but to keep things simple, it probably
+// should be.
+std::string ReadingListStore::GetStorageKey(
+ const syncer::EntityData& entity_data) {
+ return entity_data.specifics.reading_list().url();
+}
+
+// Overridable notification for when the processor is set. This is typically
+// when the service should start loading metadata and subsequently give it to
+// the processor.
+void ReadingListStore::OnChangeProcessorSet() {
+ if (has_metadata_loaded_) {
+ change_processor()->OnMetadataLoaded(
+ syncer::SyncError(), base::MakeUnique<syncer::MetadataBatch>());
}
}
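
Load sequence after this patch, for reference. This is an informational summary of the callbacks already wired above, not new behavior:

// SetReadingListModel()      // runs |create_store_callback_|
//   -> OnStoreCreated()      // keeps the ModelTypeStore, calls ReadAllData()
//     -> OnDatabaseLoad()    // rebuilds read/unread lists, calls ReadAllMetadata()
//       -> OnReadAllMetadata() // maybe CreateChangeProcessor(), then
//                              // OnMetadataLoaded() with a MetadataBatch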
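
Note on the write path in SaveEntry: the serialized data and the sync metadata both end up in one ModelTypeStore write batch, while the entity itself goes to the change processor. Below is a minimal sketch of that pattern factored into one place, using only calls that already appear in this file; the helper name PutEntryToSync is illustrative, is not part of this patch, and its declaration would also need to be added to reading_list_store.h.

void ReadingListStore::PutEntryToSync(
    const reading_list::ReadingListLocal& pb_entry) {
  std::unique_ptr<syncer::ModelTypeStore::WriteBatch> batch =
      store_->CreateWriteBatch();
  const std::string storage_key = pb_entry.entry().url();

  // 1. The serialized local protobuf goes into the store batch.
  store_->WriteData(batch.get(), storage_key, pb_entry.SerializeAsString());

  // 2. The entity is handed to the change processor; the processor records
  //    its bookkeeping in the metadata change list.
  std::unique_ptr<syncer::MetadataChangeList> metadata_change_list =
      CreateMetadataChangeList();
  std::unique_ptr<syncer::EntityData> entity_data(new syncer::EntityData());
  *entity_data->specifics.mutable_reading_list() = pb_entry.entry();
  entity_data->non_unique_name = pb_entry.entry().url();
  change_processor()->Put(storage_key, std::move(entity_data),
                          metadata_change_list.get());

  // 3. The metadata changes are folded into the same batch so data and
  //    metadata are committed together.
  static_cast<syncer::SimpleMetadataChangeList*>(metadata_change_list.get())
      ->TransferChanges(store_.get(), batch.get());
  store_->CommitWriteBatch(
      std::move(batch),
      base::Bind(&ReadingListStore::OnDatabaseSave, base::AsWeakPtr(this)));
}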
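
MergeSyncData above only copies remote entries into the local model. The contract in its comment also asks for local entries not present in sync to be Put(...) to the processor before returning, reusing the same MetadataChangeList. A possible shape for that second half, placed inside MergeSyncData after the loop over |entity_data_map|, is sketched below. It is only an illustration: GetAllLocalEntries() is a hypothetical accessor standing in for whatever enumeration ReadingListModelImpl ends up exposing, and it assumes the |entity_data_map| keys are the URL specs produced by GetClientTag(), as the comment states.

  // Hypothetical second half of MergeSyncData: push local-only entries to the
  // processor. GetAllLocalEntries() does not exist in this patch.
  for (const ReadingListEntry& local_entry : model_->GetAllLocalEntries()) {
    const std::string key = local_entry.URL().spec();
    if (entity_data_map.find(key) != entity_data_map.end())
      continue;  // Sync already knows about this entry.

    std::unique_ptr<reading_list::ReadingListLocal> pb_entry =
        local_entry.AsReadingListLocal(/*read=*/false);
    std::unique_ptr<syncer::EntityData> entity_data(new syncer::EntityData());
    *entity_data->specifics.mutable_reading_list() = pb_entry->entry();
    entity_data->non_unique_name = pb_entry->entry().url();
    // The MetadataChangeList passed into MergeSyncData may be reused for
    // these Put(...) calls, per the contract above.
    change_processor()->Put(key, std::move(entity_data),
                            metadata_change_list.get());
  }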
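
The ACTION_DELETE branch of ApplySyncChanges is currently a no-op, with the handling commented out. Judging from that commented-out code, the eventual handling would look roughly like the sketch below; RemoveEntryByUrl appears only in the commented-out code of this patch, so its name and signature here are assumptions.

    if (change.type() == syncer::EntityChange::ACTION_DELETE) {
      const GURL url(change.storage_key());
      // CallbackEntryURL returns whether an entry for |url| exists; the bound
      // NoopEntry callback intentionally does nothing.
      if (model_->CallbackEntryURL(
              url,
              base::Bind(&ReadingListStore::NoopEntry,
                         base::AsWeakPtr(this)))) {
        model_->RemoveEntryByUrl(url);
      }
      continue;
    }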