Chromium Code Reviews
chromiumcodereview-hr@appspot.gserviceaccount.com (chromiumcodereview-hr) | Please choose your nickname with Settings | Help | Chromium Project | Gerrit Changes | Sign out
(1)

Unified Diff: components/sync/core/shared_model_type_processor_unittest.cc

Issue 2222373003: [Sync] Adding storage key concept for ModelTypeServices. (Closed) Base URL: https://chromium.googlesource.com/chromium/src.git@master
Patch Set: Comments and method name fixes. Created 4 years, 4 months ago
Use n/p to move between diff chunks; N/P to move between comments. Draft comments are only viewable by you.
Jump to:
View side-by-side diff with in-line comments
Download patch
Index: components/sync/core/shared_model_type_processor_unittest.cc
diff --git a/components/sync/core/shared_model_type_processor_unittest.cc b/components/sync/core/shared_model_type_processor_unittest.cc
index 669abf2799e3d72d681131b97711777d6c0ade44..f5fa5aabc4d575ae3560b7718e001267bb3f4d41 100644
--- a/components/sync/core/shared_model_type_processor_unittest.cc
+++ b/components/sync/core/shared_model_type_processor_unittest.cc
@@ -8,6 +8,7 @@
#include <stdint.h>
#include <map>
+#include <utility>
#include <vector>
#include "base/bind.h"
@@ -34,31 +35,50 @@ namespace syncer_v2 {
namespace {
-const std::string kTag1 = "tag1";
-const std::string kTag2 = "tag2";
-const std::string kTag3 = "tag3";
-const std::string kValue1 = "value1";
-const std::string kValue2 = "value2";
-const std::string kValue3 = "value3";
+std::string ClientTagFromName(const std::string& name) {
+ return "ClientTag_" + name;
+}
+
+std::string StorageKeyFromName(const std::string& name) {
maxbogue 2016/08/10 23:56:03 Just use the key as the name.
skym 2016/08/11 16:28:47 Done.
+ return "StorageKey_" + name;
+}
-std::string GenerateTagHash(const std::string& tag) {
- return syncer::syncable::GenerateSyncableHash(syncer::PREFERENCES, tag);
+std::string TagHashFromName(const std::string& name) {
+ return syncer::syncable::GenerateSyncableHash(syncer::PREFERENCES,
+ ClientTagFromName(name));
}
-sync_pb::EntitySpecifics GenerateSpecifics(const std::string& tag,
+const char kName1[] = "name1";
maxbogue 2016/08/10 23:56:03 I think it will make everything simpler if these a
skym 2016/08/11 16:28:47 Done.
+const char kName2[] = "name2";
+const char kName3[] = "name3";
+const char kName4[] = "name4";
+const char kName5[] = "name5";
+const char kValue1[] = "value1";
+const char kValue2[] = "value2";
+const char kValue3[] = "value3";
+const std::string kHash1(TagHashFromName(kName1));
+const std::string kHash2(TagHashFromName(kName2));
+const std::string kHash3(TagHashFromName(kName3));
+const std::string kHash4(TagHashFromName(kName4));
+const std::string kHash5(TagHashFromName(kName5));
+const std::string kKey1(StorageKeyFromName(kName1));
+const std::string kKey2(StorageKeyFromName(kName2));
+const sync_pb::EntitySpecifics kEmptySpecifics;
+
+sync_pb::EntitySpecifics GenerateSpecifics(const std::string& name,
const std::string& value) {
sync_pb::EntitySpecifics specifics;
- specifics.mutable_preference()->set_name(tag);
+ specifics.mutable_preference()->set_name(name);
specifics.mutable_preference()->set_value(value);
return specifics;
}
-std::unique_ptr<EntityData> GenerateEntityData(const std::string& tag,
+std::unique_ptr<EntityData> GenerateEntityData(const std::string& name,
const std::string& value) {
std::unique_ptr<EntityData> entity_data = base::WrapUnique(new EntityData());
- entity_data->client_tag_hash = GenerateTagHash(tag);
- entity_data->specifics = GenerateSpecifics(tag, value);
- entity_data->non_unique_name = tag;
+ entity_data->client_tag_hash = TagHashFromName(name);
+ entity_data->specifics = GenerateSpecifics(name, value);
+ entity_data->non_unique_name = name;
return entity_data;
}
@@ -86,49 +106,49 @@ syncer::SyncError CreateSyncError(syncer::SyncError::ErrorType error_type) {
// committed, etc. Having a separate class helps keep the main one cleaner.
class SimpleStore {
public:
- void PutData(const std::string& tag, const EntityData& data) {
+ void PutData(const std::string& key, const EntityData& data) {
data_change_count_++;
- data_store_[tag] = CopyEntityData(data);
+ data_store_[key] = CopyEntityData(data);
}
- void PutMetadata(const std::string& tag,
+ void PutMetadata(const std::string& key,
const sync_pb::EntityMetadata& metadata) {
metadata_change_count_++;
- metadata_store_[tag] = metadata;
+ metadata_store_[key] = metadata;
}
- void RemoveData(const std::string& tag) {
+ void RemoveData(const std::string& key) {
data_change_count_++;
- data_store_.erase(tag);
+ data_store_.erase(key);
}
- void RemoveMetadata(const std::string& tag) {
+ void RemoveMetadata(const std::string& key) {
metadata_change_count_++;
- metadata_store_.erase(tag);
+ metadata_store_.erase(key);
}
- bool HasData(const std::string& tag) const {
- return data_store_.find(tag) != data_store_.end();
+ bool HasData(const std::string& key) const {
+ return data_store_.find(key) != data_store_.end();
}
- bool HasMetadata(const std::string& tag) const {
- return metadata_store_.find(tag) != metadata_store_.end();
+ bool HasMetadata(const std::string& key) const {
+ return metadata_store_.find(key) != metadata_store_.end();
}
const std::map<std::string, std::unique_ptr<EntityData>>& GetAllData() const {
return data_store_;
}
- const EntityData& GetData(const std::string& tag) const {
- return *data_store_.find(tag)->second;
+ const EntityData& GetData(const std::string& key) const {
+ return *data_store_.find(key)->second;
}
- const std::string& GetValue(const std::string& tag) const {
- return GetData(tag).specifics.preference().value();
+ const std::string& GetValue(const std::string& key) const {
+ return GetData(key).specifics.preference().value();
}
- const sync_pb::EntityMetadata& GetMetadata(const std::string& tag) const {
- return metadata_store_.find(tag)->second;
+ const sync_pb::EntityMetadata& GetMetadata(const std::string& key) const {
+ return metadata_store_.find(key)->second;
}
size_t DataCount() const { return data_store_.size(); }
@@ -237,36 +257,43 @@ class SharedModelTypeProcessorTest : public ::testing::Test,
}
// Local data modification. Emulates signals from the model thread.
- void WriteItem(const std::string& tag, const std::string& value) {
- WriteItem(tag, GenerateEntityData(tag, value));
+ sync_pb::EntitySpecifics WriteItem(const std::string& name,
maxbogue 2016/08/10 23:56:03 No need to return specifics.
skym 2016/08/11 16:28:47 Acknowledged.
+ const std::string& value) {
+ std::unique_ptr<EntityData> entity_data = GenerateEntityData(name, value);
+ sync_pb::EntitySpecifics specifics_copy = entity_data->specifics;
+ WriteItem(name, std::move(entity_data));
+ return specifics_copy;
}
// Overloaded form to allow passing of custom entity data.
- void WriteItem(const std::string& tag,
+ void WriteItem(const std::string& name,
std::unique_ptr<EntityData> entity_data) {
- db_.PutData(tag, *entity_data);
+ db_.PutData(StorageKeyFromName(name), *entity_data);
if (type_processor()) {
std::unique_ptr<MetadataChangeList> change_list(
new SimpleMetadataChangeList());
- type_processor()->Put(tag, std::move(entity_data), change_list.get());
+ type_processor()->Put(StorageKeyFromName(name), std::move(entity_data),
+ change_list.get());
ApplyMetadataChangeList(std::move(change_list));
}
}
- // Writes data for |tag| and simulates a commit response for it.
- void WriteItemAndAck(const std::string& tag, const std::string& value) {
- WriteItem(tag, value);
- worker()->ExpectPendingCommits({tag});
+ // Writes data for |name| and simulates a commit response for it.
+ sync_pb::EntitySpecifics WriteItemAndAck(const std::string& name,
+ const std::string& value) {
+ sync_pb::EntitySpecifics specifics = WriteItem(name, value);
+ worker()->ExpectPendingCommits({TagHashFromName(name)});
worker()->AckOnePendingCommit();
EXPECT_EQ(0U, worker()->GetNumPendingCommits());
+ return specifics;
}
- void DeleteItem(const std::string& tag) {
- db_.RemoveData(tag);
+ void DeleteItem(const std::string& name) {
+ db_.RemoveData(StorageKeyFromName(name));
if (type_processor()) {
std::unique_ptr<MetadataChangeList> change_list(
new SimpleMetadataChangeList());
- type_processor()->Delete(tag, change_list.get());
+ type_processor()->Delete(StorageKeyFromName(name), change_list.get());
ApplyMetadataChangeList(std::move(change_list));
}
}
@@ -279,25 +306,27 @@ class SharedModelTypeProcessorTest : public ::testing::Test,
}
// Wipes existing DB and simulates a pending update of a server-known item.
- void ResetStateWriteItem(const std::string& tag, const std::string& value) {
+ sync_pb::EntitySpecifics ResetStateWriteItem(const std::string& name,
+ const std::string& value) {
ResetState();
InitializeToReadyState();
EXPECT_EQ(0U, ProcessorEntityCount());
- WriteItemAndAck(tag, "acked-value");
- WriteItem(tag, value);
+ WriteItemAndAck(name, "acked-value");
+ sync_pb::EntitySpecifics specifics = WriteItem(name, value);
EXPECT_EQ(1U, ProcessorEntityCount());
clear_change_processor();
worker_ = nullptr;
+ return specifics;
}
// Wipes existing DB and simulates a pending deletion of a server-known item.
- void ResetStateDeleteItem(const std::string& tag, const std::string& value) {
+ void ResetStateDeleteItem(const std::string& name, const std::string& value) {
ResetState();
InitializeToReadyState();
EXPECT_EQ(0U, ProcessorEntityCount());
- WriteItemAndAck(tag, value);
+ WriteItemAndAck(name, value);
EXPECT_EQ(1U, ProcessorEntityCount());
- DeleteItem(tag);
+ DeleteItem(name);
EXPECT_EQ(1U, ProcessorEntityCount());
clear_change_processor();
worker_ = nullptr;
@@ -314,10 +343,10 @@ class SharedModelTypeProcessorTest : public ::testing::Test,
void OnInitialSyncDone() { OnInitialSyncDone(UpdateResponseDataList()); }
// Overloaded form that constructs an update for a single entity.
- void OnInitialSyncDone(const std::string& tag, const std::string& value) {
+ void OnInitialSyncDone(const std::string& name, const std::string& value) {
UpdateResponseDataList updates;
UpdateResponseData update;
- update.entity = GenerateEntityData(tag, value)->PassToPtr();
+ update.entity = GenerateEntityData(name, value)->PassToPtr();
updates.push_back(update);
OnInitialSyncDone(updates);
}
@@ -387,8 +416,11 @@ class SharedModelTypeProcessorTest : public ::testing::Test,
// FakeModelTypeService overrides.
std::string GetClientTag(const EntityData& entity_data) override {
- // The tag is the preference name - see GenerateSpecifics.
- return entity_data.specifics.preference().name();
+ return ClientTagFromName(entity_data.specifics.preference().name());
+ }
+
+ std::string GetStorageKey(const EntityData& entity_data) override {
+ return StorageKeyFromName(entity_data.specifics.preference().name());
}
std::unique_ptr<MetadataChangeList> CreateMetadataChangeList() override {
@@ -430,16 +462,16 @@ class SharedModelTypeProcessorTest : public ::testing::Test,
for (const EntityChange& change : entity_changes) {
switch (change.type()) {
case EntityChange::ACTION_ADD:
- EXPECT_FALSE(db_.HasData(change.client_tag()));
- db_.PutData(change.client_tag(), change.data());
+ EXPECT_FALSE(db_.HasData(change.storage_key()));
+ db_.PutData(change.storage_key(), change.data());
break;
case EntityChange::ACTION_UPDATE:
- EXPECT_TRUE(db_.HasData(change.client_tag()));
- db_.PutData(change.client_tag(), change.data());
+ EXPECT_TRUE(db_.HasData(change.storage_key()));
+ db_.PutData(change.storage_key(), change.data());
break;
case EntityChange::ACTION_DELETE:
- EXPECT_TRUE(db_.HasData(change.client_tag()));
- db_.RemoveData(change.client_tag());
+ EXPECT_TRUE(db_.HasData(change.storage_key()));
+ db_.RemoveData(change.storage_key());
break;
}
}
@@ -479,16 +511,16 @@ class SharedModelTypeProcessorTest : public ::testing::Test,
}
}
- void GetData(ClientTagList tags, DataCallback callback) override {
+ void GetData(StorageKeyList keys, DataCallback callback) override {
if (service_error_.IsSet()) {
data_callback_ = base::Bind(callback, service_error_, nullptr);
service_error_ = syncer::SyncError();
return;
}
std::unique_ptr<DataBatchImpl> batch(new DataBatchImpl());
- for (const std::string& tag : tags) {
- DCHECK(db_.HasData(tag)) << "No data for " << tag;
- batch->Put(tag, CopyEntityData(db_.GetData(tag)));
+ for (const std::string& key : keys) {
+ DCHECK(db_.HasData(key)) << "No data for " << key;
+ batch->Put(key, CopyEntityData(db_.GetData(key)));
}
data_callback_ =
base::Bind(callback, syncer::SyncError(), base::Passed(&batch));
@@ -533,7 +565,7 @@ TEST_F(SharedModelTypeProcessorTest, InitialSync) {
OnSyncStarting();
// Local write before initial sync.
- WriteItem(kTag1, kValue1);
+ WriteItem(kName1, kValue1);
// Has data, but no metadata, entity in the processor, or commit request.
EXPECT_EQ(1U, db().DataCount());
@@ -542,16 +574,16 @@ TEST_F(SharedModelTypeProcessorTest, InitialSync) {
EXPECT_EQ(0U, worker()->GetNumPendingCommits());
// Initial sync with one server item.
- OnInitialSyncDone(kTag2, kValue2);
+ OnInitialSyncDone(kName2, kValue2);
// Now have data and metadata for both items, as well as a commit request for
// the local item.
EXPECT_EQ(2U, db().DataCount());
EXPECT_EQ(2U, db().MetadataCount());
EXPECT_EQ(2U, ProcessorEntityCount());
- EXPECT_EQ(1, db().GetMetadata(kTag1).sequence_number());
- EXPECT_EQ(0, db().GetMetadata(kTag2).sequence_number());
- worker()->ExpectPendingCommits({kTag1});
+ EXPECT_EQ(1, db().GetMetadata(kKey1).sequence_number());
+ EXPECT_EQ(0, db().GetMetadata(kKey2).sequence_number());
+ worker()->ExpectPendingCommits({kHash1});
}
// Test that an error during the merge is propagated to the error handler.
@@ -582,7 +614,7 @@ TEST_F(SharedModelTypeProcessorTest, StartErrors) {
CreateSyncError(syncer::SyncError::DATATYPE_ERROR), nullptr);
// Test an error loading pending data.
- ResetStateWriteItem(kTag1, kValue1);
+ ResetStateWriteItem(kName1, kValue1);
SetServiceError(syncer::SyncError::DATATYPE_ERROR);
InitializeToMetadataLoaded();
OnPendingCommitDataLoaded();
@@ -602,137 +634,137 @@ TEST_F(SharedModelTypeProcessorTest, StartErrors) {
// This results in 2 + 12 = 14 orderings of the events.
TEST_F(SharedModelTypeProcessorTest, LoadPendingCommit) {
// Data, connect.
- ResetStateWriteItem(kTag1, kValue1);
+ sync_pb::EntitySpecifics specifics1(ResetStateWriteItem(kName1, kValue1));
InitializeToMetadataLoaded();
OnPendingCommitDataLoaded();
OnSyncStarting();
EXPECT_EQ(1U, worker()->GetNumPendingCommits());
- worker()->ExpectNthPendingCommit(0, kTag1, kValue1);
+ worker()->ExpectNthPendingCommit(0, kHash1, specifics1);
// Connect, data.
- ResetStateWriteItem(kTag1, kValue1);
+ sync_pb::EntitySpecifics specifics2(ResetStateWriteItem(kName1, kValue1));
InitializeToMetadataLoaded();
OnSyncStarting();
EXPECT_EQ(nullptr, worker());
OnPendingCommitDataLoaded();
EXPECT_EQ(1U, worker()->GetNumPendingCommits());
- worker()->ExpectNthPendingCommit(0, kTag1, kValue1);
+ worker()->ExpectNthPendingCommit(0, kHash1, specifics2);
// Data, connect, put.
- ResetStateWriteItem(kTag1, kValue1);
+ sync_pb::EntitySpecifics specifics3(ResetStateWriteItem(kName1, kValue1));
InitializeToMetadataLoaded();
OnPendingCommitDataLoaded();
OnSyncStarting();
- WriteItem(kTag1, kValue2);
+ sync_pb::EntitySpecifics specifics4(WriteItem(kName1, kValue2));
EXPECT_EQ(2U, worker()->GetNumPendingCommits());
- worker()->ExpectNthPendingCommit(0, kTag1, kValue1);
- worker()->ExpectNthPendingCommit(1, kTag1, kValue2);
+ worker()->ExpectNthPendingCommit(0, kHash1, specifics3);
+ worker()->ExpectNthPendingCommit(1, kHash1, specifics4);
// Data, put, connect.
- ResetStateWriteItem(kTag1, kValue1);
+ ResetStateWriteItem(kName1, kValue1);
InitializeToMetadataLoaded();
OnPendingCommitDataLoaded();
- WriteItem(kTag1, kValue2);
+ sync_pb::EntitySpecifics specifics5(WriteItem(kName1, kValue2));
OnSyncStarting();
EXPECT_EQ(1U, worker()->GetNumPendingCommits());
- worker()->ExpectNthPendingCommit(0, kTag1, kValue2);
+ worker()->ExpectNthPendingCommit(0, kHash1, specifics5);
// Connect, data, put.
- ResetStateWriteItem(kTag1, kValue1);
+ sync_pb::EntitySpecifics specifics6(ResetStateWriteItem(kName1, kValue1));
InitializeToMetadataLoaded();
OnSyncStarting();
OnPendingCommitDataLoaded();
- WriteItem(kTag1, kValue2);
+ sync_pb::EntitySpecifics specifics7(WriteItem(kName1, kValue2));
EXPECT_EQ(2U, worker()->GetNumPendingCommits());
- worker()->ExpectNthPendingCommit(0, kTag1, kValue1);
- worker()->ExpectNthPendingCommit(1, kTag1, kValue2);
+ worker()->ExpectNthPendingCommit(0, kHash1, specifics6);
+ worker()->ExpectNthPendingCommit(1, kHash1, specifics7);
// Connect, put, data.
- ResetStateWriteItem(kTag1, kValue1);
+ ResetStateWriteItem(kName1, kValue1);
InitializeToMetadataLoaded();
OnSyncStarting();
- WriteItem(kTag1, kValue2);
+ sync_pb::EntitySpecifics specifics8(WriteItem(kName1, kValue2));
EXPECT_EQ(nullptr, worker());
OnPendingCommitDataLoaded();
EXPECT_EQ(1U, worker()->GetNumPendingCommits());
- worker()->ExpectNthPendingCommit(0, kTag1, kValue2);
+ worker()->ExpectNthPendingCommit(0, kHash1, specifics8);
// Put, data, connect.
- ResetStateWriteItem(kTag1, kValue1);
+ ResetStateWriteItem(kName1, kValue1);
InitializeToMetadataLoaded();
- WriteItem(kTag1, kValue2);
+ sync_pb::EntitySpecifics specifics9(WriteItem(kName1, kValue2));
OnPendingCommitDataLoaded();
OnSyncStarting();
EXPECT_EQ(1U, worker()->GetNumPendingCommits());
- worker()->ExpectNthPendingCommit(0, kTag1, kValue2);
+ worker()->ExpectNthPendingCommit(0, kHash1, specifics9);
// Put, connect, data.
- ResetStateWriteItem(kTag1, kValue1);
+ ResetStateWriteItem(kName1, kValue1);
InitializeToMetadataLoaded();
- WriteItem(kTag1, kValue2);
+ sync_pb::EntitySpecifics specifics10(WriteItem(kName1, kValue2));
OnSyncStarting();
EXPECT_EQ(nullptr, worker());
OnPendingCommitDataLoaded();
EXPECT_EQ(1U, worker()->GetNumPendingCommits());
- worker()->ExpectNthPendingCommit(0, kTag1, kValue2);
+ worker()->ExpectNthPendingCommit(0, kHash1, specifics10);
// Data, connect, delete.
- ResetStateWriteItem(kTag1, kValue1);
+ sync_pb::EntitySpecifics specifics11(ResetStateWriteItem(kName1, kValue1));
InitializeToMetadataLoaded();
OnPendingCommitDataLoaded();
OnSyncStarting();
- DeleteItem(kTag1);
+ DeleteItem(kName1);
EXPECT_EQ(2U, worker()->GetNumPendingCommits());
- worker()->ExpectNthPendingCommit(0, kTag1, kValue1);
- worker()->ExpectNthPendingCommit(1, kTag1, "");
+ worker()->ExpectNthPendingCommit(0, kHash1, specifics11);
+ worker()->ExpectNthPendingCommit(1, kHash1, kEmptySpecifics);
// Data, delete, connect.
- ResetStateWriteItem(kTag1, kValue1);
+ ResetStateWriteItem(kName1, kValue1);
InitializeToMetadataLoaded();
OnPendingCommitDataLoaded();
- DeleteItem(kTag1);
+ DeleteItem(kName1);
OnSyncStarting();
EXPECT_EQ(1U, worker()->GetNumPendingCommits());
- worker()->ExpectNthPendingCommit(0, kTag1, "");
+ worker()->ExpectNthPendingCommit(0, kHash1, kEmptySpecifics);
// Connect, data, delete.
- ResetStateWriteItem(kTag1, kValue1);
+ sync_pb::EntitySpecifics specifics12(ResetStateWriteItem(kName1, kValue1));
InitializeToMetadataLoaded();
OnSyncStarting();
OnPendingCommitDataLoaded();
- DeleteItem(kTag1);
+ DeleteItem(kName1);
EXPECT_EQ(2U, worker()->GetNumPendingCommits());
- worker()->ExpectNthPendingCommit(0, kTag1, kValue1);
- worker()->ExpectNthPendingCommit(1, kTag1, "");
+ worker()->ExpectNthPendingCommit(0, kHash1, specifics12);
+ worker()->ExpectNthPendingCommit(1, kHash1, kEmptySpecifics);
// Connect, delete, data.
- ResetStateWriteItem(kTag1, kValue1);
+ ResetStateWriteItem(kName1, kValue1);
InitializeToMetadataLoaded();
OnSyncStarting();
- DeleteItem(kTag1);
+ DeleteItem(kName1);
EXPECT_EQ(nullptr, worker());
OnPendingCommitDataLoaded();
EXPECT_EQ(1U, worker()->GetNumPendingCommits());
- worker()->ExpectNthPendingCommit(0, kTag1, "");
+ worker()->ExpectNthPendingCommit(0, kHash1, kEmptySpecifics);
// Delete, data, connect.
- ResetStateWriteItem(kTag1, kValue1);
+ ResetStateWriteItem(kName1, kValue1);
InitializeToMetadataLoaded();
- DeleteItem(kTag1);
+ DeleteItem(kName1);
OnPendingCommitDataLoaded();
OnSyncStarting();
EXPECT_EQ(1U, worker()->GetNumPendingCommits());
- worker()->ExpectNthPendingCommit(0, kTag1, "");
+ worker()->ExpectNthPendingCommit(0, kHash1, kEmptySpecifics);
// Delete, connect, data.
- ResetStateWriteItem(kTag1, kValue1);
+ ResetStateWriteItem(kName1, kValue1);
InitializeToMetadataLoaded();
- DeleteItem(kTag1);
+ DeleteItem(kName1);
OnSyncStarting();
EXPECT_EQ(nullptr, worker());
OnPendingCommitDataLoaded();
EXPECT_EQ(1U, worker()->GetNumPendingCommits());
- worker()->ExpectNthPendingCommit(0, kTag1, "");
+ worker()->ExpectNthPendingCommit(0, kHash1, kEmptySpecifics);
}
// This test covers race conditions during loading a pending delete. All cases
@@ -747,53 +779,53 @@ TEST_F(SharedModelTypeProcessorTest, LoadPendingCommit) {
// This results in 1 + 4 = 5 orderings of the events.
TEST_F(SharedModelTypeProcessorTest, LoadPendingDelete) {
// Connect.
- ResetStateDeleteItem(kTag1, kValue1);
+ ResetStateDeleteItem(kName1, kValue1);
InitializeToMetadataLoaded();
OnSyncStarting();
EXPECT_EQ(1U, worker()->GetNumPendingCommits());
- worker()->ExpectNthPendingCommit(0, kTag1, "");
+ worker()->ExpectNthPendingCommit(0, kHash1, kEmptySpecifics);
// Connect, put.
- ResetStateDeleteItem(kTag1, kValue1);
+ ResetStateDeleteItem(kName1, kValue1);
InitializeToMetadataLoaded();
OnSyncStarting();
EXPECT_EQ(1U, worker()->GetNumPendingCommits());
- WriteItem(kTag1, kValue2);
+ sync_pb::EntitySpecifics specifics1(WriteItem(kName1, kValue2));
EXPECT_EQ(2U, worker()->GetNumPendingCommits());
- worker()->ExpectNthPendingCommit(0, kTag1, "");
- worker()->ExpectNthPendingCommit(1, kTag1, kValue2);
+ worker()->ExpectNthPendingCommit(0, kHash1, kEmptySpecifics);
+ worker()->ExpectNthPendingCommit(1, kHash1, specifics1);
// Put, connect.
- ResetStateDeleteItem(kTag1, kValue1);
+ ResetStateDeleteItem(kName1, kValue1);
InitializeToMetadataLoaded();
- WriteItem(kTag1, kValue2);
+ sync_pb::EntitySpecifics specifics2(WriteItem(kName1, kValue2));
OnSyncStarting();
EXPECT_EQ(1U, worker()->GetNumPendingCommits());
- worker()->ExpectNthPendingCommit(0, kTag1, kValue2);
+ worker()->ExpectNthPendingCommit(0, kHash1, specifics2);
// Connect, delete.
- ResetStateDeleteItem(kTag1, kValue1);
+ ResetStateDeleteItem(kName1, kValue1);
InitializeToMetadataLoaded();
OnSyncStarting();
EXPECT_EQ(1U, worker()->GetNumPendingCommits());
- DeleteItem(kTag1);
+ DeleteItem(kName1);
EXPECT_EQ(2U, worker()->GetNumPendingCommits());
- worker()->ExpectNthPendingCommit(0, kTag1, "");
- worker()->ExpectNthPendingCommit(1, kTag1, "");
+ worker()->ExpectNthPendingCommit(0, kHash1, kEmptySpecifics);
+ worker()->ExpectNthPendingCommit(1, kHash1, kEmptySpecifics);
// Delete, connect.
- ResetStateDeleteItem(kTag1, kValue1);
+ ResetStateDeleteItem(kName1, kValue1);
InitializeToMetadataLoaded();
- DeleteItem(kTag1);
+ DeleteItem(kName1);
OnSyncStarting();
EXPECT_EQ(1U, worker()->GetNumPendingCommits());
- worker()->ExpectNthPendingCommit(0, kTag1, "");
+ worker()->ExpectNthPendingCommit(0, kHash1, kEmptySpecifics);
}
// Test that loading a committed item does not queue another commit.
TEST_F(SharedModelTypeProcessorTest, LoadCommited) {
InitializeToReadyState();
- WriteItemAndAck(kTag1, kValue1);
+ WriteItemAndAck(kName1, kValue1);
clear_change_processor();
// Test that a new processor loads the metadata without committing.
@@ -808,25 +840,25 @@ TEST_F(SharedModelTypeProcessorTest, LocalCreateItem) {
InitializeToReadyState();
EXPECT_EQ(0U, worker()->GetNumPendingCommits());
- WriteItem(kTag1, kValue1);
+ WriteItem(kName1, kValue1);
// Verify the commit request this operation has triggered.
- worker()->ExpectPendingCommits({kTag1});
+ worker()->ExpectPendingCommits({kHash1});
const CommitRequestData& tag1_request_data =
- worker()->GetLatestPendingCommitForTag(kTag1);
+ worker()->GetLatestPendingCommitForHash(kHash1);
const EntityData& tag1_data = tag1_request_data.entity.value();
EXPECT_EQ(kUncommittedVersion, tag1_request_data.base_version);
EXPECT_TRUE(tag1_data.id.empty());
EXPECT_FALSE(tag1_data.creation_time.is_null());
EXPECT_FALSE(tag1_data.modification_time.is_null());
- EXPECT_EQ(kTag1, tag1_data.non_unique_name);
+ EXPECT_EQ(kName1, tag1_data.non_unique_name);
EXPECT_FALSE(tag1_data.is_deleted());
- EXPECT_EQ(kTag1, tag1_data.specifics.preference().name());
+ EXPECT_EQ(kName1, tag1_data.specifics.preference().name());
EXPECT_EQ(kValue1, tag1_data.specifics.preference().value());
EXPECT_EQ(1U, db().MetadataCount());
- const sync_pb::EntityMetadata metadata = db().GetMetadata(kTag1);
+ const sync_pb::EntityMetadata metadata = db().GetMetadata(kKey1);
EXPECT_TRUE(metadata.has_client_tag_hash());
EXPECT_FALSE(metadata.has_server_id());
EXPECT_FALSE(metadata.is_deleted());
@@ -839,7 +871,7 @@ TEST_F(SharedModelTypeProcessorTest, LocalCreateItem) {
worker()->AckOnePendingCommit();
EXPECT_EQ(1U, db().MetadataCount());
- const sync_pb::EntityMetadata acked_metadata = db().GetMetadata(kTag1);
+ const sync_pb::EntityMetadata acked_metadata = db().GetMetadata(kKey1);
EXPECT_TRUE(acked_metadata.has_server_id());
EXPECT_EQ(1, acked_metadata.sequence_number());
EXPECT_EQ(1, acked_metadata.acked_sequence_number());
@@ -850,7 +882,7 @@ TEST_F(SharedModelTypeProcessorTest, LocalCreateItem) {
// propagated to the error handler.
TEST_F(SharedModelTypeProcessorTest, ErrorApplyingAck) {
InitializeToReadyState();
- WriteItem(kTag1, kValue1);
+ WriteItem(kName1, kValue1);
SetServiceError(syncer::SyncError::DATATYPE_ERROR);
error_handler()->ExpectError(syncer::SyncError::DATATYPE_ERROR);
worker()->AckOnePendingCommit();
@@ -861,9 +893,7 @@ TEST_F(SharedModelTypeProcessorTest, ErrorApplyingAck) {
TEST_F(SharedModelTypeProcessorTest, LocalUpdateItemWithOverrides) {
const std::string kId1 = "cid1";
const std::string kId2 = "cid2";
- const std::string kName1 = "name1";
- const std::string kName2 = "name2";
- const std::string kTag3Hash = GenerateTagHash(kTag3);
+ const std::string kName3Hash = TagHashFromName(kName3);
InitializeToReadyState();
EXPECT_EQ(0U, worker()->GetNumPendingCommits());
@@ -873,40 +903,42 @@ TEST_F(SharedModelTypeProcessorTest, LocalUpdateItemWithOverrides) {
entity_data->specifics.mutable_preference()->set_value(kValue1);
entity_data->non_unique_name = kName1;
- entity_data->client_tag_hash = kTag3Hash;
+ entity_data->client_tag_hash = kName3Hash;
entity_data->id = kId1;
- WriteItem(kTag1, std::move(entity_data));
+ WriteItem(kName1, std::move(entity_data));
EXPECT_EQ(1U, worker()->GetNumPendingCommits());
- ASSERT_FALSE(worker()->HasPendingCommitForTag(kTag3));
- ASSERT_TRUE(worker()->HasPendingCommitForTag(kTag1));
+ ASSERT_FALSE(worker()->HasPendingCommitForHash(kHash3));
+ ASSERT_TRUE(worker()->HasPendingCommitForHash(kHash1));
EXPECT_EQ(1U, db().MetadataCount());
const EntityData& out_entity1 =
- worker()->GetLatestPendingCommitForTag(kTag1).entity.value();
- const sync_pb::EntityMetadata metadata_v1 = db().GetMetadata(kTag1);
+ worker()->GetLatestPendingCommitForHash(kHash1).entity.value();
+ const sync_pb::EntityMetadata metadata_v1 = db().GetMetadata(kKey1);
EXPECT_EQ(kId1, out_entity1.id);
- EXPECT_NE(kTag3Hash, out_entity1.client_tag_hash);
+ EXPECT_NE(kName3Hash, out_entity1.client_tag_hash);
EXPECT_EQ(kValue1, out_entity1.specifics.preference().value());
EXPECT_EQ(kId1, metadata_v1.server_id());
EXPECT_EQ(metadata_v1.client_tag_hash(), out_entity1.client_tag_hash);
entity_data.reset(new EntityData());
+ // This is a sketchy move here, changing the name will change the generated
+ // storage key and client tag values.
entity_data->specifics.mutable_preference()->set_name(kName2);
entity_data->specifics.mutable_preference()->set_value(kValue2);
entity_data->non_unique_name = kName2;
- entity_data->client_tag_hash = kTag3Hash;
+ entity_data->client_tag_hash = kName3Hash;
// Make sure ID isn't overwritten either.
entity_data->id = kId2;
- WriteItem(kTag1, std::move(entity_data));
+ WriteItem(kName1, std::move(entity_data));
EXPECT_EQ(2U, worker()->GetNumPendingCommits());
- ASSERT_FALSE(worker()->HasPendingCommitForTag(kTag3));
- ASSERT_TRUE(worker()->HasPendingCommitForTag(kTag1));
+ ASSERT_FALSE(worker()->HasPendingCommitForHash(kHash3));
+ ASSERT_TRUE(worker()->HasPendingCommitForHash(kHash1));
EXPECT_EQ(1U, db().MetadataCount());
const EntityData& out_entity2 =
- worker()->GetLatestPendingCommitForTag(kTag1).entity.value();
- const sync_pb::EntityMetadata metadata_v2 = db().GetMetadata(kTag1);
+ worker()->GetLatestPendingCommitForHash(kHash1).entity.value();
+ const sync_pb::EntityMetadata metadata_v2 = db().GetMetadata(kKey1);
EXPECT_EQ(kValue2, out_entity2.specifics.preference().value());
// Should still see old cid1 value, override is not respected on update.
@@ -923,23 +955,23 @@ TEST_F(SharedModelTypeProcessorTest, LocalUpdateItemWithOverrides) {
TEST_F(SharedModelTypeProcessorTest, LocalUpdateItem) {
InitializeToReadyState();
- WriteItem(kTag1, kValue1);
+ WriteItem(kName1, kValue1);
EXPECT_EQ(1U, db().MetadataCount());
- worker()->ExpectPendingCommits({kTag1});
+ worker()->ExpectPendingCommits({kHash1});
const CommitRequestData& request_data_v1 =
- worker()->GetLatestPendingCommitForTag(kTag1);
+ worker()->GetLatestPendingCommitForHash(kHash1);
const EntityData& data_v1 = request_data_v1.entity.value();
- const sync_pb::EntityMetadata metadata_v1 = db().GetMetadata(kTag1);
+ const sync_pb::EntityMetadata metadata_v1 = db().GetMetadata(kKey1);
- WriteItem(kTag1, kValue2);
+ WriteItem(kName1, kValue2);
EXPECT_EQ(1U, db().MetadataCount());
- worker()->ExpectPendingCommits({kTag1, kTag1});
+ worker()->ExpectPendingCommits({kHash1, kHash1});
const CommitRequestData& request_data_v2 =
- worker()->GetLatestPendingCommitForTag(kTag1);
+ worker()->GetLatestPendingCommitForHash(kHash1);
const EntityData& data_v2 = request_data_v2.entity.value();
- const sync_pb::EntityMetadata metadata_v2 = db().GetMetadata(kTag1);
+ const sync_pb::EntityMetadata metadata_v2 = db().GetMetadata(kKey1);
// Test some of the relations between old and new commit requests.
EXPECT_GT(request_data_v2.sequence_number, request_data_v1.sequence_number);
@@ -950,9 +982,9 @@ TEST_F(SharedModelTypeProcessorTest, LocalUpdateItem) {
EXPECT_TRUE(data_v2.id.empty());
EXPECT_FALSE(data_v2.creation_time.is_null());
EXPECT_FALSE(data_v2.modification_time.is_null());
- EXPECT_EQ(kTag1, data_v2.non_unique_name);
+ EXPECT_EQ(kName1, data_v2.non_unique_name);
EXPECT_FALSE(data_v2.is_deleted());
- EXPECT_EQ(kTag1, data_v2.specifics.preference().name());
+ EXPECT_EQ(kName1, data_v2.specifics.preference().name());
EXPECT_EQ(kValue2, data_v2.specifics.preference().value());
EXPECT_FALSE(metadata_v1.has_server_id());
@@ -975,33 +1007,33 @@ TEST_F(SharedModelTypeProcessorTest, LocalUpdateItem) {
// commit request.
TEST_F(SharedModelTypeProcessorTest, LocalUpdateItemRedundant) {
InitializeToReadyState();
- WriteItem(kTag1, kValue1);
+ WriteItem(kName1, kValue1);
EXPECT_EQ(1U, db().MetadataCount());
- worker()->ExpectPendingCommits({kTag1});
+ worker()->ExpectPendingCommits({kHash1});
- WriteItem(kTag1, kValue1);
- worker()->ExpectPendingCommits({kTag1});
+ WriteItem(kName1, kValue1);
+ worker()->ExpectPendingCommits({kHash1});
}
// Thoroughly tests the data generated by a server item creation.
TEST_F(SharedModelTypeProcessorTest, ServerCreateItem) {
InitializeToReadyState();
- worker()->UpdateFromServer(kTag1, kValue1);
+ worker()->UpdateFromServer(kHash1, GenerateSpecifics(kName1, kValue1));
EXPECT_EQ(1U, db().DataCount());
EXPECT_EQ(1U, db().MetadataCount());
EXPECT_EQ(1U, ProcessorEntityCount());
EXPECT_EQ(0U, worker()->GetNumPendingCommits());
- const EntityData& data = db().GetData(kTag1);
+ const EntityData& data = db().GetData(kKey1);
EXPECT_FALSE(data.id.empty());
- EXPECT_EQ(kTag1, data.specifics.preference().name());
+ EXPECT_EQ(kName1, data.specifics.preference().name());
EXPECT_EQ(kValue1, data.specifics.preference().value());
EXPECT_FALSE(data.creation_time.is_null());
EXPECT_FALSE(data.modification_time.is_null());
- EXPECT_EQ(kTag1, data.non_unique_name);
+ EXPECT_EQ(kName1, data.non_unique_name);
EXPECT_FALSE(data.is_deleted());
- const sync_pb::EntityMetadata metadata = db().GetMetadata(kTag1);
+ const sync_pb::EntityMetadata metadata = db().GetMetadata(kKey1);
EXPECT_TRUE(metadata.has_client_tag_hash());
EXPECT_TRUE(metadata.has_server_id());
EXPECT_FALSE(metadata.is_deleted());
@@ -1019,7 +1051,7 @@ TEST_F(SharedModelTypeProcessorTest, ErrorApplyingUpdate) {
InitializeToReadyState();
SetServiceError(syncer::SyncError::DATATYPE_ERROR);
error_handler()->ExpectError(syncer::SyncError::DATATYPE_ERROR);
- worker()->UpdateFromServer(kTag1, kValue1);
+ worker()->UpdateFromServer(kHash1, GenerateSpecifics(kName1, kValue1));
}
// Thoroughly tests the data generated by a server item update.
@@ -1027,17 +1059,18 @@ TEST_F(SharedModelTypeProcessorTest, ServerUpdateItem) {
InitializeToReadyState();
// Local add writes data and metadata; ack writes metadata again.
- WriteItemAndAck(kTag1, kValue1);
+ WriteItemAndAck(kName1, kValue1);
EXPECT_EQ(1U, db().DataChangeCount());
EXPECT_EQ(2U, db().MetadataChangeCount());
// Redundant update from server doesn't write data but updates metadata.
- worker()->UpdateFromServer(kTag1, kValue1);
+ worker()->UpdateFromServer(kHash1, GenerateSpecifics(kName1, kValue1));
EXPECT_EQ(1U, db().DataChangeCount());
EXPECT_EQ(3U, db().MetadataChangeCount());
// A reflection (update already received) is ignored completely.
- worker()->UpdateFromServer(kTag1, kValue1, 0 /* version_offset */);
+ worker()->UpdateFromServer(kHash1, GenerateSpecifics(kName1, kValue1),
+ 0 /* version_offset */);
EXPECT_EQ(1U, db().DataChangeCount());
EXPECT_EQ(3U, db().MetadataChangeCount());
}
@@ -1045,23 +1078,23 @@ TEST_F(SharedModelTypeProcessorTest, ServerUpdateItem) {
// Tests locally deleting an acknowledged item.
TEST_F(SharedModelTypeProcessorTest, LocalDeleteItem) {
InitializeToReadyState();
- WriteItemAndAck(kTag1, kValue1);
+ WriteItemAndAck(kName1, kValue1);
EXPECT_EQ(0U, worker()->GetNumPendingCommits());
- const sync_pb::EntityMetadata metadata_v1 = db().GetMetadata(kTag1);
+ const sync_pb::EntityMetadata metadata_v1 = db().GetMetadata(kKey1);
EXPECT_FALSE(metadata_v1.is_deleted());
EXPECT_EQ(1, metadata_v1.sequence_number());
EXPECT_EQ(1, metadata_v1.acked_sequence_number());
EXPECT_EQ(1, metadata_v1.server_version());
- DeleteItem(kTag1);
+ DeleteItem(kName1);
EXPECT_EQ(0U, db().DataCount());
// Metadata is not removed until the commit response comes back.
EXPECT_EQ(1U, db().MetadataCount());
EXPECT_EQ(1U, ProcessorEntityCount());
- worker()->ExpectPendingCommits({kTag1});
+ worker()->ExpectPendingCommits({kHash1});
- const sync_pb::EntityMetadata metadata_v2 = db().GetMetadata(kTag1);
+ const sync_pb::EntityMetadata metadata_v2 = db().GetMetadata(kKey1);
EXPECT_TRUE(metadata_v2.is_deleted());
EXPECT_EQ(2, metadata_v2.sequence_number());
EXPECT_EQ(1, metadata_v2.acked_sequence_number());
@@ -1077,31 +1110,31 @@ TEST_F(SharedModelTypeProcessorTest, LocalDeleteItem) {
// response, then getting the commit responses.
TEST_F(SharedModelTypeProcessorTest, LocalDeleteItemInterleaved) {
InitializeToReadyState();
- WriteItem(kTag1, kValue1);
- worker()->ExpectPendingCommits({kTag1});
+ WriteItem(kName1, kValue1);
+ worker()->ExpectPendingCommits({kHash1});
const CommitRequestData& data_v1 =
- worker()->GetLatestPendingCommitForTag(kTag1);
+ worker()->GetLatestPendingCommitForHash(kHash1);
- const sync_pb::EntityMetadata metadata_v1 = db().GetMetadata(kTag1);
+ const sync_pb::EntityMetadata metadata_v1 = db().GetMetadata(kKey1);
EXPECT_FALSE(metadata_v1.is_deleted());
EXPECT_EQ(1, metadata_v1.sequence_number());
EXPECT_EQ(0, metadata_v1.acked_sequence_number());
EXPECT_EQ(kUncommittedVersion, metadata_v1.server_version());
- DeleteItem(kTag1);
+ DeleteItem(kName1);
EXPECT_EQ(0U, db().DataCount());
EXPECT_EQ(1U, db().MetadataCount());
EXPECT_EQ(1U, ProcessorEntityCount());
- worker()->ExpectPendingCommits({kTag1, kTag1});
+ worker()->ExpectPendingCommits({kHash1, kHash1});
const CommitRequestData& data_v2 =
- worker()->GetLatestPendingCommitForTag(kTag1);
+ worker()->GetLatestPendingCommitForHash(kHash1);
EXPECT_GT(data_v2.sequence_number, data_v1.sequence_number);
EXPECT_TRUE(data_v2.entity->id.empty());
EXPECT_EQ(kUncommittedVersion, data_v2.base_version);
EXPECT_TRUE(data_v2.entity->is_deleted());
- const sync_pb::EntityMetadata metadata_v2 = db().GetMetadata(kTag1);
+ const sync_pb::EntityMetadata metadata_v2 = db().GetMetadata(kKey1);
EXPECT_TRUE(metadata_v2.is_deleted());
EXPECT_EQ(2, metadata_v2.sequence_number());
EXPECT_EQ(0, metadata_v2.acked_sequence_number());
@@ -1113,7 +1146,7 @@ TEST_F(SharedModelTypeProcessorTest, LocalDeleteItemInterleaved) {
EXPECT_EQ(1U, db().MetadataCount());
EXPECT_EQ(1U, ProcessorEntityCount());
- const sync_pb::EntityMetadata metadata_v3 = db().GetMetadata(kTag1);
+ const sync_pb::EntityMetadata metadata_v3 = db().GetMetadata(kKey1);
EXPECT_TRUE(metadata_v3.is_deleted());
EXPECT_EQ(2, metadata_v3.sequence_number());
EXPECT_EQ(1, metadata_v3.acked_sequence_number());
@@ -1127,13 +1160,13 @@ TEST_F(SharedModelTypeProcessorTest, LocalDeleteItemInterleaved) {
TEST_F(SharedModelTypeProcessorTest, ServerDeleteItem) {
InitializeToReadyState();
- WriteItemAndAck(kTag1, kValue1);
+ WriteItemAndAck(kName1, kValue1);
EXPECT_EQ(1U, ProcessorEntityCount());
EXPECT_EQ(1U, db().MetadataCount());
EXPECT_EQ(1U, db().DataCount());
EXPECT_EQ(0U, worker()->GetNumPendingCommits());
- worker()->TombstoneFromServer(kTag1);
+ worker()->TombstoneFromServer(kHash1);
// Delete from server should clear the data and all the metadata.
EXPECT_EQ(0U, db().DataCount());
EXPECT_EQ(0U, db().MetadataCount());
@@ -1145,7 +1178,7 @@ TEST_F(SharedModelTypeProcessorTest, ServerDeleteItem) {
// Should have no effect and not crash.
TEST_F(SharedModelTypeProcessorTest, LocalDeleteUnknown) {
InitializeToReadyState();
- DeleteItem(kTag1);
+ DeleteItem(kName1);
EXPECT_EQ(0U, db().DataCount());
EXPECT_EQ(0U, db().MetadataCount());
EXPECT_EQ(0U, ProcessorEntityCount());
@@ -1156,7 +1189,7 @@ TEST_F(SharedModelTypeProcessorTest, LocalDeleteUnknown) {
// Should have no effect and not crash.
TEST_F(SharedModelTypeProcessorTest, ServerDeleteUnknown) {
InitializeToReadyState();
- worker()->TombstoneFromServer(kTag1);
+ worker()->TombstoneFromServer(kHash1);
EXPECT_EQ(0U, db().DataCount());
EXPECT_EQ(0U, db().MetadataCount());
EXPECT_EQ(0U, ProcessorEntityCount());
@@ -1169,21 +1202,21 @@ TEST_F(SharedModelTypeProcessorTest, TwoIndependentItems) {
InitializeToReadyState();
EXPECT_EQ(0U, worker()->GetNumPendingCommits());
- WriteItem(kTag1, kValue1);
+ WriteItem(kName1, kValue1);
EXPECT_EQ(1U, db().DataCount());
EXPECT_EQ(1U, db().MetadataCount());
- const sync_pb::EntityMetadata metadata1 = db().GetMetadata(kTag1);
+ const sync_pb::EntityMetadata metadata1 = db().GetMetadata(kKey1);
// There should be one commit request for this item only.
- worker()->ExpectPendingCommits({kTag1});
+ worker()->ExpectPendingCommits({kHash1});
- WriteItem(kTag2, kValue2);
+ WriteItem(kName2, kValue2);
EXPECT_EQ(2U, db().DataCount());
EXPECT_EQ(2U, db().MetadataCount());
- const sync_pb::EntityMetadata metadata2 = db().GetMetadata(kTag2);
+ const sync_pb::EntityMetadata metadata2 = db().GetMetadata(kKey2);
// The second write should trigger another single-item commit request.
- worker()->ExpectPendingCommits({kTag1, kTag2});
+ worker()->ExpectPendingCommits({kHash1, kHash2});
EXPECT_FALSE(metadata1.is_deleted());
EXPECT_EQ(1, metadata1.sequence_number());
@@ -1198,65 +1231,66 @@ TEST_F(SharedModelTypeProcessorTest, TwoIndependentItems) {
TEST_F(SharedModelTypeProcessorTest, ConflictResolutionChangesMatch) {
InitializeToReadyState();
- WriteItem(kTag1, kValue1);
+ sync_pb::EntitySpecifics specifics(WriteItem(kName1, kValue1));
EXPECT_EQ(1U, db().DataChangeCount());
- EXPECT_EQ(kValue1, db().GetValue(kTag1));
+ EXPECT_EQ(kValue1, db().GetValue(kKey1));
EXPECT_EQ(1U, db().MetadataChangeCount());
- EXPECT_EQ(kUncommittedVersion, db().GetMetadata(kTag1).server_version());
- worker()->ExpectPendingCommits({kTag1});
- worker()->ExpectNthPendingCommit(0, kTag1, kValue1);
+ EXPECT_EQ(kUncommittedVersion, db().GetMetadata(kKey1).server_version());
+ worker()->ExpectPendingCommits({kHash1});
+ worker()->ExpectNthPendingCommit(0, kHash1, specifics);
// Changes match doesn't call ResolveConflict.
- worker()->UpdateFromServer(kTag1, kValue1);
+ worker()->UpdateFromServer(kHash1, specifics);
// Updated metadata but not data; no new commit request.
EXPECT_EQ(1U, db().DataChangeCount());
- EXPECT_EQ(1, db().GetMetadata(kTag1).server_version());
- worker()->ExpectPendingCommits({kTag1});
+ EXPECT_EQ(1, db().GetMetadata(kKey1).server_version());
+ worker()->ExpectPendingCommits({kHash1});
}
TEST_F(SharedModelTypeProcessorTest, ConflictResolutionUseLocal) {
InitializeToReadyState();
- WriteItem(kTag1, kValue1);
+ sync_pb::EntitySpecifics specifics(WriteItem(kName1, kValue1));
SetConflictResolution(ConflictResolution::UseLocal());
- worker()->UpdateFromServer(kTag1, kValue2);
+ worker()->UpdateFromServer(kHash1, GenerateSpecifics(kName1, kValue2));
// Updated metadata but not data; new commit request.
EXPECT_EQ(1U, db().DataChangeCount());
EXPECT_EQ(2U, db().MetadataChangeCount());
- EXPECT_EQ(1, db().GetMetadata(kTag1).server_version());
- worker()->ExpectPendingCommits({kTag1, kTag1});
- worker()->ExpectNthPendingCommit(1, kTag1, kValue1);
+ EXPECT_EQ(1, db().GetMetadata(kKey1).server_version());
+ worker()->ExpectPendingCommits({kHash1, kHash1});
+ worker()->ExpectNthPendingCommit(1, kHash1, specifics);
}
TEST_F(SharedModelTypeProcessorTest, ConflictResolutionUseRemote) {
InitializeToReadyState();
- WriteItem(kTag1, kValue1);
+ WriteItem(kName1, kValue1);
SetConflictResolution(ConflictResolution::UseRemote());
- worker()->UpdateFromServer(kTag1, kValue2);
+ worker()->UpdateFromServer(kHash1, GenerateSpecifics(kName1, kValue2));
// Updated client data and metadata; no new commit request.
EXPECT_EQ(2U, db().DataChangeCount());
- EXPECT_EQ(kValue2, db().GetValue(kTag1));
+ EXPECT_EQ(kValue2, db().GetValue(kKey1));
EXPECT_EQ(2U, db().MetadataChangeCount());
- EXPECT_EQ(1, db().GetMetadata(kTag1).server_version());
- worker()->ExpectPendingCommits({kTag1});
+ EXPECT_EQ(1, db().GetMetadata(kKey1).server_version());
+ worker()->ExpectPendingCommits({kHash1});
}
TEST_F(SharedModelTypeProcessorTest, ConflictResolutionUseNew) {
InitializeToReadyState();
- WriteItem(kTag1, kValue1);
+ WriteItem(kName1, kValue1);
SetConflictResolution(
- ConflictResolution::UseNew(GenerateEntityData(kTag1, kValue3)));
+ ConflictResolution::UseNew(GenerateEntityData(kName1, kValue3)));
- worker()->UpdateFromServer(kTag1, kValue2);
+ worker()->UpdateFromServer(kHash1, GenerateSpecifics(kName1, kValue2));
EXPECT_EQ(2U, db().DataChangeCount());
- EXPECT_EQ(kValue3, db().GetValue(kTag1));
+ EXPECT_EQ(kValue3, db().GetValue(kKey1));
EXPECT_EQ(2U, db().MetadataChangeCount());
- EXPECT_EQ(1, db().GetMetadata(kTag1).server_version());
- worker()->ExpectPendingCommits({kTag1, kTag1});
- worker()->ExpectNthPendingCommit(1, kTag1, kValue3);
+ EXPECT_EQ(1, db().GetMetadata(kKey1).server_version());
+ worker()->ExpectPendingCommits({kHash1, kHash1});
+ worker()->ExpectNthPendingCommit(1, kHash1,
+ GenerateSpecifics(kName1, kValue3));
}
// Test proper handling of disconnect and reconnect.
@@ -1267,16 +1301,16 @@ TEST_F(SharedModelTypeProcessorTest, Disconnect) {
InitializeToReadyState();
// The first item is fully committed.
- WriteItemAndAck(kTag1, kValue1);
+ WriteItemAndAck(kName1, kValue1);
// The second item has a commit request in progress.
- WriteItem(kTag2, kValue2);
- EXPECT_TRUE(worker()->HasPendingCommitForTag(kTag2));
+ WriteItem(kName2, kValue2);
+ EXPECT_TRUE(worker()->HasPendingCommitForHash(kHash2));
DisconnectSync();
// The third item is added after stopping.
- WriteItem(kTag3, kValue3);
+ WriteItem(kName3, kValue3);
// Reconnect.
OnSyncStarting();
@@ -1285,13 +1319,13 @@ TEST_F(SharedModelTypeProcessorTest, Disconnect) {
EXPECT_EQ(2U, worker()->GetNthPendingCommit(0).size());
// The first item was already in sync.
- EXPECT_FALSE(worker()->HasPendingCommitForTag(kTag1));
+ EXPECT_FALSE(worker()->HasPendingCommitForHash(kHash1));
// The second item's commit was interrupted and should be retried.
- EXPECT_TRUE(worker()->HasPendingCommitForTag(kTag2));
+ EXPECT_TRUE(worker()->HasPendingCommitForHash(kHash2));
// The third item's commit was not started until the reconnect.
- EXPECT_TRUE(worker()->HasPendingCommitForTag(kTag3));
+ EXPECT_TRUE(worker()->HasPendingCommitForHash(kHash3));
}
// Test proper handling of disable and re-enable.
@@ -1302,16 +1336,16 @@ TEST_F(SharedModelTypeProcessorTest, Disable) {
InitializeToReadyState();
// The first item is fully committed.
- WriteItemAndAck(kTag1, kValue1);
+ WriteItemAndAck(kName1, kValue1);
// The second item has a commit request in progress.
- WriteItem(kTag2, kValue2);
- EXPECT_TRUE(worker()->HasPendingCommitForTag(kTag2));
+ WriteItem(kName2, kValue2);
+ EXPECT_TRUE(worker()->HasPendingCommitForHash(kHash2));
DisableSync();
// The third item is added after disable.
- WriteItem(kTag3, kValue3);
+ WriteItem(kName3, kValue3);
// Now we re-enable.
CreateChangeProcessor();
@@ -1321,7 +1355,7 @@ TEST_F(SharedModelTypeProcessorTest, Disable) {
// Once we're ready to commit, all three local items should consider
// themselves uncommitted and pending for commit.
- worker()->ExpectPendingCommits({kTag1, kTag2, kTag3});
+ worker()->ExpectPendingCommits({kHash1, kHash2, kHash3});
}
// Test re-encrypt everything when desired encryption key changes.
@@ -1329,33 +1363,33 @@ TEST_F(SharedModelTypeProcessorTest, ReEncryptCommitsWithNewKey) {
InitializeToReadyState();
// Commit an item.
- WriteItemAndAck(kTag1, kValue1);
+ sync_pb::EntitySpecifics specifics1(WriteItemAndAck(kName1, kValue1));
// Create another item and don't wait for its commit response.
- WriteItem(kTag2, kValue2);
- worker()->ExpectPendingCommits({kTag2});
- EXPECT_EQ(1U, db().GetMetadata(kTag1).sequence_number());
- EXPECT_EQ(1U, db().GetMetadata(kTag2).sequence_number());
+ sync_pb::EntitySpecifics specifics2(WriteItem(kName2, kValue2));
+ worker()->ExpectPendingCommits({kHash2});
+ EXPECT_EQ(1U, db().GetMetadata(kKey1).sequence_number());
+ EXPECT_EQ(1U, db().GetMetadata(kKey2).sequence_number());
// Receive notice that the account's desired encryption key has changed.
worker()->UpdateWithEncryptionKey("k1");
// Tag 2 is recommitted immediately because the data was in memory.
ASSERT_EQ(2U, worker()->GetNumPendingCommits());
- worker()->ExpectNthPendingCommit(1, kTag2, kValue2);
+ worker()->ExpectNthPendingCommit(1, kHash2, specifics2);
// Sequence numbers in the store are updated.
- EXPECT_EQ(2U, db().GetMetadata(kTag1).sequence_number());
- EXPECT_EQ(2U, db().GetMetadata(kTag2).sequence_number());
+ EXPECT_EQ(2U, db().GetMetadata(kKey1).sequence_number());
+ EXPECT_EQ(2U, db().GetMetadata(kKey2).sequence_number());
// Tag 1 needs to go to the store to load its data before recommitting.
OnPendingCommitDataLoaded();
ASSERT_EQ(3U, worker()->GetNumPendingCommits());
- worker()->ExpectNthPendingCommit(2, kTag1, kValue1);
+ worker()->ExpectNthPendingCommit(2, kHash1, specifics1);
}
// Test that an error loading pending commit data for re-encryption is
// propagated to the error handler.
TEST_F(SharedModelTypeProcessorTest, ReEncryptErrorLoadingData) {
InitializeToReadyState();
- WriteItemAndAck(kTag1, kValue1);
+ WriteItemAndAck(kName1, kValue1);
SetServiceError(syncer::SyncError::DATATYPE_ERROR);
worker()->UpdateWithEncryptionKey("k1");
error_handler()->ExpectError(syncer::SyncError::DATATYPE_ERROR);
@@ -1367,99 +1401,106 @@ TEST_F(SharedModelTypeProcessorTest, ReEncryptUpdatesWithNewKey) {
InitializeToReadyState();
// Receive an unencrypted update.
- worker()->UpdateFromServer(kTag1, kValue1);
+ worker()->UpdateFromServer(kHash1, GenerateSpecifics(kName1, kValue1));
ASSERT_EQ(0U, worker()->GetNumPendingCommits());
UpdateResponseDataList update;
// Receive an entity with old encryption as part of the update.
- update.push_back(worker()->GenerateUpdateData(kTag2, kValue2, 1, "k1"));
+ update.push_back(worker()->GenerateUpdateData(
+ kHash2, GenerateSpecifics(kName2, kValue2), 1, "k1"));
// Receive an entity with up-to-date encryption as part of the update.
- update.push_back(worker()->GenerateUpdateData(kTag3, kValue3, 1, "k2"));
+ update.push_back(worker()->GenerateUpdateData(
+ kHash3, GenerateSpecifics(kName3, kValue3), 1, "k2"));
// Set desired encryption key to k2 to force updates to some items.
worker()->UpdateWithEncryptionKey("k2", update);
- // kTag2 needed to be re-encrypted and had data so it was queued immediately.
- worker()->ExpectPendingCommits({kTag2});
+ // kName2 needed to be re-encrypted and had data so it was queued immediately.
+ worker()->ExpectPendingCommits({kHash2});
OnPendingCommitDataLoaded();
- // kTag1 needed data so once that's loaded, it is also queued.
- worker()->ExpectPendingCommits({kTag2, kTag1});
+ // kName1 needed data so once that's loaded, it is also queued.
+ worker()->ExpectPendingCommits({kHash2, kHash1});
// Receive a separate update that was encrypted with key k1.
- worker()->UpdateFromServer("enc_k1", kValue1, 1, "k1");
+ worker()->UpdateFromServer(kHash4, GenerateSpecifics(kName4, kValue1), 1,
+ "k1");
// Receipt of updates encrypted with old key also forces a re-encrypt commit.
- worker()->ExpectPendingCommits({kTag2, kTag1, "enc_k1"});
+ worker()->ExpectPendingCommits({kHash2, kHash1, kHash4});
// Receive an update that was encrypted with key k2.
- worker()->UpdateFromServer("enc_k2", kValue1, 1, "k2");
+ worker()->UpdateFromServer(kHash5, GenerateSpecifics(kName5, kValue1), 1,
+ "k2");
// That was the correct key, so no re-encryption is required.
- worker()->ExpectPendingCommits({kTag2, kTag1, "enc_k1"});
+ worker()->ExpectPendingCommits({kHash2, kHash1, kHash4});
}
// Test that re-encrypting enqueues the right data for USE_LOCAL conflicts.
TEST_F(SharedModelTypeProcessorTest, ReEncryptConflictResolutionUseLocal) {
InitializeToReadyState();
worker()->UpdateWithEncryptionKey("k1");
- WriteItem(kTag1, kValue1);
- worker()->ExpectPendingCommits({kTag1});
+ sync_pb::EntitySpecifics specifics(WriteItem(kName1, kValue1));
+ worker()->ExpectPendingCommits({kHash1});
SetConflictResolution(ConflictResolution::UseLocal());
  // Unencrypted update needs to be re-committed with key k1.
- worker()->UpdateFromServer(kTag1, kValue2, 1, "");
+ worker()->UpdateFromServer(kHash1, GenerateSpecifics(kName1, kValue2), 1, "");
// Ensure the re-commit has the correct value.
EXPECT_EQ(2U, worker()->GetNumPendingCommits());
- worker()->ExpectNthPendingCommit(1, kTag1, kValue1);
- EXPECT_EQ(kValue1, db().GetValue(kTag1));
+ worker()->ExpectNthPendingCommit(1, kHash1, specifics);
+ EXPECT_EQ(kValue1, db().GetValue(kKey1));
}
// Test that re-encrypting enqueues the right data for USE_REMOTE conflicts.
TEST_F(SharedModelTypeProcessorTest, ReEncryptConflictResolutionUseRemote) {
InitializeToReadyState();
worker()->UpdateWithEncryptionKey("k1");
- WriteItem(kTag1, kValue1);
+ WriteItem(kName1, kValue1);
SetConflictResolution(ConflictResolution::UseRemote());
  // Unencrypted update needs to be re-committed with key k1.
- worker()->UpdateFromServer(kTag1, kValue2, 1, "");
+ sync_pb::EntitySpecifics specifics(GenerateSpecifics(kName1, kValue2));
+ worker()->UpdateFromServer(kHash1, specifics, 1, "");
// Ensure the re-commit has the correct value.
EXPECT_EQ(2U, worker()->GetNumPendingCommits());
- worker()->ExpectNthPendingCommit(1, kTag1, kValue2);
- EXPECT_EQ(kValue2, db().GetValue(kTag1));
+ worker()->ExpectNthPendingCommit(1, kHash1, specifics);
+ EXPECT_EQ(kValue2, db().GetValue(kKey1));
}
// Test that re-encrypting enqueues the right data for USE_NEW conflicts.
TEST_F(SharedModelTypeProcessorTest, ReEncryptConflictResolutionUseNew) {
InitializeToReadyState();
worker()->UpdateWithEncryptionKey("k1");
- WriteItem(kTag1, kValue1);
+ WriteItem(kName1, kValue1);
SetConflictResolution(
- ConflictResolution::UseNew(GenerateEntityData(kTag1, kValue3)));
+ ConflictResolution::UseNew(GenerateEntityData(kName1, kValue3)));
  // Unencrypted update needs to be re-committed with key k1.
- worker()->UpdateFromServer(kTag1, kValue2, 1, "");
+ worker()->UpdateFromServer(kHash1, GenerateSpecifics(kName1, kValue2), 1, "");
// Ensure the re-commit has the correct value.
EXPECT_EQ(2U, worker()->GetNumPendingCommits());
- worker()->ExpectNthPendingCommit(1, kTag1, kValue3);
- EXPECT_EQ(kValue3, db().GetValue(kTag1));
+ worker()->ExpectNthPendingCommit(1, kHash1,
+ GenerateSpecifics(kName1, kValue3));
+ EXPECT_EQ(kValue3, db().GetValue(kKey1));
}
TEST_F(SharedModelTypeProcessorTest, ReEncryptConflictWhileLoading) {
InitializeToReadyState();
// Create item and ack so its data is no longer cached.
- WriteItemAndAck(kTag1, kValue1);
+ WriteItemAndAck(kName1, kValue1);
// Update key so that it needs to fetch data to re-commit.
worker()->UpdateWithEncryptionKey("k1");
EXPECT_EQ(0U, worker()->GetNumPendingCommits());
  // Unencrypted update needs to be re-committed with key k1.
- worker()->UpdateFromServer(kTag1, kValue2, 1, "");
+ sync_pb::EntitySpecifics specifics(GenerateSpecifics(kName1, kValue2));
+ worker()->UpdateFromServer(kHash1, specifics, 1, "");
// Ensure the re-commit has the correct value.
EXPECT_EQ(1U, worker()->GetNumPendingCommits());
- worker()->ExpectNthPendingCommit(0, kTag1, kValue2);
- EXPECT_EQ(kValue2, db().GetValue(kTag1));
+ worker()->ExpectNthPendingCommit(0, kHash1, specifics);
+ EXPECT_EQ(kValue2, db().GetValue(kKey1));
// Data load completing shouldn't change anything.
OnPendingCommitDataLoaded();
@@ -1469,46 +1510,46 @@ TEST_F(SharedModelTypeProcessorTest, ReEncryptConflictWhileLoading) {
// Tests that a real remote change wins over a local encryption-only change.
TEST_F(SharedModelTypeProcessorTest, IgnoreLocalEncryption) {
InitializeToReadyState();
- WriteItemAndAck(kTag1, kValue1);
+ sync_pb::EntitySpecifics specifics(WriteItemAndAck(kName1, kValue1));
worker()->UpdateWithEncryptionKey("k1");
OnPendingCommitDataLoaded();
EXPECT_EQ(1U, worker()->GetNumPendingCommits());
- worker()->ExpectNthPendingCommit(0, kTag1, kValue1);
+ worker()->ExpectNthPendingCommit(0, kHash1, specifics);
- worker()->UpdateFromServer(kTag1, kValue2);
+ worker()->UpdateFromServer(kHash1, GenerateSpecifics(kName1, kValue2));
EXPECT_EQ(1U, worker()->GetNumPendingCommits());
}
// Tests that a real local change wins over a remote encryption-only change.
TEST_F(SharedModelTypeProcessorTest, IgnoreRemoteEncryption) {
InitializeToReadyState();
- WriteItemAndAck(kTag1, kValue1);
+ sync_pb::EntitySpecifics specifics1(WriteItemAndAck(kName1, kValue1));
- WriteItem(kTag1, kValue2);
+ sync_pb::EntitySpecifics specifics2(WriteItem(kName1, kValue2));
UpdateResponseDataList update;
- update.push_back(worker()->GenerateUpdateData(kTag1, kValue1, 1, "k1"));
+ update.push_back(worker()->GenerateUpdateData(kHash1, specifics1, 1, "k1"));
worker()->UpdateWithEncryptionKey("k1", update);
EXPECT_EQ(2U, worker()->GetNumPendingCommits());
- worker()->ExpectNthPendingCommit(1, kTag1, kValue2);
+ worker()->ExpectNthPendingCommit(1, kHash1, specifics2);
}
// Same as above but with two commit requests before one ack.
TEST_F(SharedModelTypeProcessorTest, IgnoreRemoteEncryptionInterleaved) {
InitializeToReadyState();
- WriteItem(kTag1, kValue1);
- WriteItem(kTag1, kValue2);
+ sync_pb::EntitySpecifics specifics1(WriteItem(kName1, kValue1));
+ sync_pb::EntitySpecifics specifics2(WriteItem(kName1, kValue2));
worker()->AckOnePendingCommit();
// kValue1 is now the base value.
EXPECT_EQ(1U, worker()->GetNumPendingCommits());
- worker()->ExpectNthPendingCommit(0, kTag1, kValue2);
+ worker()->ExpectNthPendingCommit(0, kHash1, specifics2);
UpdateResponseDataList update;
- update.push_back(worker()->GenerateUpdateData(kTag1, kValue1, 1, "k1"));
+ update.push_back(worker()->GenerateUpdateData(kHash1, specifics1, 1, "k1"));
worker()->UpdateWithEncryptionKey("k1", update);
EXPECT_EQ(2U, worker()->GetNumPendingCommits());
- worker()->ExpectNthPendingCommit(1, kTag1, kValue2);
+ worker()->ExpectNthPendingCommit(1, kHash1, specifics2);
}
} // namespace syncer_v2

Powered by Google App Engine
This is Rietveld 408576698