| Index: sync/engine/process_commit_response_command_unittest.cc
|
| diff --git a/sync/engine/process_commit_response_command_unittest.cc b/sync/engine/process_commit_response_command_unittest.cc
|
| index 482036d7b1815e9fc6b866b2dc2a9457f6879481..4723398c30c62970c3389b5b3478c2bafbb4f1f8 100644
|
| --- a/sync/engine/process_commit_response_command_unittest.cc
|
| +++ b/sync/engine/process_commit_response_command_unittest.cc
|
| @@ -8,6 +8,7 @@
|
|
|
| #include "base/location.h"
|
| #include "base/stringprintf.h"
|
| +#include "sync/internal_api/public/test/test_entry_factory.h"
|
| #include "sync/protocol/bookmark_specifics.pb.h"
|
| #include "sync/protocol/sync.pb.h"
|
| #include "sync/sessions/sync_session.h"
|
| @@ -31,11 +32,13 @@ namespace syncer {
|
| using sessions::SyncSession;
|
| using syncable::BASE_VERSION;
|
| using syncable::Entry;
|
| +using syncable::ID;
|
| using syncable::IS_DIR;
|
| using syncable::IS_UNSYNCED;
|
| using syncable::Id;
|
| using syncable::MutableEntry;
|
| using syncable::NON_UNIQUE_NAME;
|
| +using syncable::UNIQUE_POSITION;
|
| using syncable::UNITTEST;
|
| using syncable::WriteTransaction;
|
|
|
| @@ -55,13 +58,14 @@ class ProcessCommitResponseCommandTest : public SyncerCommandTest {
|
| (*mutable_routing_info())[AUTOFILL] = GROUP_DB;
|
|
|
| SyncerCommandTest::SetUp();
|
| +
|
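| + // The entry factory needs the test directory, so create it only after
|
| + // SetUp() has run.
|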
| + test_entry_factory_.reset(new TestEntryFactory(directory()));
|
| }
|
|
|
| protected:
|
|
|
| ProcessCommitResponseCommandTest()
|
| - : next_old_revision_(1),
|
| - next_new_revision_(4000),
|
| + : next_new_revision_(4000),
|
| next_server_position_(10000) {
|
| }
|
|
|
| @@ -75,67 +79,25 @@ class ProcessCommitResponseCommandTest : public SyncerCommandTest {
|
| << "Item should have a valid (positive) server base revision";
|
| }
|
|
|
| - // Create an unsynced item in the database. If item_id is a local ID, it
|
| - // will be treated as a create-new. Otherwise, if it's a server ID, we'll
|
| - // fake the server data so that it looks like it exists on the server.
|
| - // Returns the methandle of the created item in |metahandle_out| if not NULL.
|
| - void CreateUnsyncedItem(const Id& item_id,
|
| - const Id& parent_id,
|
| - const string& name,
|
| - bool is_folder,
|
| - ModelType model_type,
|
| - int64* metahandle_out) {
|
| - WriteTransaction trans(FROM_HERE, UNITTEST, directory());
|
| - Id predecessor_id;
|
| - ASSERT_TRUE(
|
| - directory()->GetLastChildIdForTest(&trans, parent_id, &predecessor_id));
|
| - MutableEntry entry(&trans, syncable::CREATE, parent_id, name);
|
| - ASSERT_TRUE(entry.good());
|
| - entry.Put(syncable::ID, item_id);
|
| - entry.Put(syncable::BASE_VERSION,
|
| - item_id.ServerKnows() ? next_old_revision_++ : 0);
|
| - entry.Put(syncable::IS_UNSYNCED, true);
|
| - entry.Put(syncable::IS_DIR, is_folder);
|
| - entry.Put(syncable::IS_DEL, false);
|
| - entry.Put(syncable::PARENT_ID, parent_id);
|
| - entry.PutPredecessor(predecessor_id);
|
| - sync_pb::EntitySpecifics default_specifics;
|
| - AddDefaultFieldValue(model_type, &default_specifics);
|
| - entry.Put(syncable::SPECIFICS, default_specifics);
|
| - if (item_id.ServerKnows()) {
|
| - entry.Put(syncable::SERVER_SPECIFICS, default_specifics);
|
| - entry.Put(syncable::SERVER_IS_DIR, is_folder);
|
| - entry.Put(syncable::SERVER_PARENT_ID, parent_id);
|
| - entry.Put(syncable::SERVER_IS_DEL, false);
|
| - }
|
| - if (metahandle_out)
|
| - *metahandle_out = entry.Get(syncable::META_HANDLE);
|
| - }
|
| -
|
| - // Create a new unsynced item in the database, and synthesize a commit
|
| - // record and a commit response for it in the syncer session. If item_id
|
| - // is a local ID, the item will be a create operation. Otherwise, it
|
| - // will be an edit.
|
| - void CreateUnprocessedCommitResult(
|
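| + // Create a new unsynced bookmark item in the database, and synthesize a
|
| + // commit record and a commit response for it in the syncer session. If
|
| + // item_id is a local ID, the item will be a create operation. Otherwise,
|
| + // it will be an edit. Returns the metahandle of the created item.
|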
| + int CreateUnprocessedBookmarkCommitResult(
|
| const Id& item_id,
|
| const Id& parent_id,
|
| const string& name,
|
| - ModelType model_type,
|
| + bool is_folder,
|
| sessions::OrderedCommitSet *commit_set,
|
| sync_pb::ClientToServerMessage *commit,
|
| sync_pb::ClientToServerResponse *response) {
|
| - bool is_folder = true;
|
| - int64 metahandle = 0;
|
| - CreateUnsyncedItem(item_id, parent_id, name, is_folder, model_type,
|
| - &metahandle);
|
| + int64 metahandle = 0;
|
| + test_entry_factory_->CreateUnsyncedBookmarkItem(
|
| + item_id, parent_id, name, is_folder, &metahandle);
|
|
|
| // ProcessCommitResponseCommand consumes commit_ids from the session
|
| // state, so we need to update that. O(n^2) because it's a test.
|
| - commit_set->AddCommitItem(metahandle, item_id, model_type);
|
| + commit_set->AddCommitItem(metahandle, item_id, BOOKMARKS);
|
|
|
| WriteTransaction trans(FROM_HERE, UNITTEST, directory());
|
| MutableEntry entry(&trans, syncable::GET_BY_ID, item_id);
|
| - ASSERT_TRUE(entry.good());
|
| + EXPECT_TRUE(entry.good());
|
| entry.Put(syncable::SYNCING, true);
|
|
|
| // Add to the commit message.
|
| @@ -173,6 +135,63 @@ class ProcessCommitResponseCommandTest : public SyncerCommandTest {
|
| response->commit().entryresponse(i).id_string());
|
| }
|
| }
|
| +
|
| + return metahandle;
|
| + }
|
| +
|
| + // Create a new unsynced item in the database, and synthesize a commit
|
| + // record and a commit response for it in the syncer session. If item_id
|
| + // is a local ID, the item will be a create operation. Otherwise, it
|
| + // will be an edit.
|
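| + // Returns the metahandle of the created item.
|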
| + int CreateUnprocessedCommitResult(
|
| + const Id& item_id,
|
| + const Id& parent_id,
|
| + const string& name,
|
| + ModelType model_type,
|
| + sessions::OrderedCommitSet *commit_set,
|
| + sync_pb::ClientToServerMessage *commit,
|
| + sync_pb::ClientToServerResponse *response) {
|
| + int64 metahandle = 0;
|
| + test_entry_factory_->CreateUnsyncedItem(item_id, parent_id, name,
|
| + model_type, &metahandle);
|
| +
|
| + // ProcessCommitResponseCommand consumes commit_ids from the session
|
| + // state, so we need to update that. O(n^2) because it's a test.
|
| + commit_set->AddCommitItem(metahandle, item_id, model_type);
|
| +
|
| + WriteTransaction trans(FROM_HERE, UNITTEST, directory());
|
| + MutableEntry entry(&trans, syncable::GET_BY_ID, item_id);
|
| + EXPECT_TRUE(entry.good());
|
| + entry.Put(syncable::SYNCING, true);
|
| +
|
| + // Add to the commit message.
|
| + commit->set_message_contents(ClientToServerMessage::COMMIT);
|
| + sync_pb::SyncEntity* entity = commit->mutable_commit()->add_entries();
|
| + entity->set_non_unique_name(name);
|
| + entity->set_folder(false);
|
| + entity->set_parent_id_string(SyncableIdToProto(parent_id));
|
| + entity->set_version(entry.Get(syncable::BASE_VERSION));
|
| + entity->mutable_specifics()->CopyFrom(entry.Get(syncable::SPECIFICS));
|
| + entity->set_id_string(SyncableIdToProto(item_id));
|
| +
|
| + // Should be a hash, but this is good enough for our purposes.
|
| + entity->set_client_defined_unique_tag(name);
|
| +
|
| + // Add to the response message.
|
| + response->set_error_code(sync_pb::SyncEnums::SUCCESS);
|
| + sync_pb::CommitResponse_EntryResponse* entry_response =
|
| + response->mutable_commit()->add_entryresponse();
|
| + entry_response->set_response_type(CommitResponse::SUCCESS);
|
| + entry_response->set_name("Garbage.");
|
| + entry_response->set_non_unique_name(entity->name());
|
| + if (item_id.ServerKnows())
|
| + entry_response->set_id_string(entity->id_string());
|
| + else
|
| + entry_response->set_id_string(id_factory_.NewServerId().GetServerId());
|
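| + // Fake the server-assigned version and position for the committed item.
|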
| + entry_response->set_version(next_new_revision_++);
|
| + entry_response->set_position_in_parent(next_server_position_++);
|
| +
|
| + return metahandle;
|
| }
|
|
|
| void SetLastErrorCode(sync_pb::CommitResponse::ResponseType error_code,
|
| @@ -184,8 +203,8 @@ class ProcessCommitResponseCommandTest : public SyncerCommandTest {
|
| }
|
|
|
| TestIdFactory id_factory_;
|
| + scoped_ptr<TestEntryFactory> test_entry_factory_;
|
| private:
|
| - int64 next_old_revision_;
|
| int64 next_new_revision_;
|
| int64 next_server_position_;
|
| DISALLOW_COPY_AND_ASSIGN(ProcessCommitResponseCommandTest);
|
| @@ -197,72 +216,64 @@ TEST_F(ProcessCommitResponseCommandTest, MultipleCommitIdProjections) {
|
| sync_pb::ClientToServerResponse response;
|
|
|
| Id bookmark_folder_id = id_factory_.NewLocalId();
|
| - Id bookmark_id1 = id_factory_.NewLocalId();
|
| - Id bookmark_id2 = id_factory_.NewLocalId();
|
| - Id pref_id1 = id_factory_.NewLocalId(), pref_id2 = id_factory_.NewLocalId();
|
| - Id autofill_id1 = id_factory_.NewLocalId();
|
| - Id autofill_id2 = id_factory_.NewLocalId();
|
| - CreateUnprocessedCommitResult(bookmark_folder_id, id_factory_.root(),
|
| - "A bookmark folder", BOOKMARKS,
|
| - &commit_set, &request, &response);
|
| - CreateUnprocessedCommitResult(bookmark_id1, bookmark_folder_id,
|
| - "bookmark 1", BOOKMARKS,
|
| - &commit_set, &request, &response);
|
| - CreateUnprocessedCommitResult(bookmark_id2, bookmark_folder_id,
|
| - "bookmark 2", BOOKMARKS,
|
| - &commit_set, &request, &response);
|
| - CreateUnprocessedCommitResult(pref_id1, id_factory_.root(),
|
| - "Pref 1", PREFERENCES,
|
| - &commit_set, &request, &response);
|
| - CreateUnprocessedCommitResult(pref_id2, id_factory_.root(),
|
| - "Pref 2", PREFERENCES,
|
| - &commit_set, &request, &response);
|
| - CreateUnprocessedCommitResult(autofill_id1, id_factory_.root(),
|
| - "Autofill 1", AUTOFILL,
|
| - &commit_set, &request, &response);
|
| - CreateUnprocessedCommitResult(autofill_id2, id_factory_.root(),
|
| - "Autofill 2", AUTOFILL,
|
| - &commit_set, &request, &response);
|
| + int bookmark_folder_handle = CreateUnprocessedBookmarkCommitResult(
|
| + bookmark_folder_id, id_factory_.root(), "A bookmark folder", true,
|
| + &commit_set, &request, &response);
|
| + int bookmark1_handle = CreateUnprocessedBookmarkCommitResult(
|
| + id_factory_.NewLocalId(), bookmark_folder_id, "bookmark 1", false,
|
| + &commit_set, &request, &response);
|
| + int bookmark2_handle = CreateUnprocessedBookmarkCommitResult(
|
| + id_factory_.NewLocalId(), bookmark_folder_id, "bookmark 2", false,
|
| + &commit_set, &request, &response);
|
| + int pref1_handle = CreateUnprocessedCommitResult(
|
| + id_factory_.NewLocalId(), id_factory_.root(), "Pref 1", PREFERENCES,
|
| + &commit_set, &request, &response);
|
| + int pref2_handle = CreateUnprocessedCommitResult(
|
| + id_factory_.NewLocalId(), id_factory_.root(), "Pref 2", PREFERENCES,
|
| + &commit_set, &request, &response);
|
| + int autofill1_handle = CreateUnprocessedCommitResult(
|
| + id_factory_.NewLocalId(), id_factory_.root(), "Autofill 1", AUTOFILL,
|
| + &commit_set, &request, &response);
|
| + int autofill2_handle = CreateUnprocessedCommitResult(
|
| + id_factory_.NewLocalId(), id_factory_.root(), "Autofill 2", AUTOFILL,
|
| + &commit_set, &request, &response);
|
|
|
| ProcessCommitResponseCommand command(commit_set, request, response);
|
| ExpectGroupsToChange(command, GROUP_UI, GROUP_DB);
|
| command.ExecuteImpl(session());
|
|
|
| syncable::ReadTransaction trans(FROM_HERE, directory());
|
| - Id new_fid;
|
| - ASSERT_TRUE(directory()->GetFirstChildId(
|
| - &trans, id_factory_.root(), &new_fid));
|
| +
|
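| + // The folder was committed under a local ID and the commit response
|
| + // assigned it a server ID, so look it up by handle rather than by ID.
|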
| + Entry b_folder(&trans, syncable::GET_BY_HANDLE, bookmark_folder_handle);
|
| + ASSERT_TRUE(b_folder.good());
|
| +
|
| + Id new_fid = b_folder.Get(syncable::ID);
|
| ASSERT_FALSE(new_fid.IsRoot());
|
| EXPECT_TRUE(new_fid.ServerKnows());
|
| EXPECT_FALSE(bookmark_folder_id.ServerKnows());
|
| EXPECT_FALSE(new_fid == bookmark_folder_id);
|
| - Entry b_folder(&trans, syncable::GET_BY_ID, new_fid);
|
| - ASSERT_TRUE(b_folder.good());
|
| +
|
| ASSERT_EQ("A bookmark folder", b_folder.Get(NON_UNIQUE_NAME))
|
| << "Name of bookmark folder should not change.";
|
| ASSERT_LT(0, b_folder.Get(BASE_VERSION))
|
| << "Bookmark folder should have a valid (positive) server base revision";
|
|
|
| // Look at the two bookmarks in bookmark_folder.
|
| - Id cid;
|
| - ASSERT_TRUE(directory()->GetFirstChildId(&trans, new_fid, &cid));
|
| - Entry b1(&trans, syncable::GET_BY_ID, cid);
|
| - Entry b2(&trans, syncable::GET_BY_ID, b1.Get(syncable::NEXT_ID));
|
| + Entry b1(&trans, syncable::GET_BY_HANDLE, bookmark1_handle);
|
| + Entry b2(&trans, syncable::GET_BY_HANDLE, bookmark2_handle);
|
| CheckEntry(&b1, "bookmark 1", BOOKMARKS, new_fid);
|
| CheckEntry(&b2, "bookmark 2", BOOKMARKS, new_fid);
|
| - ASSERT_TRUE(b2.Get(syncable::NEXT_ID).IsRoot());
|
|
|
| // Look at the prefs and autofill items.
|
| - Entry p1(&trans, syncable::GET_BY_ID, b_folder.Get(syncable::NEXT_ID));
|
| - Entry p2(&trans, syncable::GET_BY_ID, p1.Get(syncable::NEXT_ID));
|
| + Entry p1(&trans, syncable::GET_BY_HANDLE, pref1_handle);
|
| + Entry p2(&trans, syncable::GET_BY_HANDLE, pref2_handle);
|
| CheckEntry(&p1, "Pref 1", PREFERENCES, id_factory_.root());
|
| CheckEntry(&p2, "Pref 2", PREFERENCES, id_factory_.root());
|
|
|
| - Entry a1(&trans, syncable::GET_BY_ID, p2.Get(syncable::NEXT_ID));
|
| - Entry a2(&trans, syncable::GET_BY_ID, a1.Get(syncable::NEXT_ID));
|
| + Entry a1(&trans, syncable::GET_BY_HANDLE, autofill1_handle);
|
| + Entry a2(&trans, syncable::GET_BY_HANDLE, autofill2_handle);
|
| CheckEntry(&a1, "Autofill 1", AUTOFILL, id_factory_.root());
|
| CheckEntry(&a2, "Autofill 2", AUTOFILL, id_factory_.root());
|
| - ASSERT_TRUE(a2.Get(syncable::NEXT_ID).IsRoot());
|
| }
|
|
|
| // In this test, we test processing a commit response for a commit batch that
|
| @@ -282,17 +293,16 @@ TEST_F(ProcessCommitResponseCommandTest, NewFolderCommitKeepsChildOrder) {
|
|
|
| // Create the parent folder, a new item whose ID will change on commit.
|
| Id folder_id = id_factory_.NewLocalId();
|
| - CreateUnprocessedCommitResult(folder_id, id_factory_.root(), "A",
|
| - BOOKMARKS,
|
| - &commit_set, &request, &response);
|
| + CreateUnprocessedBookmarkCommitResult(folder_id, id_factory_.root(),
|
| + "A", true,
|
| + &commit_set, &request, &response);
|
|
|
| // Verify that the item is reachable.
|
| {
|
| syncable::ReadTransaction trans(FROM_HERE, directory());
|
| - Id child_id;
|
| - ASSERT_TRUE(directory()->GetFirstChildId(
|
| - &trans, id_factory_.root(), &child_id));
|
| - ASSERT_EQ(folder_id, child_id);
|
| + syncable::Entry root(&trans, syncable::GET_BY_ID, id_factory_.root());
|
| + ASSERT_TRUE(root.good());
|
| + ASSERT_EQ(folder_id, root.GetFirstChildId());
|
| }
|
|
|
| // The first 25 children of the parent folder will be part of the commit
|
| @@ -302,19 +312,44 @@ TEST_F(ProcessCommitResponseCommandTest, NewFolderCommitKeepsChildOrder) {
|
| for (; i < batch_size; ++i) {
|
| // Alternate between new and old child items, just for kicks.
|
| Id id = (i % 4 < 2) ? id_factory_.NewLocalId() : id_factory_.NewServerId();
|
| - CreateUnprocessedCommitResult(
|
| - id, folder_id, base::StringPrintf("Item %d", i), BOOKMARKS,
|
| + CreateUnprocessedBookmarkCommitResult(
|
| + id, folder_id, base::StringPrintf("Item %d", i), false,
|
| &commit_set, &request, &response);
|
| }
|
| // The second 25 children will be unsynced items but NOT part of the commit
|
| // batch. When the ID of the parent folder changes during the commit,
|
| // these items PARENT_ID should be updated, and their ordering should be
|
| // preserved.
|
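| + // Remember the metahandles of these items so that we can check on them
|
| + // after the commit response has been processed.
|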
| + std::vector<int64> uncommitted_existing;
|
| + std::vector<int64> uncommitted_new;
|
| for (; i < 2*batch_size; ++i) {
|
| // Alternate between new and old child items, just for kicks.
|
| - Id id = (i % 4 < 2) ? id_factory_.NewLocalId() : id_factory_.NewServerId();
|
| - CreateUnsyncedItem(id, folder_id, base::StringPrintf("Item %d", i),
|
| - false, BOOKMARKS, NULL);
|
| + if (i % 4 < 2) {
|
| + int64 metahandle;
|
| + syncable::Id id = id_factory_.NewLocalId();
|
| + test_entry_factory_->CreateUnsyncedBookmarkItem(
|
| + id, folder_id, base::StringPrintf("Item %d", i), false, &metahandle);
|
| + uncommitted_new.push_back(metahandle);
|
| + } else {
|
| + int64 metahandle;
|
| + syncable::Id id = id_factory_.NewServerId();
|
| + test_entry_factory_->CreateUnsyncedBookmarkItem(
|
| + id, folder_id, base::StringPrintf("Item %d", i), false, &metahandle);
|
| + uncommitted_existing.push_back(metahandle);
|
| + }
|
| + }
|
| +
|
| + // Take a snapshot of current positions under our folder.
|
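| + // These positions should be preserved across the commit, even though the
|
| + // parent folder will be assigned a new ID.
|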
| + std::vector<UniquePosition> positions;
|
| + {
|
| + syncable::ReadTransaction trans(FROM_HERE, directory());
|
| + Entry folder(&trans, syncable::GET_BY_ID, folder_id);
|
| + syncable::Id id_iter = folder.GetFirstChildId();
|
| + while (!id_iter.IsRoot()) {
|
| + Entry entry(&trans, syncable::GET_BY_ID, id_iter);
|
| + positions.push_back(entry.Get(UNIQUE_POSITION));
|
| + id_iter = entry.GetSuccessorId();
|
| + }
|
| }
|
|
|
| // Process the commit response for the parent folder and the first
|
| @@ -328,9 +363,9 @@ TEST_F(ProcessCommitResponseCommandTest, NewFolderCommitKeepsChildOrder) {
|
| syncable::ReadTransaction trans(FROM_HERE, directory());
|
| // Lookup the parent folder by finding a child of the root. We can't use
|
| // folder_id here, because it changed during the commit.
|
| - Id new_fid;
|
| - ASSERT_TRUE(directory()->GetFirstChildId(
|
| - &trans, id_factory_.root(), &new_fid));
|
| + syncable::Entry root(&trans, syncable::GET_BY_ID, id_factory_.root());
|
| + ASSERT_TRUE(root.good());
|
| + Id new_fid = root.GetFirstChildId();
|
| ASSERT_FALSE(new_fid.IsRoot());
|
| EXPECT_TRUE(new_fid.ServerKnows());
|
| EXPECT_FALSE(folder_id.ServerKnows());
|
| @@ -342,40 +377,54 @@ TEST_F(ProcessCommitResponseCommandTest, NewFolderCommitKeepsChildOrder) {
|
| ASSERT_LT(0, parent.Get(BASE_VERSION))
|
| << "Parent should have a valid (positive) server base revision";
|
|
|
| - Id cid;
|
| - ASSERT_TRUE(directory()->GetFirstChildId(&trans, new_fid, &cid));
|
| - int child_count = 0;
|
| - // Now loop over all the children of the parent folder, verifying
|
| - // that they are in their original order by checking to see that their
|
| - // names are still sequential.
|
| - while (!cid.IsRoot()) {
|
| - SCOPED_TRACE(::testing::Message("Examining item #") << child_count);
|
| + // Now loop over the entries of the commit response, ensuring that each
|
| + // item in the request was successfully committed.
|
| + for (int i = 0; i < response.commit().entryresponse_size(); ++i) {
|
| + Id cid = Id::CreateFromServerId(
|
| + response.commit().entryresponse(i).id_string());
|
| +
|
| + SCOPED_TRACE(::testing::Message("Examining item #") << cid);
|
| Entry c(&trans, syncable::GET_BY_ID, cid);
|
| DCHECK(c.good());
|
| - ASSERT_EQ(base::StringPrintf("Item %d", child_count),
|
| - c.Get(NON_UNIQUE_NAME));
|
| - ASSERT_EQ(new_fid, c.Get(syncable::PARENT_ID));
|
| - if (child_count < batch_size) {
|
| - ASSERT_FALSE(c.Get(IS_UNSYNCED)) << "Item should be committed";
|
| - ASSERT_TRUE(cid.ServerKnows());
|
| - ASSERT_LT(0, c.Get(BASE_VERSION));
|
| - } else {
|
| - ASSERT_TRUE(c.Get(IS_UNSYNCED)) << "Item should be uncommitted";
|
| - // We alternated between creates and edits; double check that these items
|
| - // have been preserved.
|
| - if (child_count % 4 < 2) {
|
| - ASSERT_FALSE(cid.ServerKnows());
|
| - ASSERT_GE(0, c.Get(BASE_VERSION));
|
| - } else {
|
| - ASSERT_TRUE(cid.ServerKnows());
|
| - ASSERT_LT(0, c.Get(BASE_VERSION));
|
| - }
|
| +
|
| + if (cid != new_fid) { // If it's not the parent, it must be a child.
|
| + ASSERT_EQ(new_fid, c.Get(syncable::PARENT_ID));
|
| }
|
| - cid = c.Get(syncable::NEXT_ID);
|
| - child_count++;
|
| + ASSERT_FALSE(c.Get(IS_UNSYNCED)) << "Item should be committed";
|
| + ASSERT_TRUE(cid.ServerKnows());
|
| + ASSERT_LT(0, c.Get(BASE_VERSION));
|
| + }
|
| +
|
| + // Ensure the items that did not fit into the commit batch are as we left
|
| + // them. The pre-existing (server-known) items should still have their
|
| + // server IDs and positive base versions.
|
| + for (std::vector<int64>::iterator i = uncommitted_existing.begin();
|
| + i != uncommitted_existing.end(); ++i) {
|
| + Entry entry(&trans, syncable::GET_BY_HANDLE, *i);
|
| + ASSERT_TRUE(entry.Get(ID).ServerKnows());
|
| + ASSERT_LT(0, entry.Get(BASE_VERSION));
|
| }
|
| - ASSERT_EQ(batch_size*2, child_count)
|
| - << "Too few or too many children in parent folder after commit.";
|
| +
|
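| + // The locally created items should still be local-only: they have no
|
| + // server ID and no valid base version yet.
|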
| + for (std::vector<int64>::iterator i = uncommitted_new.begin();
|
| + i != uncommitted_new.end(); ++i) {
|
| + Entry entry(&trans, syncable::GET_BY_HANDLE, *i);
|
| + ASSERT_FALSE(entry.Get(ID).ServerKnows());
|
| + ASSERT_GE(0, entry.Get(BASE_VERSION));
|
| + }
|
| +
|
| + // Verify positions against the snapshot we took earlier.
|
| + syncable::Entry new_f(&trans, syncable::GET_BY_ID, new_fid);
|
| + ASSERT_TRUE(new_f.good());
|
| + syncable::Id id_iter = new_f.GetFirstChildId();
|
| + std::vector<UniquePosition>::iterator snapshot_iter = positions.begin();
|
| + while (!id_iter.IsRoot() && snapshot_iter != positions.end()) {
|
| + SCOPED_TRACE(::testing::Message("Examining: ") << id_iter);
|
| + Entry entry(&trans, syncable::GET_BY_ID, id_iter);
|
| + ASSERT_TRUE(snapshot_iter->Equals(entry.Get(UNIQUE_POSITION)));
|
| + id_iter = entry.GetSuccessorId();
|
| + snapshot_iter++;
|
| + }
|
| +
|
| + // Both iterations should end at the same time; the folder should contain
|
| + // the same number of children as it did before the commit.
|
| + EXPECT_TRUE(id_iter.IsRoot() && snapshot_iter == positions.end());
|
| }
|
|
|
| // This test fixture runs across a Cartesian product of per-type fail/success
|
| @@ -413,13 +462,15 @@ TEST_P(MixedResult, ExtensionActivity) {
|
| << "To not be lame, this test requires more than one active group.";
|
|
|
| // Bookmark item setup.
|
| - CreateUnprocessedCommitResult(id_factory_.NewServerId(),
|
| - id_factory_.root(), "Some bookmark", BOOKMARKS,
|
| + CreateUnprocessedBookmarkCommitResult(
|
| + id_factory_.NewServerId(),
|
| + id_factory_.root(), "Some bookmark", false,
|
| &commit_set, &request, &response);
|
| if (ShouldFailBookmarkCommit())
|
| SetLastErrorCode(CommitResponse::TRANSIENT_ERROR, &response);
|
| // Autofill item setup.
|
| - CreateUnprocessedCommitResult(id_factory_.NewServerId(),
|
| + CreateUnprocessedCommitResult(
|
| + id_factory_.NewServerId(),
|
| id_factory_.root(), "Some autofill", AUTOFILL,
|
| &commit_set, &request, &response);
|
| if (ShouldFailAutofillCommit())
|
|
|