Chromium Code Reviews

Side by Side Diff: chrome/browser/sync/engine/process_updates_command.cc

Issue 194065: Initial commit of sync engine code to browser/sync.... (Closed) Base URL: svn://chrome-svn/chrome/trunk/src/
Patch Set: Fixes to gtest include path, reverted syncapi. Created 11 years, 3 months ago
Property Changes:
Added: svn:eol-style
+ LF
(New file; the old side of the diff is empty.)
1 // Copyright (c) 2006-2009 The Chromium Authors. All rights reserved.
2 // Use of this source code is governed by a BSD-style license that can be
3 // found in the LICENSE file.
4
5 #include "chrome/browser/sync/engine/process_updates_command.h"
6
7 #include <vector>
8
9 #include "base/basictypes.h"
10 #include "chrome/browser/sync/engine/syncer.h"
11 #include "chrome/browser/sync/engine/syncer_util.h"
idana 2009/09/10 05:44:37 Move syncer_util.h after syncer_session.h
12 #include "chrome/browser/sync/engine/syncer_proto_util.h"
13 #include "chrome/browser/sync/engine/syncer_session.h"
14 #include "chrome/browser/sync/engine/syncproto.h"
15 #include "chrome/browser/sync/syncable/directory_manager.h"
16 #include "chrome/browser/sync/syncable/syncable.h"
17 #include "chrome/browser/sync/util/character_set_converters.h"
18
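For reference, a sketch of what idana's suggestion above would produce: moving syncer_util.h after syncer_session.h leaves the engine headers in alphabetical order. This is only the reordered include block, not part of the patch set itself:

#include "base/basictypes.h"
#include "chrome/browser/sync/engine/syncer.h"
#include "chrome/browser/sync/engine/syncer_proto_util.h"
#include "chrome/browser/sync/engine/syncer_session.h"
#include "chrome/browser/sync/engine/syncer_util.h"
#include "chrome/browser/sync/engine/syncproto.h"
#include "chrome/browser/sync/syncable/directory_manager.h"
#include "chrome/browser/sync/syncable/syncable.h"
#include "chrome/browser/sync/util/character_set_converters.h"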
19 using std::vector;
20
21 namespace browser_sync {
22
23 ProcessUpdatesCommand::ProcessUpdatesCommand() {}
24 ProcessUpdatesCommand::~ProcessUpdatesCommand() {}
25
26 void ProcessUpdatesCommand::ModelChangingExecuteImpl(SyncerSession* session) {
27 syncable::ScopedDirLookup dir(session->dirman(), session->account_name());
28 if (!dir.good()) {
29 LOG(ERROR) << "Scoped dir lookup failed!";
30 return;
31 }
32 SyncerStatus status(session);
33
34 const GetUpdatesResponse updates = session->update_response().get_updates();
35 const int update_count = updates.entries_size();
36
37 LOG(INFO) << "Get updates from ts " << dir->last_sync_timestamp() <<
38 " returned " << update_count << " updates.";
39
40 if (updates.has_newest_timestamp()) {
41 int64 newest_timestamp = updates.newest_timestamp();
42 LOG(INFO) << "Newest Timestamp:" << newest_timestamp;
43 status.set_servers_latest_timestamp(newest_timestamp);
44 }
45
46 int64 new_timestamp = 0;
47 if (updates.has_new_timestamp()) {
48 new_timestamp = updates.new_timestamp();
49 LOG(INFO) << "Get Updates got new timestamp: " << new_timestamp;
50 if (0 == update_count) {
51 if (new_timestamp > dir->last_sync_timestamp()) {
52 dir->set_last_sync_timestamp(new_timestamp);
53 session->set_timestamp_dirty();
54 }
55 return;
56 }
57 }
58
59 if (0 == status.servers_latest_timestamp()) {
60 // Hack: the new server never gives us the server's latest
61 // timestamp. But if a GetUpdates returns zero, then we know we
62 // are up to date.
63 status.set_servers_latest_timestamp(status.current_sync_timestamp());
64 }
65 // If we have updates that are ALL supposed to be skipped, we don't want
66 // to get them again. In fact, the account's final updates are all
67 // supposed to be skipped, and if we DON'T step past them we will sync forever.
68 int64 latest_skip_timestamp = 0;
69 bool any_non_skip_results = false;
70 vector<VerifiedUpdate>::iterator it;
71 for (it = session->VerifiedUpdatesBegin();
72 it < session->VerifiedUpdatesEnd();
73 ++it) {
74 const sync_pb::SyncEntity update = it->second;
75
76 any_non_skip_results = (it->first != VERIFY_SKIP);
77 if (!any_non_skip_results) {
78 // ALL updates were to be skipped, including this one
79 if (update.sync_timestamp() > latest_skip_timestamp) {
80 latest_skip_timestamp = update.sync_timestamp();
81 }
82 } else {
83 latest_skip_timestamp = 0;
84 }
85
86 if (it->first != VERIFY_SUCCESS && it->first != VERIFY_UNDELETE)
87 continue;
88 switch (ProcessUpdate(dir, update)) {
89 case SUCCESS_PROCESSED:
90 case SUCCESS_STORED:
91 // We can update the timestamp because we store the update
92 // even if we can't apply it now.
93 if (update.sync_timestamp() > new_timestamp)
94 new_timestamp = update.sync_timestamp();
95 break;
96 }
97
98 }
99
100 if (latest_skip_timestamp > new_timestamp)
101 new_timestamp = latest_skip_timestamp;
102
103 if (new_timestamp > dir->last_sync_timestamp()) {
104 dir->set_last_sync_timestamp(new_timestamp);
105 session->set_timestamp_dirty();
106 }
107
108 status.zero_consecutive_problem_get_updates();
109 status.zero_consecutive_errors();
110 status.set_current_sync_timestamp(dir->last_sync_timestamp());
111 status.set_syncing(true);
112 return;
113 }
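A minimal standalone sketch of the timestamp bookkeeping the loop above performs. The types and names here are simplified stand-ins, not the Chromium API, and the real code only advances the timestamp for updates that ProcessUpdate actually stores; the point it illustrates is that a batch consisting entirely of skipped updates still advances the timestamp, so it is never refetched forever.

// Illustrative model only: VerifyResult and Update are pared-down stand-ins.
#include <algorithm>
#include <cstdint>
#include <iostream>
#include <vector>

enum VerifyResult { VERIFY_SKIP, VERIFY_SUCCESS };

struct Update {
  VerifyResult verify_result;
  int64_t sync_timestamp;
};

// Returns the timestamp the client may advance to: the newest processed
// update, or, when the trailing run of updates was all skips, the newest
// skipped one.
int64_t NewTimestamp(const std::vector<Update>& updates, int64_t start) {
  int64_t new_timestamp = start;
  int64_t latest_skip_timestamp = 0;
  for (const Update& u : updates) {
    if (u.verify_result == VERIFY_SKIP) {
      latest_skip_timestamp = std::max(latest_skip_timestamp, u.sync_timestamp);
    } else {
      latest_skip_timestamp = 0;  // A real update resets the skip run.
      new_timestamp = std::max(new_timestamp, u.sync_timestamp);
    }
  }
  return std::max(new_timestamp, latest_skip_timestamp);
}

int main() {
  // All updates skipped: advance past them so they are not fetched again.
  std::cout << NewTimestamp({{VERIFY_SKIP, 5}, {VERIFY_SKIP, 9}}, 0) << "\n";   // 9
  // Mixed batch: advance only to the newest successfully processed update.
  std::cout << NewTimestamp({{VERIFY_SKIP, 5}, {VERIFY_SUCCESS, 7}}, 0) << "\n"; // 7
  return 0;
}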
114
115 namespace {
116 // Returns true if the entry is still OK to process.
117 bool ReverifyEntry(syncable::WriteTransaction* trans, const SyncEntity& entry,
118 syncable::MutableEntry* same_id) {
119
120 const bool deleted = entry.has_deleted() && entry.deleted();
121 const bool is_directory = entry.IsFolder();
122 const bool is_bookmark = entry.has_bookmarkdata();
123
124 return VERIFY_SUCCESS ==
125 SyncerUtil::VerifyUpdateConsistency(trans,
126 entry,
127 same_id,
128 deleted,
129 is_directory,
130 is_bookmark);
131 }
132 } // anonymous namespace
133
134 // TODO(sync): Refactor this code.
135 // Process a single update. Will avoid touching global state.
136 ServerUpdateProcessingResult ProcessUpdatesCommand::ProcessUpdate(
137 const syncable::ScopedDirLookup& dir, const sync_pb::SyncEntity& pb_entry) {
138
139 const SyncEntity& entry = *static_cast<const SyncEntity*>(&pb_entry);
140 using namespace syncable;
141 syncable::Id id = entry.id();
142 SyncName name = SyncerProtoUtil::NameFromSyncEntity(entry);
143
144 WriteTransaction trans(dir, SYNCER, __FILE__, __LINE__);
145
146 SyncerUtil::CreateNewEntry(&trans, id);
147
148 // We take a two-step approach. First we store the entry's data in the
149 // server fields of a local entry, and then move the data to the local fields.
150 MutableEntry update_entry(&trans, GET_BY_ID, id);
151 // TODO(sync): do we need to run ALL these checks, or is a mere version
152 // check good enough?
153 if (!ReverifyEntry(&trans, entry, &update_entry)) {
154 return SUCCESS_PROCESSED; // the entry has become irrelevant
155 }
156
157 SyncerUtil::UpdateServerFieldsFromUpdate(&update_entry, entry, name);
158
159 if (update_entry.Get(SERVER_VERSION) == update_entry.Get(BASE_VERSION) &&
160 !update_entry.Get(IS_UNSYNCED)) {
161 CHECK(SyncerUtil::ServerAndLocalEntriesMatch(
162 &update_entry)) << update_entry;
163 }
164 return SUCCESS_PROCESSED;
165 }
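The two-step comment above (stash downloaded data in an entry's server fields, move it to the local fields in a later apply pass) is the shape of update application in the syncable layer. Below is a minimal standalone model of that idea; the struct layout and function names are illustrative stand-ins, not the actual syncable::MutableEntry API.

// Illustrative model only: a pared-down "entry" with parallel local and
// server field sets, mirroring the two-step approach described above.
#include <cstdint>
#include <iostream>
#include <string>

struct FieldSet {
  int64_t version = 0;
  std::string name;
};

struct Entry {
  FieldSet local;            // What the model layer currently sees.
  FieldSet server;           // Staging area for data received from the server.
  bool is_unsynced = false;  // Local changes not yet committed.
};

// Step 1: stash the downloaded update in the server fields only.
void UpdateServerFieldsFromUpdate(Entry* e, int64_t version,
                                  const std::string& name) {
  e->server.version = version;
  e->server.name = name;
}

// Step 2 (run later by an apply-updates pass): move server data into the
// local fields once it is safe to do so.
bool ApplyServerFields(Entry* e) {
  if (e->is_unsynced) return false;  // Would clobber a pending local change.
  e->local = e->server;
  return true;
}

int main() {
  Entry e;
  UpdateServerFieldsFromUpdate(&e, 7, "bookmark bar");
  std::cout << "before apply: " << e.local.name << "\n";    // Still empty.
  if (ApplyServerFields(&e))
    std::cout << "after apply: " << e.local.name << "\n";   // "bookmark bar"
  return 0;
}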
166
167 } // namespace browser_sync
