// Copyright (c) 2006-2009 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#include "chrome/browser/sync/engine/get_commit_ids_command.h"

#include <set>
#include <utility>
#include <vector>

#include "chrome/browser/sync/engine/syncer_util.h"
#include "chrome/browser/sync/engine/syncer_session.h"
idana 2009/09/10 05:44:37
syncer_session.h should come before syncer_util.h
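(For reference: Chromium style keeps each include block alphabetized, so the reordering the reviewer is asking for would presumably be just a swap of the two lines above, roughly:

#include "chrome/browser/sync/engine/syncer_session.h"
#include "chrome/browser/sync/engine/syncer_util.h"
)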
#include "chrome/browser/sync/syncable/syncable.h"
#include "chrome/browser/sync/util/sync_types.h"

using std::set;
using std::vector;

namespace browser_sync {

GetCommitIdsCommand::GetCommitIdsCommand(int commit_batch_size)
    : requested_commit_batch_size_(commit_batch_size) {}

GetCommitIdsCommand::~GetCommitIdsCommand() {}

void GetCommitIdsCommand::ExecuteImpl(SyncerSession *session) {
idana 2009/09/10 05:44:37
"SyncerSession *session" -> "SyncerSession* session"
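(For reference: Chromium style attaches the asterisk to the type, so the corrected signature above would presumably read as below; the same nit would apply to the other SyncerSession parameters later in this file.

void GetCommitIdsCommand::ExecuteImpl(SyncerSession* session) {
)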
  // Gather the full set of unsynced items and store it in the session.
  // They are not in the correct order for commit.
  syncable::Directory::UnsyncedMetaHandles all_unsynced_handles;
  SyncerUtil::GetUnsyncedEntries(session->write_transaction(),
      &all_unsynced_handles);
idana 2009/09/10 05:44:37
Indentation
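(For reference: the reviewer presumably wants the wrapped &all_unsynced_handles argument above aligned with the first argument of the call, roughly:

  SyncerUtil::GetUnsyncedEntries(session->write_transaction(),
                                 &all_unsynced_handles);
)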
  session->set_unsynced_handles(all_unsynced_handles);

  BuildCommitIds(session);

  const vector<syncable::Id>& verified_commit_ids =
      ordered_commit_set_.GetCommitIds();

  for (size_t i = 0; i < verified_commit_ids.size(); i++)
    LOG(INFO) << "Debug commit batch result:" << verified_commit_ids[i];

  session->set_commit_ids(verified_commit_ids);
}

void GetCommitIdsCommand::AddUncommittedParentsAndTheirPredecessors(
    syncable::BaseTransaction* trans,
    syncable::Id parent_id) {
  using namespace syncable;
  OrderedCommitSet item_dependencies;

  // Climb the tree adding entries leaf -> root.
  while (!parent_id.ServerKnows()) {
    Entry parent(trans, GET_BY_ID, parent_id);
    CHECK(parent.good()) << "Bad user-only parent in item path.";
    int64 handle = parent.Get(META_HANDLE);
    if (ordered_commit_set_.HaveCommitItem(handle) ||
        item_dependencies.HaveCommitItem(handle)) {
      break;
    }
    if (!AddItemThenPredecessors(trans, &parent, IS_UNSYNCED,
                                 &item_dependencies)) {
      break;  // Parent was already present in the set.
    }
    parent_id = parent.Get(PARENT_ID);
  }

  // Reverse what we added to get the correct order.
  ordered_commit_set_.AppendReverse(item_dependencies);
}

bool GetCommitIdsCommand::AddItem(syncable::Entry* item,
                                  OrderedCommitSet* result) {
  int64 item_handle = item->Get(syncable::META_HANDLE);
  if (result->HaveCommitItem(item_handle) ||
      ordered_commit_set_.HaveCommitItem(item_handle)) {
    return false;
  }
  result->AddCommitItem(item_handle, item->Get(syncable::ID));
  return true;
}

bool GetCommitIdsCommand::AddItemThenPredecessors(
    syncable::BaseTransaction* trans,
    syncable::Entry* item,
    syncable::IndexedBitField inclusion_filter,
    OrderedCommitSet* result) {
  if (!AddItem(item, result))
    return false;
  if (item->Get(syncable::IS_DEL))
    return true;  // Deleted items have no predecessors.

  syncable::Id prev_id = item->Get(syncable::PREV_ID);
  while (!prev_id.IsRoot()) {
    syncable::Entry prev(trans, syncable::GET_BY_ID, prev_id);
    CHECK(prev.good()) << "Bad id when walking predecessors.";
    if (!prev.Get(inclusion_filter))
      break;
    if (!AddItem(&prev, result))
      break;
    prev_id = prev.Get(syncable::PREV_ID);
  }
  return true;
}

void GetCommitIdsCommand::AddPredecessorsThenItem(
    syncable::BaseTransaction* trans,
    syncable::Entry* item,
    syncable::IndexedBitField inclusion_filter) {
  OrderedCommitSet item_dependencies;
  AddItemThenPredecessors(trans, item, inclusion_filter, &item_dependencies);

  // Reverse what we added to get the correct order.
  ordered_commit_set_.AppendReverse(item_dependencies);
}

bool GetCommitIdsCommand::IsCommitBatchFull() {
  return ordered_commit_set_.Size() >= requested_commit_batch_size_;
}

void GetCommitIdsCommand::AddCreatesAndMoves(SyncerSession *session) {
  // Add moves and creates, and prepend their uncommitted parents.
  for (CommitMetahandleIterator iterator(session, &ordered_commit_set_);
       !IsCommitBatchFull() && iterator.Valid();
       iterator.Increment()) {
    int64 metahandle = iterator.Current();

    syncable::Entry entry(session->write_transaction(),
                          syncable::GET_BY_HANDLE,
                          metahandle);
    if (!entry.Get(syncable::IS_DEL)) {
      AddUncommittedParentsAndTheirPredecessors(
          session->write_transaction(), entry.Get(syncable::PARENT_ID));
      AddPredecessorsThenItem(session->write_transaction(), &entry,
                              syncable::IS_UNSYNCED);
    }
  }

  // It's possible that we overcommitted while trying to expand dependent
  // items. If so, truncate the set down to the allowed size.
  ordered_commit_set_.Truncate(requested_commit_batch_size_);
}

void GetCommitIdsCommand::AddDeletes(SyncerSession *session) {
idana 2009/09/10 05:44:37
"SyncerSession *session" -> "SyncerSession* session"
  set<syncable::Id> legal_delete_parents;

  for (CommitMetahandleIterator iterator(session, &ordered_commit_set_);
       !IsCommitBatchFull() && iterator.Valid();
       iterator.Increment()) {
    int64 metahandle = iterator.Current();

    syncable::Entry entry(session->write_transaction(),
                          syncable::GET_BY_HANDLE,
                          metahandle);

    if (entry.Get(syncable::IS_DEL)) {
      syncable::Entry parent(session->write_transaction(),
                             syncable::GET_BY_ID,
                             entry.Get(syncable::PARENT_ID));
      // If the parent is deleted and unsynced, then any children of that
      // parent don't need to be added to the delete queue.
      //
      // Note: the parent could be synced if there was an update deleting a
      // folder when we had deleted all items in it.
      // We may get more updates, or we may want to delete the entry.
      if (parent.good() &&
          parent.Get(syncable::IS_DEL) &&
          parent.Get(syncable::IS_UNSYNCED)) {
        // However, if an entry is moved, these rules can apply differently.
        //
        // If the entry was moved and then the destination parent was
        // deleted, we'll miss it in the roll up. We have to add it manually.
        // TODO(chron): Unit test for move / delete cases:
        // Case 1: Locally moved, then parent deleted
        // Case 2: Server moved, then locally issue recursive delete.
        if (entry.Get(syncable::ID).ServerKnows() &&
            entry.Get(syncable::PARENT_ID) !=
                entry.Get(syncable::SERVER_PARENT_ID)) {
          LOG(INFO) << "Inserting moved and deleted entry, will be missed by"
              " delete roll." << entry.Get(syncable::ID);

          ordered_commit_set_.AddCommitItem(metahandle,
                                            entry.Get(syncable::ID));
        }

        // Skip this entry since it's a child of a parent that will be
        // deleted. The server will unroll the delete and delete the
        // child as well.
        continue;
      }

      legal_delete_parents.insert(entry.Get(syncable::PARENT_ID));
    }
  }

  // We could store all the potential entries with a particular parent during
  // the above scan, but instead we rescan here. This is less efficient, but
  // we're dropping memory alloc/dealloc in favor of linear scans of recently
  // examined entries.
  //
  // Scan through the UnsyncedMetaHandles again. If we have a deleted
  // entry, then check if the parent is in legal_delete_parents.
  //
  // Parent being in legal_delete_parents means for the child:
  //   a recursive delete is not currently happening (no recent deletes in same
  //   folder)
  //   parent did expect at least one old deleted child
  //   parent was not deleted

  for (CommitMetahandleIterator iterator(session, &ordered_commit_set_);
       !IsCommitBatchFull() && iterator.Valid();
       iterator.Increment()) {
    int64 metahandle = iterator.Current();
    syncable::MutableEntry entry(session->write_transaction(),
                                 syncable::GET_BY_HANDLE,
                                 metahandle);
    if (entry.Get(syncable::IS_DEL)) {
      syncable::Id parent_id = entry.Get(syncable::PARENT_ID);
      if (legal_delete_parents.count(parent_id)) {
        ordered_commit_set_.AddCommitItem(metahandle, entry.Get(syncable::ID));
      }
    }
  }
}

void GetCommitIdsCommand::BuildCommitIds(SyncerSession *session) {
idana 2009/09/10 05:44:37
"SyncerSession *session" -> "SyncerSession* session"
  // Commits follow these rules:
  // 1. Moves or creates are preceded by needed folder creates, from
  //    root to leaf. For folders whose contents are ordered, moves
  //    and creates appear in order.
  // 2. Moves/Creates before deletes.
  // 3. Deletes, collapsed.
  // We commit deleted moves under deleted items as moves when collapsing
  // delete trees.

  // Add moves and creates, and prepend their uncommitted parents.
  AddCreatesAndMoves(session);

  // Add all deletes.
  AddDeletes(session);
}

}  // namespace browser_sync