Chromium Code Reviews| Index: sync/engine/syncer.cc |
| diff --git a/sync/engine/syncer.cc b/sync/engine/syncer.cc |
| index 62c7af893ff2d6dc91d74c53980c7a50f4eef564..d845ba1a19fad65ac8e671e798687015299393df 100644 |
| --- a/sync/engine/syncer.cc |
| +++ b/sync/engine/syncer.cc |
| @@ -110,17 +110,6 @@ void Syncer::SyncShare(sessions::SyncSession* session, |
| switch (current_step) { |
| case SYNCER_BEGIN: |
| - // This isn't perfect, as we can end up bundling extensions activity |
| - // intended for the next session into the current one. We could do a |
| - // test-and-reset as with the source, but note that also falls short if |
| - // the commit request fails (e.g. due to lost connection), as we will |
| - // fall all the way back to the syncer thread main loop in that case, |
| - // creating a new session when a connection is established, losing the |
| - // records set here on the original attempt. This should provide us |
| - // with the right data "most of the time", and we're only using this |
| - // for analysis purposes, so Law of Large Numbers FTW. |
| - session->context()->extensions_monitor()->GetAndClearRecords( |
| - session->mutable_extensions_activity()); |
| session->context()->PruneUnthrottledTypes(base::TimeTicks::Now()); |
| session->SendEventNotification(SyncEngineEvent::SYNC_CYCLE_BEGIN); |
| @@ -166,13 +155,42 @@ void Syncer::SyncShare(sessions::SyncSession* session, |
| case STORE_TIMESTAMPS: { |
| StoreTimestampsCommand store_timestamps; |
| store_timestamps.Execute(session); |
| - // We should download all of the updates before attempting to process |
| - // them. |
| - if (session->status_controller().ServerSaysNothingMoreToDownload() || |
| - !session->status_controller().download_updates_succeeded()) { |
| + // We download all of the updates before attempting to apply them. |
| + SyncerError result = |
| + session->status_controller().last_download_updates_result(); |
| + if (result == SYNCER_OK) { |
| + const ClientToServerResponse& updates_response = |
| + session->status_controller().updates_response(); |
|
tim (not reviewing)
2012/04/20 16:41:07
You're doing this just to get changes_remaining, r
rlarocque
2012/04/23 20:16:49
The next revision of this patch should avoid this
|
| + // Loop back if we still have more to download. Otherwise exit this |
| + // loop normally. |
| + DCHECK(updates_response.get_updates().has_changes_remaining()); |
| + if (updates_response.get_updates().changes_remaining() != 0) { |
| + next_step = DOWNLOAD_UPDATES; |
| + } else { |
| + next_step = APPLY_UPDATES; |
| + } |
| + } else if (result == SERVER_RETURN_MIGRATION_DONE) { |
| + // The MIGRATION_DONE case is special. The migration test code relies |
|
tim (not reviewing)
2012/04/20 16:41:07
As far as I can tell it is actually handled naturally
rlarocque
2012/04/23 20:16:49
Same as above.
|
| + // on all sorts of strange side effects, and this is one of them. The |
| + // effect of this special case is that we will count downloads that |
| + // encounter migration_done errors as non-failures, while jobs that do |
| + // attempt to commit will be counted as failures most of the time, |
|
tim (not reviewing)
2012/04/20 16:41:07
I'm having a hard time groking this: "... we will
rlarocque
2012/04/23 20:16:49
This will also be simplified in the next revision.
|
| + // because they will fail PROCESS_COMMIT_RESPONSE. In short, we're |
| + // trying to make sure that the necessary changes for crbug.com/122033 |
| + // do not upset the delicate balance that allows the migration tests |
| + // to pass. |
| + // |
| + // We are tracking some of the effort to clean up this mess at |
| + // crbug.com/123954. |
| next_step = APPLY_UPDATES; |
| } else { |
| - next_step = DOWNLOAD_UPDATES; |
| + // All other errors. We should not attempt to apply updates until we |
| + // have received all of them. At this point, we have not received all |
| + // downloads so we should not attempt to apply any of the updates that |
| + // we have downloaded successfully. |
| + last_step = SYNCER_END; // Necessary for CONFIGURATION mode. |
| + next_step = SYNCER_END; |
| + DVLOG(1) << "Aborting sync cycle due to download updates failure"; |
| } |
| break; |
| } |
| @@ -203,6 +221,19 @@ void Syncer::SyncShare(sessions::SyncSession* session, |
| if (!session->status_controller().commit_ids().empty()) { |
| DVLOG(1) << "Building a commit message"; |
| + |
| + // This isn't perfect, since the set of extensions activity may not |
| + // correlate exactly with the items being committed. That's OK as |
| + // long as we're looking for a rough estimate of extensions activity, |
| + // not a precise mapping of which commits were triggered by which |
| + // extension. |
| + // |
| + // We will push this list of extensions activity back into the |
| + // ExtensionsActivityMonitor if this commit turns out to not contain |
| + // any bookmarks, or if the commit fails. |
| + session->context()->extensions_monitor()->GetAndClearRecords( |
| + session->mutable_extensions_activity()); |
| + |
| BuildCommitCommand build_commit_command; |
| build_commit_command.Execute(session); |