Chromium Code Reviews| Index: components/sync_sessions/sessions_sync_manager.cc |
| diff --git a/components/sync_sessions/sessions_sync_manager.cc b/components/sync_sessions/sessions_sync_manager.cc |
| index 5810f8fc937e2677a29fbc6320a2f727e861c95f..839624dd54f6cda4cd6b306075a83bd21837a2ab 100644 |
| --- a/components/sync_sessions/sessions_sync_manager.cc |
| +++ b/components/sync_sessions/sessions_sync_manager.cc |
| @@ -51,6 +51,20 @@ const char kNTPOpenTabSyncURL[] = "chrome://newtab/#open_tabs"; |
| // stale and becomes a candidate for garbage collection. |
| const int kDefaultStaleSessionThresholdDays = 14; // 2 weeks. |
| +// Clean up navigation tracking when we have over this many global_ids. |
| +const size_t kNavigationTrackingCleanupThreshold = 100; |
| + |
| +// When we clean up navigation tracking, delete this many global_ids. |
| +const int kNavigationTrackingCleanupAmount = 10; |
| + |
| +// Used to record conflict information into histogram Sync.GlobalIdConflict. |
| +enum SyncGlobalIdConflict { |
| + CONFLICT = 0, |
| + NO_CONFLICT_NEW_ID, |
| + NO_CONFLICT_SAME_IDS, |
| + CONFLICT_MAX, |
| +}; |
| + |
| // Comparator function for use with std::sort that will sort tabs by |
| // descending timestamp (i.e., most recent first). |
| bool TabsRecencyComparator(const sessions::SessionTab* t1, |
| @@ -578,6 +592,11 @@ void SessionsSyncManager::OnLocalTabModified(SyncedTabDelegate* modified_tab) { |
| } |
| } |
| + sessions::SerializedNavigationEntry current; |
| + modified_tab->GetSerializedNavigationAtIndex( |
|
vabr (Chromium)
2017/07/22 08:22:22
This might be on the path of the crash in https://
|
| + modified_tab->GetCurrentEntryIndex(), &current); |
| + TrackNavigationIds(current); |
| + |
| if (local_tab_pool_out_of_sync_) { |
| // If our tab pool is corrupt, pay the price of a full re-association to |
| // fix things up. This takes care of the new tab modification as well. |
| @@ -1285,6 +1304,22 @@ void SessionsSyncManager::DoGarbageCollection() { |
| sync_processor_->ProcessSyncChanges(FROM_HERE, changes); |
| } |
| +void SessionsSyncManager::AddGlobalIdChangeObserver( |
| + syncer::GlobalIdChange callback) { |
| + global_id_change_observers_.push_back(std::move(callback)); |
| +} |
| + |
| +int64_t SessionsSyncManager::GetLatestGlobalId(int64_t global_id) { |
| + auto g2u_iter = global_to_unique_.find(global_id); |
| + if (g2u_iter != global_to_unique_.end()) { |
| + auto u2g_iter = unique_to_current_global_.find(g2u_iter->second); |
| + if (u2g_iter != unique_to_current_global_.end()) { |
| + return u2g_iter->second; |
| + } |
| + } |
| + return global_id; |
| +} |
| + |
| // static |
| std::string SessionsSyncManager::TagHashFromSpecifics( |
| const sync_pb::SessionSpecifics& specifics) { |
| @@ -1292,4 +1327,76 @@ std::string SessionsSyncManager::TagHashFromSpecifics( |
| TagFromSpecifics(specifics)); |
| } |
| +void SessionsSyncManager::TrackNavigationIds( |
| + const sessions::SerializedNavigationEntry& current) { |
| + // The expectation is that global_id will update for a given unique_id, which |
| + // should accurately and uniquely represent a single navigation. It is |
| + // theoretically possible for two unique_ids to map to the same global_id, but |
| + hopefully rare enough that it doesn't cause much harm. Let's record metrics |
| + to verify this theory. |
| + int64_t global_id = current.timestamp().ToInternalValue(); |
| + // It is possible that the global_id has not been set yet for this navigation. |
| + // In this case there's nothing here for us to track yet. |
| + if (global_id == 0) { |
| + return; |
| + } |
| + |
| + int unique_id = current.unique_id(); |
| + DCHECK_NE(0, unique_id); |
| + |
| + auto g2u_iter = global_to_unique_.find(global_id); |
| + if (g2u_iter == global_to_unique_.end()) { |
| + global_to_unique_.insert(g2u_iter, std::make_pair(global_id, unique_id)); |
| + UMA_HISTOGRAM_ENUMERATION("Sync.GlobalIdConflict", NO_CONFLICT_NEW_ID, |
| + CONFLICT_MAX); |
| + } else if (g2u_iter->second != unique_id) { |
| + UMA_HISTOGRAM_ENUMERATION("Sync.GlobalIdConflict", CONFLICT, CONFLICT_MAX); |
| + } else { |
| + UMA_HISTOGRAM_ENUMERATION("Sync.GlobalIdConflict", NO_CONFLICT_SAME_IDS, |
| + CONFLICT_MAX); |
| + } |
| + |
| + auto u2g_iter = unique_to_current_global_.find(unique_id); |
| + if (u2g_iter == unique_to_current_global_.end()) { |
| + unique_to_current_global_.insert(u2g_iter, |
| + std::make_pair(unique_id, global_id)); |
| + } else if (u2g_iter->second != global_id) { |
| + // Remember the old_global_id before we insert and invalidate our iterator. |
| + int64_t old_global_id = u2g_iter->second; |
| + |
| + // TODO(skym): Use insert_or_assign with hint once on C++17. |
| + unique_to_current_global_[unique_id] = global_id; |
| + |
| + // This should be done after updating unique_to_current_global_ in case one |
| + // of our observers calls into GetLatestGlobalId(). |
| + for (auto& observer : global_id_change_observers_) { |
| + observer.Run(old_global_id, global_id); |
| + } |
| + } |
| + |
| + CleanupNavigationTracking(); |
| +} |
| + |
| +void SessionsSyncManager::CleanupNavigationTracking() { |
| + DCHECK(kNavigationTrackingCleanupThreshold > |
| + kNavigationTrackingCleanupAmount); |
| + |
| + // |global_to_unique_| is implicitly ordered by least recently created, which |
| + // means we can drop from the beginning. |
| + if (global_to_unique_.size() > kNavigationTrackingCleanupThreshold) { |
| + auto iter = global_to_unique_.begin(); |
| + std::advance(iter, kNavigationTrackingCleanupAmount); |
| + global_to_unique_.erase(global_to_unique_.begin(), iter); |
| + |
| + // While |unique_id|s do get bigger for the most part, this isn't a great |
| + // thing to make assumptions about, and an old tab may get refreshed often |
| + // and still be very important. So instead just delete anything that's |
| + // orphaned from |global_to_unique_|. |
| + base::EraseIf(unique_to_current_global_, |
| + [this](const std::pair<int, int64_t> kv) { |
| + return !base::ContainsKey(global_to_unique_, kv.second); |
| + }); |
| + } |
| +} |
| + |
| }; // namespace sync_sessions |