Chromium Code Reviews

| OLD | NEW |
|---|---|
| 1 // Copyright 2017 The Chromium Authors. All rights reserved. | 1 // Copyright 2017 The Chromium Authors. All rights reserved. |
| 2 // Use of this source code is governed by a BSD-style license that can be | 2 // Use of this source code is governed by a BSD-style license that can be |
| 3 // found in the LICENSE file. | 3 // found in the LICENSE file. |
| 4 | 4 |
| 5 #include "components/history/core/browser/typed_url_sync_bridge.h" | 5 #include "components/history/core/browser/typed_url_sync_bridge.h" |
| 6 | 6 |
| 7 #include "base/big_endian.h" | 7 #include "base/big_endian.h" |
| 8 #include "base/memory/ptr_util.h" | 8 #include "base/memory/ptr_util.h" |
| 9 #include "base/metrics/histogram_macros.h" | 9 #include "base/metrics/histogram_macros.h" |
| 10 #include "base/strings/utf_string_conversions.h" | 10 #include "base/strings/utf_string_conversions.h" |
| (...skipping 18 matching lines...) | |
| 29 // the size under control we limit the visit array. | 29 // the size under control we limit the visit array. |
| 30 static const int kMaxTypedUrlVisits = 100; | 30 static const int kMaxTypedUrlVisits = 100; |
| 31 | 31 |
| 32 // There's no limit on how many visits the history DB could have for a given | 32 // There's no limit on how many visits the history DB could have for a given |
| 33 // typed URL, so we limit how many we fetch from the DB to avoid crashes due to | 33 // typed URL, so we limit how many we fetch from the DB to avoid crashes due to |
| 34 // running out of memory (http://crbug.com/89793). This value is different | 34 // running out of memory (http://crbug.com/89793). This value is different |
| 35 // from kMaxTypedUrlVisits, as some of the visits fetched from the DB may be | 35 // from kMaxTypedUrlVisits, as some of the visits fetched from the DB may be |
| 36 // RELOAD visits, which will be stripped. | 36 // RELOAD visits, which will be stripped. |
| 37 static const int kMaxVisitsToFetch = 1000; | 37 static const int kMaxVisitsToFetch = 1000; |
| 38 | 38 |
| 39 // This is the threshold at which we start throttling sync updates for typed | |
| 40 // URLs - any URLs with a typed_count >= this threshold will be throttled. | |
| 41 static const int kTypedUrlVisitThrottleThreshold = 10; | |
| 42 | |
| 43 // This is the multiple we use when throttling sync updates. If the multiple is | |
| 44 // N, we sync up every Nth update (i.e. when typed_count % N == 0). | |
| 45 static const int kTypedUrlVisitThrottleMultiple = 10; | |
| 46 | |
| 39 // Enforce oldest to newest visit order. | 47 // Enforce oldest to newest visit order. |
| 40 static bool CheckVisitOrdering(const VisitVector& visits) { | 48 static bool CheckVisitOrdering(const VisitVector& visits) { |
| 41 int64_t previous_visit_time = 0; | 49 int64_t previous_visit_time = 0; |
| 42 for (VisitVector::const_iterator visit = visits.begin(); | 50 for (VisitVector::const_iterator visit = visits.begin(); |
| 43 visit != visits.end(); ++visit) { | 51 visit != visits.end(); ++visit) { |
| 44 if (visit != visits.begin() && | 52 if (visit != visits.begin() && |
| 45 previous_visit_time > visit->visit_time.ToInternalValue()) | 53 previous_visit_time > visit->visit_time.ToInternalValue()) |
| 46 return false; | 54 return false; |
| 47 | 55 |
| 48 previous_visit_time = visit->visit_time.ToInternalValue(); | 56 previous_visit_time = visit->visit_time.ToInternalValue(); |
| 49 } | 57 } |
| 50 return true; | 58 return true; |
| 51 } | 59 } |
| 52 | 60 |
| 53 std::string GetStorageKeyFromURLRow(const URLRow& row) { | 61 std::string GetStorageKeyFromURLRow(const URLRow& row) { |
| 62 DCHECK_NE(row.id(), 0); | |
| 54 std::string storage_key(sizeof(row.id()), 0); | 63 std::string storage_key(sizeof(row.id()), 0); |
| 55 base::WriteBigEndian<URLID>(&storage_key[0], row.id()); | 64 base::WriteBigEndian<URLID>(&storage_key[0], row.id()); |
| 56 return storage_key; | 65 return storage_key; |
| 57 } | 66 } |
| 58 | 67 |
| 59 bool HasTypedUrl(const VisitVector& visits) { | 68 bool HasTypedUrl(const VisitVector& visits) { |
| 60 auto typed_url_visit = std::find_if( | 69 auto typed_url_visit = |
| 61 visits.begin(), visits.end(), [](const history::VisitRow& visit) { | 70 std::find_if(visits.begin(), visits.end(), [](const VisitRow& visit) { |
| 62 return ui::PageTransitionCoreTypeIs(visit.transition, | 71 return ui::PageTransitionCoreTypeIs(visit.transition, |
| 63 ui::PAGE_TRANSITION_TYPED); | 72 ui::PAGE_TRANSITION_TYPED); |
| 64 }); | 73 }); |
| 65 return typed_url_visit != visits.end(); | 74 return typed_url_visit != visits.end(); |
| 66 } | 75 } |
| 67 | 76 |
| 68 } // namespace | 77 } // namespace |
| 69 | 78 |
| 70 TypedURLSyncBridge::TypedURLSyncBridge( | 79 TypedURLSyncBridge::TypedURLSyncBridge( |
| 71 HistoryBackend* history_backend, | 80 HistoryBackend* history_backend, |
| (...skipping 19 matching lines...) | |
| 91 return base::MakeUnique<syncer::SyncMetadataStoreChangeList>( | 100 return base::MakeUnique<syncer::SyncMetadataStoreChangeList>( |
| 92 sync_metadata_database_, syncer::TYPED_URLS); | 101 sync_metadata_database_, syncer::TYPED_URLS); |
| 93 } | 102 } |
| 94 | 103 |
| 95 base::Optional<ModelError> TypedURLSyncBridge::MergeSyncData( | 104 base::Optional<ModelError> TypedURLSyncBridge::MergeSyncData( |
| 96 std::unique_ptr<MetadataChangeList> metadata_change_list, | 105 std::unique_ptr<MetadataChangeList> metadata_change_list, |
| 97 EntityChangeList entity_data) { | 106 EntityChangeList entity_data) { |
| 98 DCHECK(sequence_checker_.CalledOnValidSequence()); | 107 DCHECK(sequence_checker_.CalledOnValidSequence()); |
| 99 | 108 |
| 100 // Create a mapping of all local data by URLID. These will be narrowed down | 109 // Create a mapping of all local data by URLID. These will be narrowed down |
| 101 // by CreateOrUpdateUrl() to include only the entries different from sync | 110 // by MergeURLWithSync() to include only the entries different from sync |
| 102 // server data. | 111 // server data. |
| 103 TypedURLMap new_db_urls; | 112 TypedURLMap new_db_urls; |
| 104 | 113 |
| 105 // Get all the visits and map the URLRows by URL. | 114 // Get all the visits and map the URLRows by URL. |
| 106 URLVisitVectorMap local_visit_vectors; | 115 URLVisitVectorMap local_visit_vectors; |
| 107 | 116 |
| 108 if (!GetValidURLsAndVisits(&local_visit_vectors, &new_db_urls)) { | 117 if (!GetValidURLsAndVisits(&local_visit_vectors, &new_db_urls)) { |
| 109 return ModelError( | 118 return ModelError( |
| 110 FROM_HERE, "Could not get the typed_url entries from HistoryBackend."); | 119 FROM_HERE, "Could not get the typed_url entries from HistoryBackend."); |
| 111 } | 120 } |
| 112 | 121 |
| 113 // New sync data organized for different write operations to history backend. | 122 // New sync data organized for different write operations to history backend. |
| 114 history::URLRows new_synced_urls; | 123 URLRows new_synced_urls; |
| 115 history::URLRows updated_synced_urls; | 124 URLRows updated_synced_urls; |
| 116 TypedURLVisitVector new_synced_visits; | 125 TypedURLVisitVector new_synced_visits; |
| 117 | 126 |
| 118 // Iterate through entity_data and check for all the urls that | 127 // Iterate through entity_data and check for all the urls that |
| 119 // sync already knows about. CreateOrUpdateUrl() will remove urls that | 128 // sync already knows about. MergeURLWithSync() will remove urls that |
| 120 // are the same as the synced ones from |new_db_urls|. | 129 // are the same as the synced ones from |new_db_urls|. |
| 121 for (const EntityChange& entity_change : entity_data) { | 130 for (const EntityChange& entity_change : entity_data) { |
| 122 DCHECK(entity_change.data().specifics.has_typed_url()); | 131 DCHECK(entity_change.data().specifics.has_typed_url()); |
| 123 const TypedUrlSpecifics& specifics = | 132 const TypedUrlSpecifics& specifics = |
| 124 entity_change.data().specifics.typed_url(); | 133 entity_change.data().specifics.typed_url(); |
| 125 if (ShouldIgnoreUrl(GURL(specifics.url()))) | 134 if (ShouldIgnoreUrl(GURL(specifics.url()))) |
| 126 continue; | 135 continue; |
| 127 | 136 |
| 128 // Ignore old sync urls that don't have any transition data stored with | 137 // Ignore old sync urls that don't have any transition data stored with |
| 129 // them, or transition data that does not match the visit data (will be | 138 // them, or transition data that does not match the visit data (will be |
| 130 // deleted below). | 139 // deleted below). |
| 131 if (specifics.visit_transitions_size() == 0 || | 140 if (specifics.visit_transitions_size() == 0 || |
| 132 specifics.visit_transitions_size() != specifics.visits_size()) { | 141 specifics.visit_transitions_size() != specifics.visits_size()) { |
| 133 // Generate a debug assertion to help track down http://crbug.com/91473, | 142 // Generate a debug assertion to help track down http://crbug.com/91473, |
| 134 // even though we gracefully handle this case by overwriting this node. | 143 // even though we gracefully handle this case by overwriting this node. |
| 135 DCHECK_EQ(specifics.visits_size(), specifics.visit_transitions_size()); | 144 DCHECK_EQ(specifics.visits_size(), specifics.visit_transitions_size()); |
| 136 DVLOG(1) << "Ignoring obsolete sync url with no visit transition info."; | 145 DVLOG(1) << "Ignoring obsolete sync url with no visit transition info."; |
| 137 | 146 |
| 138 continue; | 147 continue; |
| 139 } | 148 } |
| 140 UpdateUrlFromServer(specifics, &new_db_urls, &local_visit_vectors, | 149 MergeURLWithSync(specifics, &new_db_urls, &local_visit_vectors, |
| 141 &new_synced_urls, &new_synced_visits, | 150 &new_synced_urls, &new_synced_visits, |
| 142 &updated_synced_urls); | 151 &updated_synced_urls); |
| 143 } | 152 } |
| 144 | 153 |
| 145 for (const auto& kv : new_db_urls) { | 154 for (const auto& kv : new_db_urls) { |
| 146 if (!HasTypedUrl(local_visit_vectors[kv.first])) { | 155 SendTypedURLToProcessor(kv.second, local_visit_vectors[kv.first], |
| 147 // This URL has no TYPED visits, don't sync it | 156 metadata_change_list.get()); |
| 148 continue; | |
| 149 } | |
| 150 std::string storage_key = GetStorageKeyFromURLRow(kv.second); | |
| 151 change_processor()->Put( | |
| 152 storage_key, CreateEntityData(kv.second, local_visit_vectors[kv.first]), | |
| 153 metadata_change_list.get()); | |
| 154 } | 157 } |
| 155 | 158 |
| 156 base::Optional<ModelError> error = WriteToHistoryBackend( | 159 base::Optional<ModelError> error = WriteToHistoryBackend( |
| 157 &new_synced_urls, &updated_synced_urls, NULL, &new_synced_visits, NULL); | 160 &new_synced_urls, &updated_synced_urls, NULL, &new_synced_visits, NULL); |
| 158 | 161 |
| 159 if (error) | 162 if (error) |
| 160 return error; | 163 return error; |
| 161 | 164 |
| 162 for (const EntityChange& entity_change : entity_data) { | 165 for (const EntityChange& entity_change : entity_data) { |
| 163 DCHECK(entity_change.data().specifics.has_typed_url()); | 166 DCHECK(entity_change.data().specifics.has_typed_url()); |
| 164 std::string storage_key = | 167 std::string storage_key = |
| 165 GetStorageKeyInternal(entity_change.data().specifics.typed_url().url()); | 168 GetStorageKeyInternal(entity_change.data().specifics.typed_url().url()); |
| 166 if (storage_key.empty()) { | 169 if (storage_key.empty()) { |
| 167 // ignore entity change | 170 // ignore entity change |
| 171 change_processor()->UntrackEntity(entity_change.data()); | |
| 168 } else { | 172 } else { |
| 169 change_processor()->UpdateStorageKey(entity_change.data(), storage_key, | 173 change_processor()->UpdateStorageKey(entity_change.data(), storage_key, |
| 170 metadata_change_list.get()); | 174 metadata_change_list.get()); |
| 171 } | 175 } |
| 172 } | 176 } |
| 173 | 177 |
| 174 UMA_HISTOGRAM_PERCENTAGE("Sync.TypedUrlMergeAndStartSyncingErrors", | 178 UMA_HISTOGRAM_PERCENTAGE("Sync.TypedUrlMergeAndStartSyncingErrors", |
| 175 GetErrorPercentage()); | 179 GetErrorPercentage()); |
| 176 ClearErrorStats(); | 180 ClearErrorStats(); |
| 177 | 181 |
| 178 return static_cast<syncer::SyncMetadataStoreChangeList*>( | 182 return static_cast<syncer::SyncMetadataStoreChangeList*>( |
| 179 metadata_change_list.get()) | 183 metadata_change_list.get()) |
| 180 ->TakeError(); | 184 ->TakeError(); |
| 181 } | 185 } |
| 182 | 186 |
| 183 base::Optional<ModelError> TypedURLSyncBridge::ApplySyncChanges( | 187 base::Optional<ModelError> TypedURLSyncBridge::ApplySyncChanges( |
| 184 std::unique_ptr<MetadataChangeList> metadata_change_list, | 188 std::unique_ptr<MetadataChangeList> metadata_change_list, |
| 185 EntityChangeList entity_changes) { | 189 EntityChangeList entity_changes) { |
| 186 DCHECK(sequence_checker_.CalledOnValidSequence()); | 190 DCHECK(sequence_checker_.CalledOnValidSequence()); |
| 187 NOTIMPLEMENTED(); | 191 DCHECK(sync_metadata_database_); |
| 192 | |
| 193 std::vector<GURL> pending_deleted_urls; | |
| 194 TypedURLVisitVector new_synced_visits; | |
| 195 VisitVector deleted_visits; | |
| 196 URLRows updated_synced_urls; | |
| 197 URLRows new_synced_urls; | |
| 198 | |
| 199 for (const EntityChange& entity_change : entity_changes) { | |
| 200 if (entity_change.type() == EntityChange::ACTION_DELETE) { | |
| 201 URLRow url_row; | |
| 202 int64_t url_id = sync_metadata_database_->StorageKeyToURLID( | |
| 203 entity_change.storage_key()); | |
| 204 if (!history_backend_->GetURLByID(url_id, &url_row)) { | |
| 205 // Ignoring the case that there is no matching URLRow with URLID | |
| 206 // |url_id|. | |
| 207 continue; | |
| 208 } | |
| 209 | |
| 210 pending_deleted_urls.push_back(url_row.url()); | |
| 211 continue; | |
| 212 } | |
| 213 | |
| 214 DCHECK(entity_change.data().specifics.has_typed_url()); | |
| 215 const TypedUrlSpecifics& specifics = | |
| 216 entity_change.data().specifics.typed_url(); | |
| 217 | |
| 218 GURL url(specifics.url()); | |
| 219 | |
| 220 if (ShouldIgnoreUrl(url)) | |
| 221 continue; | |
| 222 | |
| 223 DCHECK(specifics.visits_size()); | |
| 224 sync_pb::TypedUrlSpecifics filtered_url = FilterExpiredVisits(specifics); | |
| 225 if (filtered_url.visits_size() == 0) | |
| 226 continue; | |
| 227 | |
| 228 UpdateFromSync(filtered_url, &new_synced_visits, &deleted_visits, | |
| 229 &updated_synced_urls, &new_synced_urls); | |
| 230 } | |
| 231 | |
| 232 WriteToHistoryBackend(&new_synced_urls, &updated_synced_urls, | |
| 233 &pending_deleted_urls, &new_synced_visits, | |
| 234 &deleted_visits); | |
| 235 | |
| 236 // New entities were either ignored or written to history DB and assigned a | |
| 237 // storage key. Notify processor about updated storage keys. | |
| 238 for (const EntityChange& entity_change : entity_changes) { | |
| 239 if (entity_change.type() == EntityChange::ACTION_ADD) { | |
| 240 std::string storage_key = GetStorageKeyInternal( | |
| 241 entity_change.data().specifics.typed_url().url()); | |
| 242 if (storage_key.empty()) { | |
| 243 // ignore entity change | |
| 244 change_processor()->UntrackEntity(entity_change.data()); | |
| 245 } else { | |
| 246 change_processor()->UpdateStorageKey(entity_change.data(), storage_key, | |
| 247 metadata_change_list.get()); | |
| 248 } | |
| 249 } | |
| 250 } | |
| 251 | |
| 188 return {}; | 252 return {}; |
| 189 } | 253 } |
| 190 | 254 |
| 191 void TypedURLSyncBridge::GetData(StorageKeyList storage_keys, | 255 void TypedURLSyncBridge::GetData(StorageKeyList storage_keys, |
| 192 DataCallback callback) { | 256 DataCallback callback) { |
| 193 DCHECK(sequence_checker_.CalledOnValidSequence()); | 257 DCHECK(sequence_checker_.CalledOnValidSequence()); |
| 194 NOTIMPLEMENTED(); | 258 DCHECK(sync_metadata_database_); |
| 259 | |
| 260 auto batch = base::MakeUnique<MutableDataBatch>(); | |
| 261 for (const std::string& key : storage_keys) { | |
| 262 URLRow url_row; | |
| 263 URLID url_id = sync_metadata_database_->StorageKeyToURLID(key); | |
| 264 | |
| 265 ++num_db_accesses_; | |
| 266 if (!history_backend_->GetURLByID(url_id, &url_row)) { | |
| 267 // Ignoring the case which no matching URLRow with URLID |url_id|. | |
| 268 DLOG(ERROR) << "Could not find URL for id: " << url_id; | |
| 269 continue; | |
| 270 } | |
| 271 | |
| 272 VisitVector visits_vector; | |
| 273 FixupURLAndGetVisits(&url_row, &visits_vector); | |
| 274 batch->Put(key, CreateEntityData(url_row, visits_vector)); | |

> **pavely** 2017/07/18 22:35:27: Is it possible for CreateEntityData to return null …
> **Gang Wu** 2017/07/19 16:26:34: Batch has already handled empty entitydata, but I …
> **pavely** 2017/07/19 16:57:34: The problem is not with the batch, batch is just a …
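The question in the thread above is live in this patch: the NEW `CreateEntityData()` returns `nullptr` when `WriteToTypedUrlSpecifics()` finds no TYPED visits (new line 1100), while `GetData()` hands the result straight to `batch->Put()` (new line 274). Below is a minimal, self-contained sketch of the guard being discussed; `FakeEntityData`, `CreateEntityDataOrNull`, and the vector used as a "batch" are hypothetical stand-ins, not Chromium's `EntityData` or `MutableDataBatch`.

```cpp
// Standalone model of the null guard under discussion (hypothetical types,
// not Chromium's real classes).
#include <iostream>
#include <memory>
#include <string>
#include <utility>
#include <vector>

struct FakeEntityData {
  std::string non_unique_name;
};

// Mirrors the patched CreateEntityData(): returns nullptr when the row has
// no TYPED visits, i.e. nothing worth syncing.
std::unique_ptr<FakeEntityData> CreateEntityDataOrNull(bool has_typed_visit,
                                                       const std::string& url) {
  if (!has_typed_visit)
    return nullptr;
  auto data = std::make_unique<FakeEntityData>();
  data->non_unique_name = url;
  return data;
}

int main() {
  struct Row {
    std::string url;
    bool has_typed_visit;
  };
  const std::vector<Row> rows = {{"http://typed.test/", true},
                                 {"http://reload-only.test/", false}};

  std::vector<std::unique_ptr<FakeEntityData>> batch;
  for (const Row& row : rows) {
    std::unique_ptr<FakeEntityData> data =
        CreateEntityDataOrNull(row.has_typed_visit, row.url);
    if (!data)
      continue;  // Skip the row instead of putting a null entity in the batch.
    batch.push_back(std::move(data));
  }
  std::cout << "entities in batch: " << batch.size() << std::endl;  // Prints 1.
  return 0;
}
```

Whether the right response is to simply skip the entry or also untrack it is for the review to settle; the sketch only shows the null check itself.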
| 275 } | |
| 276 | |
| 277 callback.Run(std::move(batch)); | |
| 195 } | 278 } |
| 196 | 279 |
| 197 void TypedURLSyncBridge::GetAllData(DataCallback callback) { | 280 void TypedURLSyncBridge::GetAllData(DataCallback callback) { |
| 198 DCHECK(sequence_checker_.CalledOnValidSequence()); | 281 DCHECK(sequence_checker_.CalledOnValidSequence()); |
| 199 | 282 |
| 200 history::URLRows typed_urls; | 283 URLRows typed_urls; |
| 201 ++num_db_accesses_; | 284 ++num_db_accesses_; |
| 202 if (!history_backend_->GetAllTypedURLs(&typed_urls)) { | 285 if (!history_backend_->GetAllTypedURLs(&typed_urls)) { |
| 203 ++num_db_errors_; | 286 ++num_db_errors_; |
| 204 change_processor()->ReportError(FROM_HERE, | 287 change_processor()->ReportError(FROM_HERE, |
| 205 "Could not get the typed_url entries."); | 288 "Could not get the typed_url entries."); |
| 206 return; | 289 return; |
| 207 } | 290 } |
| 208 | 291 |
| 209 auto batch = base::MakeUnique<MutableDataBatch>(); | 292 auto batch = base::MakeUnique<MutableDataBatch>(); |
| 210 for (history::URLRow& url : typed_urls) { | 293 for (URLRow& url : typed_urls) { |
| 211 VisitVector visits_vector; | 294 VisitVector visits_vector; |
| 212 FixupURLAndGetVisits(&url, &visits_vector); | 295 FixupURLAndGetVisits(&url, &visits_vector); |
| 213 batch->Put(GetStorageKeyFromURLRow(url), | 296 batch->Put(GetStorageKeyFromURLRow(url), |
| 214 CreateEntityData(url, visits_vector)); | 297 CreateEntityData(url, visits_vector)); |
| 215 } | 298 } |
| 216 callback.Run(std::move(batch)); | 299 callback.Run(std::move(batch)); |
| 217 } | 300 } |
| 218 | 301 |
| 219 // Must be exactly the value of GURL::spec() for backwards comparability with | 302 // Must be exactly the value of GURL::spec() for backwards comparability with |
| 220 // the previous (Directory + SyncableService) iteration of sync integration. | 303 // the previous (Directory + SyncableService) iteration of sync integration. |
| (...skipping 11 matching lines...) | |
| 232 // with sync because it has a significantly low memory cost than a URL. | 315 // with sync because it has a significantly low memory cost than a URL. |
| 233 std::string TypedURLSyncBridge::GetStorageKey(const EntityData& entity_data) { | 316 std::string TypedURLSyncBridge::GetStorageKey(const EntityData& entity_data) { |
| 234 NOTREACHED() << "TypedURLSyncBridge do not support GetStorageKey."; | 317 NOTREACHED() << "TypedURLSyncBridge do not support GetStorageKey."; |
| 235 return std::string(); | 318 return std::string(); |
| 236 } | 319 } |
| 237 | 320 |
| 238 bool TypedURLSyncBridge::SupportsGetStorageKey() const { | 321 bool TypedURLSyncBridge::SupportsGetStorageKey() const { |
| 239 return false; | 322 return false; |
| 240 } | 323 } |
| 241 | 324 |
| 242 void TypedURLSyncBridge::OnURLVisited(history::HistoryBackend* history_backend, | 325 void TypedURLSyncBridge::OnURLVisited(HistoryBackend* history_backend, |
| 243 ui::PageTransition transition, | 326 ui::PageTransition transition, |
| 244 const history::URLRow& row, | 327 const URLRow& row, |
| 245 const history::RedirectList& redirects, | 328 const RedirectList& redirects, |
| 246 base::Time visit_time) { | 329 base::Time visit_time) { |
| 247 DCHECK(sequence_checker_.CalledOnValidSequence()); | 330 DCHECK(sequence_checker_.CalledOnValidSequence()); |
| 248 NOTIMPLEMENTED(); | 331 |
| 332 if (!change_processor()->IsTrackingMetadata()) | |
| 333 return; // Sync processor not yet ready, don't sync. | |
| 334 if (!ShouldSyncVisit(row.typed_count(), transition)) | |
| 335 return; | |
| 336 | |
| 337 std::unique_ptr<MetadataChangeList> metadata_change_list = | |
| 338 CreateMetadataChangeList(); | |
| 339 | |
| 340 UpdateSyncFromLocal(row, metadata_change_list.get()); | |
| 249 } | 341 } |
| 250 | 342 |
| 251 void TypedURLSyncBridge::OnURLsModified( | 343 void TypedURLSyncBridge::OnURLsModified(HistoryBackend* history_backend, |
| 252 history::HistoryBackend* history_backend, | 344 const URLRows& changed_urls) { |
| 253 const history::URLRows& changed_urls) { | |
| 254 DCHECK(sequence_checker_.CalledOnValidSequence()); | 345 DCHECK(sequence_checker_.CalledOnValidSequence()); |
| 255 NOTIMPLEMENTED(); | 346 |
| 347 if (!change_processor()->IsTrackingMetadata()) | |
| 348 return; // Sync processor not yet ready, don't sync. | |
| 349 | |
| 350 std::unique_ptr<MetadataChangeList> metadata_change_list = | |
| 351 CreateMetadataChangeList(); | |
| 352 | |
| 353 for (const auto& row : changed_urls) { | |
| 354 // Only care if the modified URL is typed. | |
| 355 if (row.typed_count() >= 0) { | |
| 356 // If there were any errors updating the sync node, just ignore them and | |
| 357 // continue on to process the next URL. | |
| 358 UpdateSyncFromLocal(row, metadata_change_list.get()); | |
| 359 } | |
| 360 } | |
| 256 } | 361 } |
| 257 | 362 |
| 258 void TypedURLSyncBridge::OnURLsDeleted(history::HistoryBackend* history_backend, | 363 void TypedURLSyncBridge::OnURLsDeleted(HistoryBackend* history_backend, |
| 259 bool all_history, | 364 bool all_history, |
| 260 bool expired, | 365 bool expired, |
| 261 const history::URLRows& deleted_rows, | 366 const URLRows& deleted_rows, |
| 262 const std::set<GURL>& favicon_urls) { | 367 const std::set<GURL>& favicon_urls) { |
| 263 DCHECK(sequence_checker_.CalledOnValidSequence()); | 368 DCHECK(sequence_checker_.CalledOnValidSequence()); |
| 264 NOTIMPLEMENTED(); | 369 if (!change_processor()->IsTrackingMetadata()) |
| 370 return; // Sync processor not yet ready, don't sync. | |
| 371 | |
| 372 // Ignore URLs expired due to old age (we don't want to sync them as deletions | |
| 373 // to avoid extra traffic up to the server, and also to make sure that a | |
| 374 // client with a bad clock setting won't go on an expiration rampage and | |
| 375 // delete all history from every client). The server will gracefully age out | |
| 376 // the sync DB entries when they've been idle for long enough. | |
| 377 if (expired) | |
| 378 return; | |
| 379 | |
| 380 std::unique_ptr<MetadataChangeList> metadata_change_list = | |
| 381 CreateMetadataChangeList(); | |
| 382 | |
| 383 if (all_history) { | |
| 384 auto batch = base::MakeUnique<syncer::MetadataBatch>(); | |
| 385 if (!sync_metadata_database_->GetAllSyncMetadata(batch.get())) { | |
| 386 change_processor()->ReportError(FROM_HERE, | |
| 387 "Failed reading typed url metadata from " | |
| 388 "TypedURLSyncMetadataDatabase."); | |
| 389 return; | |
| 390 } | |
| 391 | |
| 392 syncer::EntityMetadataMap metadata_map(batch->TakeAllMetadata()); | |
| 393 for (const auto& kv : metadata_map) { | |
| 394 change_processor()->Delete(kv.first, metadata_change_list.get()); | |
| 395 } | |
| 396 } else { | |
| 397 // Delete rows. | |
| 398 for (const auto& row : deleted_rows) { | |
| 399 std::string storage_key = GetStorageKeyFromURLRow(row); | |
| 400 change_processor()->Delete(storage_key, metadata_change_list.get()); | |
| 401 } | |
| 402 } | |
| 265 } | 403 } |
| 266 | 404 |
| 267 void TypedURLSyncBridge::Init() { | 405 void TypedURLSyncBridge::Init() { |
| 268 DCHECK(sequence_checker_.CalledOnValidSequence()); | 406 DCHECK(sequence_checker_.CalledOnValidSequence()); |
| 269 | 407 |
| 270 history_backend_observer_.Add(history_backend_); | 408 history_backend_observer_.Add(history_backend_); |
| 271 LoadMetadata(); | 409 LoadMetadata(); |
| 272 } | 410 } |
| 273 | 411 |
| 274 int TypedURLSyncBridge::GetErrorPercentage() const { | 412 int TypedURLSyncBridge::GetErrorPercentage() const { |
| 275 return num_db_accesses_ ? (100 * num_db_errors_ / num_db_accesses_) : 0; | 413 return num_db_accesses_ ? (100 * num_db_errors_ / num_db_accesses_) : 0; |
| 276 } | 414 } |
| 277 | 415 |
| 416 // static | |
| 278 bool TypedURLSyncBridge::WriteToTypedUrlSpecifics( | 417 bool TypedURLSyncBridge::WriteToTypedUrlSpecifics( |
| 279 const URLRow& url, | 418 const URLRow& url, |
| 280 const VisitVector& visits, | 419 const VisitVector& visits, |
| 281 TypedUrlSpecifics* typed_url) { | 420 TypedUrlSpecifics* typed_url) { |
| 282 DCHECK(!url.last_visit().is_null()); | 421 DCHECK(!url.last_visit().is_null()); |
| 283 DCHECK(!visits.empty()); | 422 DCHECK(!visits.empty()); |
| 284 DCHECK_EQ(url.last_visit().ToInternalValue(), | 423 DCHECK_EQ(url.last_visit().ToInternalValue(), |
| 285 visits.back().visit_time.ToInternalValue()); | 424 visits.back().visit_time.ToInternalValue()); |
| 286 | 425 |
| 287 typed_url->set_url(url.url().spec()); | 426 typed_url->set_url(url.url().spec()); |
| 288 typed_url->set_title(base::UTF16ToUTF8(url.title())); | 427 typed_url->set_title(base::UTF16ToUTF8(url.title())); |
| 289 typed_url->set_hidden(url.hidden()); | 428 typed_url->set_hidden(url.hidden()); |
| 290 | 429 |
| 291 DCHECK(CheckVisitOrdering(visits)); | 430 DCHECK(CheckVisitOrdering(visits)); |
| 292 | 431 |
| 293 bool only_typed = false; | 432 bool only_typed = false; |
| 294 int skip_count = 0; | 433 int skip_count = 0; |
| 295 | 434 |
| 296 if (std::find_if(visits.begin(), visits.end(), | 435 if (!HasTypedUrl(visits)) { |
| 297 [](const history::VisitRow& visit) { | |
| 298 return ui::PageTransitionCoreTypeIs( | |
| 299 visit.transition, ui::PAGE_TRANSITION_TYPED); | |
| 300 }) == visits.end()) { | |
| 301 // This URL has no TYPED visits, don't sync it | 436 // This URL has no TYPED visits, don't sync it |
| 302 return false; | 437 return false; |
| 303 } | 438 } |
| 304 | 439 |
| 305 if (visits.size() > static_cast<size_t>(kMaxTypedUrlVisits)) { | 440 if (visits.size() > static_cast<size_t>(kMaxTypedUrlVisits)) { |
| 306 int typed_count = 0; | 441 int typed_count = 0; |
| 307 int total = 0; | 442 int total = 0; |
| 308 // Walk the passed-in visit vector and count the # of typed visits. | 443 // Walk the passed-in visit vector and count the # of typed visits. |
| 309 for (VisitRow visit : visits) { | 444 for (VisitRow visit : visits) { |
| 310 // We ignore reload visits. | 445 // We ignore reload visits. |
| (...skipping 50 matching lines...) | |
| 361 CHECK_GT(typed_url->visits_size(), 0); | 496 CHECK_GT(typed_url->visits_size(), 0); |
| 362 CHECK_LE(typed_url->visits_size(), kMaxTypedUrlVisits); | 497 CHECK_LE(typed_url->visits_size(), kMaxTypedUrlVisits); |
| 363 CHECK_EQ(typed_url->visits_size(), typed_url->visit_transitions_size()); | 498 CHECK_EQ(typed_url->visits_size(), typed_url->visit_transitions_size()); |
| 364 | 499 |
| 365 return true; | 500 return true; |
| 366 } | 501 } |
| 367 | 502 |
| 368 // static | 503 // static |
| 369 TypedURLSyncBridge::MergeResult TypedURLSyncBridge::MergeUrls( | 504 TypedURLSyncBridge::MergeResult TypedURLSyncBridge::MergeUrls( |
| 370 const TypedUrlSpecifics& sync_url, | 505 const TypedUrlSpecifics& sync_url, |
| 371 const history::URLRow& url, | 506 const URLRow& url, |
| 372 history::VisitVector* visits, | 507 VisitVector* visits, |
| 373 history::URLRow* new_url, | 508 URLRow* new_url, |
| 374 std::vector<history::VisitInfo>* new_visits) { | 509 std::vector<VisitInfo>* new_visits) { |
| 375 DCHECK(new_url); | 510 DCHECK(new_url); |
| 376 DCHECK_EQ(sync_url.url(), url.url().spec()); | 511 DCHECK_EQ(sync_url.url(), url.url().spec()); |
| 377 DCHECK_EQ(sync_url.url(), new_url->url().spec()); | 512 DCHECK_EQ(sync_url.url(), new_url->url().spec()); |
| 378 DCHECK(visits->size()); | 513 DCHECK(visits->size()); |
| 379 DCHECK_GT(sync_url.visits_size(), 0); | 514 DCHECK_GT(sync_url.visits_size(), 0); |
| 380 CHECK_EQ(sync_url.visits_size(), sync_url.visit_transitions_size()); | 515 CHECK_EQ(sync_url.visits_size(), sync_url.visit_transitions_size()); |
| 381 | 516 |
| 382 // Convert these values only once. | 517 // Convert these values only once. |
| 383 base::string16 sync_url_title(base::UTF8ToUTF16(sync_url.title())); | 518 base::string16 sync_url_title(base::UTF8ToUTF16(sync_url.title())); |
| 384 base::Time sync_url_last_visit = base::Time::FromInternalValue( | 519 base::Time sync_url_last_visit = base::Time::FromInternalValue( |
| (...skipping 53 matching lines...) | |
| 438 // If the sync_url visit is older than any existing visit in the history | 573 // If the sync_url visit is older than any existing visit in the history |
| 439 // DB, don't re-add it - this keeps us from resurrecting visits that were | 574 // DB, don't re-add it - this keeps us from resurrecting visits that were |
| 440 // aged out locally. | 575 // aged out locally. |
| 441 // | 576 // |
| 442 // TODO(sync): This extra check should be unnecessary now that filtering | 577 // TODO(sync): This extra check should be unnecessary now that filtering |
| 443 // expired visits is performed separately. Non-expired visits older than | 578 // expired visits is performed separately. Non-expired visits older than |
| 444 // the earliest existing history visits should still be synced, so this | 579 // the earliest existing history visits should still be synced, so this |
| 445 // check should be removed. | 580 // check should be removed. |
| 446 if (sync_url_time > earliest_history_time) { | 581 if (sync_url_time > earliest_history_time) { |
| 447 different |= DIFF_LOCAL_VISITS_ADDED; | 582 different |= DIFF_LOCAL_VISITS_ADDED; |
| 448 new_visits->push_back(history::VisitInfo( | 583 new_visits->push_back(VisitInfo( |
| 449 sync_url_time, ui::PageTransitionFromInt(sync_url.visit_transitions( | 584 sync_url_time, ui::PageTransitionFromInt(sync_url.visit_transitions( |
| 450 sync_url_visit_index)))); | 585 sync_url_visit_index)))); |
| 451 } | 586 } |
| 452 // This visit is added to visits below. | 587 // This visit is added to visits below. |
| 453 ++sync_url_visit_index; | 588 ++sync_url_visit_index; |
| 454 } else { | 589 } else { |
| 455 // Same (already synced) entry found in both DBs - no need to do anything. | 590 // Same (already synced) entry found in both DBs - no need to do anything. |
| 456 ++sync_url_visit_index; | 591 ++sync_url_visit_index; |
| 457 ++history_visit_index; | 592 ++history_visit_index; |
| 458 } | 593 } |
| 459 } | 594 } |
| 460 | 595 |
| 461 DCHECK(CheckVisitOrdering(*visits)); | 596 DCHECK(CheckVisitOrdering(*visits)); |
| 462 if (different & DIFF_LOCAL_VISITS_ADDED) { | 597 if (different & DIFF_LOCAL_VISITS_ADDED) { |
| 463 // If the server does not have the same visits as the local db, then the | 598 // If the server does not have the same visits as the local db, then the |
| 464 // new visits from the server need to be added to the vector containing | 599 // new visits from the server need to be added to the vector containing |
| 465 // local visits. These visits will be passed to the server. | 600 // local visits. These visits will be passed to the server. |
| 466 // Insert new visits into the appropriate place in the visits vector. | 601 // Insert new visits into the appropriate place in the visits vector. |
| 467 history::VisitVector::iterator visit_ix = visits->begin(); | 602 VisitVector::iterator visit_ix = visits->begin(); |
| 468 for (std::vector<history::VisitInfo>::iterator new_visit = | 603 for (std::vector<VisitInfo>::iterator new_visit = new_visits->begin(); |
| 469 new_visits->begin(); | |
| 470 new_visit != new_visits->end(); ++new_visit) { | 604 new_visit != new_visits->end(); ++new_visit) { |
| 471 while (visit_ix != visits->end() && | 605 while (visit_ix != visits->end() && |
| 472 new_visit->first > visit_ix->visit_time) { | 606 new_visit->first > visit_ix->visit_time) { |
| 473 ++visit_ix; | 607 ++visit_ix; |
| 474 } | 608 } |
| 475 visit_ix = | 609 visit_ix = visits->insert(visit_ix, VisitRow(url.id(), new_visit->first, |
| 476 visits->insert(visit_ix, history::VisitRow(url.id(), new_visit->first, | 610 0, new_visit->second, 0)); |
| 477 0, new_visit->second, 0)); | |
| 478 ++visit_ix; | 611 ++visit_ix; |
| 479 } | 612 } |
| 480 } | 613 } |
| 481 DCHECK(CheckVisitOrdering(*visits)); | 614 DCHECK(CheckVisitOrdering(*visits)); |
| 482 | 615 |
| 483 new_url->set_last_visit(visits->back().visit_time); | 616 new_url->set_last_visit(visits->back().visit_time); |
| 484 return different; | 617 return different; |
| 485 } | 618 } |
| 486 | 619 |
| 487 // static | 620 // static |
| 621 void TypedURLSyncBridge::DiffVisits( | |
| 622 const VisitVector& history_visits, | |
| 623 const sync_pb::TypedUrlSpecifics& sync_specifics, | |
| 624 std::vector<VisitInfo>* new_visits, | |
| 625 VisitVector* removed_visits) { | |
| 626 DCHECK(new_visits); | |
| 627 size_t old_visit_count = history_visits.size(); | |
| 628 size_t new_visit_count = sync_specifics.visits_size(); | |
| 629 size_t old_index = 0; | |
| 630 size_t new_index = 0; | |
| 631 while (old_index < old_visit_count && new_index < new_visit_count) { | |
| 632 base::Time new_visit_time = | |
| 633 base::Time::FromInternalValue(sync_specifics.visits(new_index)); | |
| 634 if (history_visits[old_index].visit_time < new_visit_time) { | |
| 635 if (new_index > 0 && removed_visits) { | |
| 636 // If there are visits missing from the start of the node, that | |
| 637 // means that they were probably clipped off due to our code that | |
| 638 // limits the size of the sync nodes - don't delete them from our | |
| 639 // local history. | |
| 640 removed_visits->push_back(history_visits[old_index]); | |
| 641 } | |
| 642 ++old_index; | |
| 643 } else if (history_visits[old_index].visit_time > new_visit_time) { | |
| 644 new_visits->push_back(VisitInfo( | |
| 645 new_visit_time, ui::PageTransitionFromInt( | |
| 646 sync_specifics.visit_transitions(new_index)))); | |
| 647 ++new_index; | |
| 648 } else { | |
| 649 ++old_index; | |
| 650 ++new_index; | |
| 651 } | |
| 652 } | |
| 653 | |
| 654 if (removed_visits) { | |
| 655 for (; old_index < old_visit_count; ++old_index) { | |
| 656 removed_visits->push_back(history_visits[old_index]); | |
| 657 } | |
| 658 } | |
| 659 | |
| 660 for (; new_index < new_visit_count; ++new_index) { | |
| 661 new_visits->push_back(VisitInfo( | |
| 662 base::Time::FromInternalValue(sync_specifics.visits(new_index)), | |
| 663 ui::PageTransitionFromInt( | |
| 664 sync_specifics.visit_transitions(new_index)))); | |
| 665 } | |
| 666 } | |
| 667 | |
| 668 // static | |
| 488 void TypedURLSyncBridge::UpdateURLRowFromTypedUrlSpecifics( | 669 void TypedURLSyncBridge::UpdateURLRowFromTypedUrlSpecifics( |
| 489 const TypedUrlSpecifics& typed_url, | 670 const TypedUrlSpecifics& typed_url, |
| 490 history::URLRow* new_url) { | 671 URLRow* new_url) { |
| 491 DCHECK_GT(typed_url.visits_size(), 0); | 672 DCHECK_GT(typed_url.visits_size(), 0); |
| 492 CHECK_EQ(typed_url.visit_transitions_size(), typed_url.visits_size()); | 673 CHECK_EQ(typed_url.visit_transitions_size(), typed_url.visits_size()); |
| 493 if (!new_url->url().is_valid()) { | 674 if (!new_url->url().is_valid()) { |
| 494 new_url->set_url(GURL(typed_url.url())); | 675 new_url->set_url(GURL(typed_url.url())); |
| 495 } | 676 } |
| 496 new_url->set_title(base::UTF8ToUTF16(typed_url.title())); | 677 new_url->set_title(base::UTF8ToUTF16(typed_url.title())); |
| 497 new_url->set_hidden(typed_url.hidden()); | 678 new_url->set_hidden(typed_url.hidden()); |
| 498 // Only provide the initial value for the last_visit field - after that, let | 679 // Only provide the initial value for the last_visit field - after that, let |
| 499 // the history code update the last_visit field on its own. | 680 // the history code update the last_visit field on its own. |
| 500 if (new_url->last_visit().is_null()) { | 681 if (new_url->last_visit().is_null()) { |
| (...skipping 17 matching lines...) | |
| 518 return; | 699 return; |
| 519 } | 700 } |
| 520 change_processor()->ModelReadyToSync(std::move(batch)); | 701 change_processor()->ModelReadyToSync(std::move(batch)); |
| 521 } | 702 } |
| 522 | 703 |
| 523 void TypedURLSyncBridge::ClearErrorStats() { | 704 void TypedURLSyncBridge::ClearErrorStats() { |
| 524 num_db_accesses_ = 0; | 705 num_db_accesses_ = 0; |
| 525 num_db_errors_ = 0; | 706 num_db_errors_ = 0; |
| 526 } | 707 } |
| 527 | 708 |
| 528 void TypedURLSyncBridge::UpdateUrlFromServer( | 709 void TypedURLSyncBridge::MergeURLWithSync( |
| 529 const sync_pb::TypedUrlSpecifics& server_typed_url, | 710 const sync_pb::TypedUrlSpecifics& server_typed_url, |
| 530 TypedURLMap* local_typed_urls, | 711 TypedURLMap* local_typed_urls, |
| 531 URLVisitVectorMap* local_visit_vectors, | 712 URLVisitVectorMap* local_visit_vectors, |
| 532 history::URLRows* new_synced_urls, | 713 URLRows* new_synced_urls, |
| 533 TypedURLVisitVector* new_synced_visits, | 714 TypedURLVisitVector* new_synced_visits, |
| 534 history::URLRows* updated_synced_urls) { | 715 URLRows* updated_synced_urls) { |
| 535 DCHECK(server_typed_url.visits_size() != 0); | 716 DCHECK(server_typed_url.visits_size() != 0); |
| 536 DCHECK_EQ(server_typed_url.visits_size(), | 717 DCHECK_EQ(server_typed_url.visits_size(), |
| 537 server_typed_url.visit_transitions_size()); | 718 server_typed_url.visit_transitions_size()); |
| 538 | 719 |
| 539 // Ignore empty urls. | 720 // Ignore empty urls. |
| 540 if (server_typed_url.url().empty()) { | 721 if (server_typed_url.url().empty()) { |
| 541 DVLOG(1) << "Ignoring empty URL in sync DB"; | 722 DVLOG(1) << "Ignoring empty URL in sync DB"; |
| 542 return; | 723 return; |
| 543 } | 724 } |
| 544 // Now, get rid of the expired visits. If there are no un-expired visits | 725 // Now, get rid of the expired visits. If there are no un-expired visits |
| 545 // left, ignore this url - any local data should just replace it. | 726 // left, ignore this url - any local data should just replace it. |
| 546 TypedUrlSpecifics sync_url = FilterExpiredVisits(server_typed_url); | 727 TypedUrlSpecifics sync_url = FilterExpiredVisits(server_typed_url); |
| 547 if (sync_url.visits_size() == 0) { | 728 if (sync_url.visits_size() == 0) { |
| 548 DVLOG(1) << "Ignoring expired URL in sync DB: " << sync_url.url(); | 729 DVLOG(1) << "Ignoring expired URL in sync DB: " << sync_url.url(); |
| 549 return; | 730 return; |
| 550 } | 731 } |
| 551 | 732 |
| 552 // Check if local db already has the url from sync. | 733 // Check if local db already has the url from sync. |
| 553 TypedURLMap::iterator it = local_typed_urls->find(GURL(sync_url.url())); | 734 TypedURLMap::iterator it = local_typed_urls->find(GURL(sync_url.url())); |
| 554 if (it == local_typed_urls->end()) { | 735 if (it == local_typed_urls->end()) { |
| 555 // There are no matching typed urls from the local db, check for untyped | 736 // There are no matching typed urls from the local db, check for untyped |
| 556 history::URLRow untyped_url(GURL(sync_url.url())); | 737 URLRow untyped_url(GURL(sync_url.url())); |
| 557 | 738 |
| 558 // The URL may still exist in the local db if it is an untyped url. | 739 // The URL may still exist in the local db if it is an untyped url. |
| 559 // An untyped url will transition to a typed url after receiving visits | 740 // An untyped url will transition to a typed url after receiving visits |
| 560 // from sync, and sync should receive any visits already existing locally | 741 // from sync, and sync should receive any visits already existing locally |
| 561 // for the url, so the full list of visits is consistent. | 742 // for the url, so the full list of visits is consistent. |
| 562 bool is_existing_url = | 743 bool is_existing_url = |
| 563 history_backend_->GetURL(untyped_url.url(), &untyped_url); | 744 history_backend_->GetURL(untyped_url.url(), &untyped_url); |
| 564 if (is_existing_url) { | 745 if (is_existing_url) { |
| 565 // Add a new entry to |local_typed_urls|, and set the iterator to it. | 746 // Add a new entry to |local_typed_urls|, and set the iterator to it. |
| 566 history::VisitVector untyped_visits; | 747 VisitVector untyped_visits; |
| 567 if (!FixupURLAndGetVisits(&untyped_url, &untyped_visits)) { | 748 if (!FixupURLAndGetVisits(&untyped_url, &untyped_visits)) { |
| 568 return; | 749 return; |
| 569 } | 750 } |
| 570 (*local_visit_vectors)[untyped_url.url()] = untyped_visits; | 751 (*local_visit_vectors)[untyped_url.url()] = untyped_visits; |
| 571 | 752 |
| 572 // Store row info that will be used to update sync's visits. | 753 // Store row info that will be used to update sync's visits. |
| 573 (*local_typed_urls)[untyped_url.url()] = untyped_url; | 754 (*local_typed_urls)[untyped_url.url()] = untyped_url; |
| 574 | 755 |
| 575 // Set iterator |it| to point to this entry. | 756 // Set iterator |it| to point to this entry. |
| 576 it = local_typed_urls->find(untyped_url.url()); | 757 it = local_typed_urls->find(untyped_url.url()); |
| 577 DCHECK(it != local_typed_urls->end()); | 758 DCHECK(it != local_typed_urls->end()); |
| 578 // Continue with merge below. | 759 // Continue with merge below. |
| 579 } else { | 760 } else { |
| 580 // The url is new to the local history DB. | 761 // The url is new to the local history DB. |
| 581 // Create new db entry for url. | 762 // Create new db entry for url. |
| 582 history::URLRow new_url(GURL(sync_url.url())); | 763 URLRow new_url(GURL(sync_url.url())); |
| 583 UpdateURLRowFromTypedUrlSpecifics(sync_url, &new_url); | 764 UpdateURLRowFromTypedUrlSpecifics(sync_url, &new_url); |
| 584 new_synced_urls->push_back(new_url); | 765 new_synced_urls->push_back(new_url); |
| 585 | 766 |
| 586 // Add entries for url visits. | 767 // Add entries for url visits. |
| 587 std::vector<history::VisitInfo> added_visits; | 768 std::vector<VisitInfo> added_visits; |
| 588 size_t visit_count = sync_url.visits_size(); | 769 size_t visit_count = sync_url.visits_size(); |
| 589 | 770 |
| 590 for (size_t index = 0; index < visit_count; ++index) { | 771 for (size_t index = 0; index < visit_count; ++index) { |
| 591 base::Time visit_time = | 772 base::Time visit_time = |
| 592 base::Time::FromInternalValue(sync_url.visits(index)); | 773 base::Time::FromInternalValue(sync_url.visits(index)); |
| 593 ui::PageTransition transition = | 774 ui::PageTransition transition = |
| 594 ui::PageTransitionFromInt(sync_url.visit_transitions(index)); | 775 ui::PageTransitionFromInt(sync_url.visit_transitions(index)); |
| 595 added_visits.push_back(history::VisitInfo(visit_time, transition)); | 776 added_visits.push_back(VisitInfo(visit_time, transition)); |
| 596 } | 777 } |
| 597 new_synced_visits->push_back( | 778 new_synced_visits->push_back( |
| 598 std::pair<GURL, std::vector<history::VisitInfo>>(new_url.url(), | 779 std::pair<GURL, std::vector<VisitInfo>>(new_url.url(), added_visits)); |
| 599 added_visits)); | |
| 600 return; | 780 return; |
| 601 } | 781 } |
| 602 } | 782 } |
| 603 | 783 |
| 604 // Same URL exists in sync data and in history data - compare the | 784 // Same URL exists in sync data and in history data - compare the |
| 605 // entries to see if there's any difference. | 785 // entries to see if there's any difference. |
| 606 history::VisitVector& visits = (*local_visit_vectors)[it->first]; | 786 VisitVector& visits = (*local_visit_vectors)[it->first]; |
| 607 std::vector<history::VisitInfo> added_visits; | 787 std::vector<VisitInfo> added_visits; |
| 608 | 788 |
| 609 // Empty URLs should be filtered out by ShouldIgnoreUrl() previously. | 789 // Empty URLs should be filtered out by ShouldIgnoreUrl() previously. |
| 610 DCHECK(!it->second.url().spec().empty()); | 790 DCHECK(!it->second.url().spec().empty()); |
| 611 | 791 |
| 612 // Initialize fields in |new_url| to the same values as the fields in | 792 // Initialize fields in |new_url| to the same values as the fields in |
| 613 // the existing URLRow in the history DB. This is needed because we | 793 // the existing URLRow in the history DB. This is needed because we |
| 614 // overwrite the existing value in WriteToHistoryBackend(), but some of | 794 // overwrite the existing value in WriteToHistoryBackend(), but some of |
| 615 // the values in that structure are not synced (like typed_count). | 795 // the values in that structure are not synced (like typed_count). |
| 616 history::URLRow new_url(it->second); | 796 URLRow new_url(it->second); |
| 617 | 797 |
| 618 MergeResult difference = | 798 MergeResult difference = |
| 619 MergeUrls(sync_url, it->second, &visits, &new_url, &added_visits); | 799 MergeUrls(sync_url, it->second, &visits, &new_url, &added_visits); |
| 620 | 800 |
| 621 if (difference != DIFF_NONE) { | 801 if (difference != DIFF_NONE) { |
| 622 it->second = new_url; | 802 it->second = new_url; |
| 623 if (difference & DIFF_UPDATE_NODE) { | 803 if (difference & DIFF_UPDATE_NODE) { |
| 624 // We don't want to resurrect old visits that have been aged out by | 804 // We don't want to resurrect old visits that have been aged out by |
| 625 // other clients, so remove all visits that are older than the | 805 // other clients, so remove all visits that are older than the |
| 626 // earliest existing visit in the sync node. | 806 // earliest existing visit in the sync node. |
| 627 // | 807 // |
| 628 // TODO(sync): This logic should be unnecessary now that filtering of | 808 // TODO(sync): This logic should be unnecessary now that filtering of |
| 629 // expired visits is performed separately. Non-expired visits older than | 809 // expired visits is performed separately. Non-expired visits older than |
| 630 // the earliest existing sync visits should still be synced, so this | 810 // the earliest existing sync visits should still be synced, so this |
| 631 // logic should be removed. | 811 // logic should be removed. |
| 632 if (sync_url.visits_size() > 0) { | 812 if (sync_url.visits_size() > 0) { |
| 633 base::Time earliest_visit = | 813 base::Time earliest_visit = |
| 634 base::Time::FromInternalValue(sync_url.visits(0)); | 814 base::Time::FromInternalValue(sync_url.visits(0)); |
| 635 for (history::VisitVector::iterator i = visits.begin(); | 815 for (VisitVector::iterator i = visits.begin(); |
| 636 i != visits.end() && i->visit_time < earliest_visit;) { | 816 i != visits.end() && i->visit_time < earliest_visit;) { |
| 637 i = visits.erase(i); | 817 i = visits.erase(i); |
| 638 } | 818 } |
| 639 // Should never be possible to delete all the items, since the | 819 // Should never be possible to delete all the items, since the |
| 640 // visit vector contains newer local visits it will keep and/or the | 820 // visit vector contains newer local visits it will keep and/or the |
| 641 // visits in typed_url.visits newer than older local visits. | 821 // visits in typed_url.visits newer than older local visits. |
| 642 DCHECK(visits.size() > 0); | 822 DCHECK(visits.size() > 0); |
| 643 } | 823 } |
| 644 DCHECK_EQ(new_url.last_visit().ToInternalValue(), | 824 DCHECK_EQ(new_url.last_visit().ToInternalValue(), |
| 645 visits.back().visit_time.ToInternalValue()); | 825 visits.back().visit_time.ToInternalValue()); |
| 646 } | 826 } |
| 647 if (difference & DIFF_LOCAL_ROW_CHANGED) { | 827 if (difference & DIFF_LOCAL_ROW_CHANGED) { |
| 648 // Add entry to updated_synced_urls to update the local db. | 828 // Add entry to updated_synced_urls to update the local db. |
| 649 DCHECK_EQ(it->second.id(), new_url.id()); | 829 DCHECK_EQ(it->second.id(), new_url.id()); |
| 650 updated_synced_urls->push_back(new_url); | 830 updated_synced_urls->push_back(new_url); |
| 651 } | 831 } |
| 652 if (difference & DIFF_LOCAL_VISITS_ADDED) { | 832 if (difference & DIFF_LOCAL_VISITS_ADDED) { |
| 653 // Add entry with new visits to new_synced_visits to update the local db. | 833 // Add entry with new visits to new_synced_visits to update the local db. |
| 654 new_synced_visits->push_back( | 834 new_synced_visits->push_back( |
| 655 std::pair<GURL, std::vector<history::VisitInfo>>(it->first, | 835 std::pair<GURL, std::vector<VisitInfo>>(it->first, added_visits)); |
| 656 added_visits)); | |
| 657 } | 836 } |
| 658 } else { | 837 } else { |
| 659 // No difference in urls, erase from map | 838 // No difference in urls, erase from map |
| 660 local_typed_urls->erase(it); | 839 local_typed_urls->erase(it); |
| 661 } | 840 } |
| 662 } | 841 } |
| 663 | 842 |
| 843 void TypedURLSyncBridge::UpdateFromSync( | |
| 844 const sync_pb::TypedUrlSpecifics& typed_url, | |
| 845 TypedURLVisitVector* visits_to_add, | |
| 846 VisitVector* visits_to_remove, | |
| 847 URLRows* updated_urls, | |
| 848 URLRows* new_urls) { | |
| 849 URLRow new_url(GURL(typed_url.url())); | |
| 850 VisitVector existing_visits; | |
| 851 bool existing_url = history_backend_->GetURL(new_url.url(), &new_url); | |
| 852 if (existing_url) { | |
| 853 // This URL already exists locally - fetch the visits so we can | |
| 854 // merge them below. | |
| 855 if (!FixupURLAndGetVisits(&new_url, &existing_visits)) { | |
| 856 return; | |
| 857 } | |
| 858 } | |
| 859 visits_to_add->push_back(std::pair<GURL, std::vector<VisitInfo>>( | |
| 860 new_url.url(), std::vector<VisitInfo>())); | |
| 861 | |
| 862 // Update the URL with information from the typed URL. | |
| 863 UpdateURLRowFromTypedUrlSpecifics(typed_url, &new_url); | |
| 864 | |
| 865 // Figure out which visits we need to add. | |
| 866 DiffVisits(existing_visits, typed_url, &visits_to_add->back().second, | |
| 867 visits_to_remove); | |
| 868 | |
| 869 if (existing_url) { | |
| 870 updated_urls->push_back(new_url); | |
| 871 } else { | |
| 872 new_urls->push_back(new_url); | |
| 873 } | |
| 874 } | |
| 875 | |
| 876 void TypedURLSyncBridge::UpdateSyncFromLocal( | |
| 877 URLRow row, | |
| 878 MetadataChangeList* metadata_change_list) { | |
| 879 DCHECK_GE(row.typed_count(), 0); | |
| 880 | |
| 881 if (ShouldIgnoreUrl(row.url())) | |
| 882 return; | |
| 883 | |
| 884 // Get the visits for this node. | |
| 885 VisitVector visit_vector; | |
| 886 if (!FixupURLAndGetVisits(&row, &visit_vector)) { | |
| 887 return; | |
| 888 } | |
| 889 | |
| 890 SendTypedURLToProcessor(row, visit_vector, metadata_change_list); | |
| 891 | |
| 892 return; | |
| 893 } | |
| 894 | |
| 664 base::Optional<ModelError> TypedURLSyncBridge::WriteToHistoryBackend( | 895 base::Optional<ModelError> TypedURLSyncBridge::WriteToHistoryBackend( |
| 665 const history::URLRows* new_urls, | 896 const URLRows* new_urls, |
| 666 const history::URLRows* updated_urls, | 897 const URLRows* updated_urls, |
| 667 const std::vector<GURL>* deleted_urls, | 898 const std::vector<GURL>* deleted_urls, |
| 668 const TypedURLVisitVector* new_visits, | 899 const TypedURLVisitVector* new_visits, |
| 669 const history::VisitVector* deleted_visits) { | 900 const VisitVector* deleted_visits) { |
| 670 if (deleted_urls && !deleted_urls->empty()) | 901 if (deleted_urls && !deleted_urls->empty()) |
| 671 history_backend_->DeleteURLs(*deleted_urls); | 902 history_backend_->DeleteURLs(*deleted_urls); |
| 672 | 903 |
| 673 if (new_urls) { | 904 if (new_urls) { |
| 674 history_backend_->AddPagesWithDetails(*new_urls, history::SOURCE_SYNCED); | 905 history_backend_->AddPagesWithDetails(*new_urls, SOURCE_SYNCED); |
| 675 } | 906 } |
| 676 | 907 |
| 677 if (updated_urls) { | 908 if (updated_urls) { |
| 678 ++num_db_accesses_; | 909 ++num_db_accesses_; |
| 679 // This is an existing entry in the URL database. We don't verify the | 910 // This is an existing entry in the URL database. We don't verify the |
| 680 // visit_count or typed_count values here, because either one (or both) | 911 // visit_count or typed_count values here, because either one (or both) |
| 681 // could be zero in the case of bookmarks, or in the case of a URL | 912 // could be zero in the case of bookmarks, or in the case of a URL |
| 682 // transitioning from non-typed to typed as a result of this sync. | 913 // transitioning from non-typed to typed as a result of this sync. |
| 683 // In the field we sometimes run into errors on specific URLs. It's OK | 914 // In the field we sometimes run into errors on specific URLs. It's OK |
| 684 // to just continue on (we can try writing again on the next model | 915 // to just continue on (we can try writing again on the next model |
| 685 // association). | 916 // association). |
| 686 size_t num_successful_updates = history_backend_->UpdateURLs(*updated_urls); | 917 size_t num_successful_updates = history_backend_->UpdateURLs(*updated_urls); |
| 687 num_db_errors_ += updated_urls->size() - num_successful_updates; | 918 num_db_errors_ += updated_urls->size() - num_successful_updates; |
| 688 } | 919 } |
| 689 | 920 |
| 690 if (new_visits) { | 921 if (new_visits) { |
| 691 for (const auto& visits : *new_visits) { | 922 for (const auto& visits : *new_visits) { |
| 692 // If there are no visits to add, just skip this. | 923 // If there are no visits to add, just skip this. |
| 693 if (visits.second.empty()) | 924 if (visits.second.empty()) |
| 694 continue; | 925 continue; |
| 695 ++num_db_accesses_; | 926 ++num_db_accesses_; |
| 696 if (!history_backend_->AddVisits(visits.first, visits.second, | 927 if (!history_backend_->AddVisits(visits.first, visits.second, |
| 697 history::SOURCE_SYNCED)) { | 928 SOURCE_SYNCED)) { |
| 698 ++num_db_errors_; | 929 ++num_db_errors_; |
| 699 return ModelError(FROM_HERE, "Could not add visits to HistoryBackend."); | 930 return ModelError(FROM_HERE, "Could not add visits to HistoryBackend."); |
| 700 } | 931 } |
| 701 } | 932 } |
| 702 } | 933 } |
| 703 | 934 |
| 704 if (deleted_visits) { | 935 if (deleted_visits) { |
| 705 ++num_db_accesses_; | 936 ++num_db_accesses_; |
| 706 if (!history_backend_->RemoveVisits(*deleted_visits)) { | 937 if (!history_backend_->RemoveVisits(*deleted_visits)) { |
| 707 ++num_db_errors_; | 938 ++num_db_errors_; |
| (...skipping 33 matching lines...) | |
| 741 return true; | 972 return true; |
| 742 | 973 |
| 743 // Ignore local file URLs. | 974 // Ignore local file URLs. |
| 744 if (url.SchemeIsFile()) | 975 if (url.SchemeIsFile()) |
| 745 return true; | 976 return true; |
| 746 | 977 |
| 747 // Ignore localhost URLs. | 978 // Ignore localhost URLs. |
| 748 if (net::IsLocalhost(url.host_piece())) | 979 if (net::IsLocalhost(url.host_piece())) |
| 749 return true; | 980 return true; |
| 750 | 981 |
| 751 // Ignore username and password, sonce history backend will remove user name | 982 // Ignore username and password, since history backend will remove user name |
| 752 // and password in URLDatabase::GURLToDatabaseURL and send username/password | 983 // and password in URLDatabase::GURLToDatabaseURL and send username/password |
| 753 // removed url to sync later. | 984 // removed url to sync later. |
| 754 if (url.has_username() || url.has_password()) | 985 if (url.has_username() || url.has_password()) |
| 755 return true; | 986 return true; |
| 756 | 987 |
| 757 return false; | 988 return false; |
| 758 } | 989 } |
| 759 | 990 |
| 760 bool TypedURLSyncBridge::ShouldIgnoreVisits( | 991 bool TypedURLSyncBridge::ShouldIgnoreVisits(const VisitVector& visits) { |
| 761 const history::VisitVector& visits) { | |
| 762 // We ignore URLs that were imported, but have never been visited by | 992 // We ignore URLs that were imported, but have never been visited by |
| 763 // chromium. | 993 // chromium. |
| 764 static const int kFirstImportedSource = history::SOURCE_FIREFOX_IMPORTED; | 994 static const int kFirstImportedSource = SOURCE_FIREFOX_IMPORTED; |
| 765 history::VisitSourceMap map; | 995 VisitSourceMap map; |
| 766 if (!history_backend_->GetVisitsSource(visits, &map)) | 996 if (!history_backend_->GetVisitsSource(visits, &map)) |
| 767 return false; // If we can't read the visit, assume it's not imported. | 997 return false; // If we can't read the visit, assume it's not imported. |
| 768 | 998 |
| 769 // Walk the list of visits and look for a non-imported item. | 999 // Walk the list of visits and look for a non-imported item. |
| 770 for (const auto& visit : visits) { | 1000 for (const auto& visit : visits) { |
| 771 if (map.count(visit.visit_id) == 0 || | 1001 if (map.count(visit.visit_id) == 0 || |
| 772 map[visit.visit_id] < kFirstImportedSource) { | 1002 map[visit.visit_id] < kFirstImportedSource) { |
| 773 return false; | 1003 return false; |
| 774 } | 1004 } |
| 775 } | 1005 } |
| 776 // We only saw imported visits, so tell the caller to ignore them. | 1006 // We only saw imported visits, so tell the caller to ignore them. |
| 777 return true; | 1007 return true; |
| 778 } | 1008 } |
| 779 | 1009 |
| 1010 bool TypedURLSyncBridge::ShouldSyncVisit(int typed_count, | |
| 1011 ui::PageTransition transition) { | |
| 1012 // Just use an ad-hoc criteria to determine whether to ignore this | |
| 1013 // notification. For most users, the distribution of visits is roughly a bell | |
| 1014 // curve with a long tail - there are lots of URLs with < 5 visits so we want | |
| 1015 // to make sure we sync up every visit to ensure the proper ordering of | |
| 1016 // suggestions. But there are relatively few URLs with > 10 visits, and those | |
| 1017 // tend to be more broadly distributed such that there's no need to sync up | |
| 1018 // every visit to preserve their relative ordering. | |
| 1019 return (ui::PageTransitionCoreTypeIs(transition, ui::PAGE_TRANSITION_TYPED) && | |
| 1020 typed_count >= 0 && | |
| 1021 (typed_count < kTypedUrlVisitThrottleThreshold || | |
| 1022 (typed_count % kTypedUrlVisitThrottleMultiple) == 0)); | |
| 1023 } | |
| 1024 | |
| 780 bool TypedURLSyncBridge::FixupURLAndGetVisits(URLRow* url, | 1025 bool TypedURLSyncBridge::FixupURLAndGetVisits(URLRow* url, |
| 781 VisitVector* visits) { | 1026 VisitVector* visits) { |
| 782 ++num_db_accesses_; | 1027 ++num_db_accesses_; |
| 783 if (!history_backend_->GetMostRecentVisitsForURL(url->id(), kMaxVisitsToFetch, | 1028 if (!history_backend_->GetMostRecentVisitsForURL(url->id(), kMaxVisitsToFetch, |
| 784 visits)) { | 1029 visits)) { |
| 785 ++num_db_errors_; | 1030 ++num_db_errors_; |
| 786 // Couldn't load the visits for this URL due to some kind of DB error. | 1031 // Couldn't load the visits for this URL due to some kind of DB error. |
| 787 // Don't bother writing this URL to the history DB (if we ignore the | 1032 // Don't bother writing this URL to the history DB (if we ignore the |
| 788 // error and continue, we might end up duplicating existing visits). | 1033 // error and continue, we might end up duplicating existing visits). |
| 789 DLOG(ERROR) << "Could not load visits for url: " << url->url(); | 1034 DLOG(ERROR) << "Could not load visits for url: " << url->url(); |
| (...skipping 55 matching lines...) | |
| 845 } | 1090 } |
| 846 | 1091 |
| 847 std::unique_ptr<EntityData> TypedURLSyncBridge::CreateEntityData( | 1092 std::unique_ptr<EntityData> TypedURLSyncBridge::CreateEntityData( |
| 848 const URLRow& row, | 1093 const URLRow& row, |
| 849 const VisitVector& visits) { | 1094 const VisitVector& visits) { |
| 850 auto entity_data = base::MakeUnique<EntityData>(); | 1095 auto entity_data = base::MakeUnique<EntityData>(); |
| 851 TypedUrlSpecifics* specifics = entity_data->specifics.mutable_typed_url(); | 1096 TypedUrlSpecifics* specifics = entity_data->specifics.mutable_typed_url(); |
| 852 | 1097 |
| 853 if (!WriteToTypedUrlSpecifics(row, visits, specifics)) { | 1098 if (!WriteToTypedUrlSpecifics(row, visits, specifics)) { |
| 854 // Cannot write to specifics, ex. no TYPED visits. | 1099 // Cannot write to specifics, ex. no TYPED visits. |
| 855 return base::MakeUnique<EntityData>(); | 1100 return nullptr; |
| 856 } | 1101 } |
| 857 entity_data->non_unique_name = row.url().spec(); | 1102 entity_data->non_unique_name = row.url().spec(); |
| 858 | |
| 859 return entity_data; | 1103 return entity_data; |
| 860 } | 1104 } |
| 861 | 1105 |
| 862 bool TypedURLSyncBridge::GetValidURLsAndVisits(URLVisitVectorMap* url_to_visit, | 1106 bool TypedURLSyncBridge::GetValidURLsAndVisits(URLVisitVectorMap* url_to_visit, |
| 863 TypedURLMap* url_to_urlrow) { | 1107 TypedURLMap* url_to_urlrow) { |
| 864 DCHECK(url_to_visit); | 1108 DCHECK(url_to_visit); |
| 865 DCHECK(url_to_urlrow); | 1109 DCHECK(url_to_urlrow); |
| 866 | 1110 |
| 867 history::URLRows local_typed_urls; | 1111 URLRows local_typed_urls; |
| 868 ++num_db_accesses_; | 1112 ++num_db_accesses_; |
| 869 if (!history_backend_->GetAllTypedURLs(&local_typed_urls)) { | 1113 if (!history_backend_->GetAllTypedURLs(&local_typed_urls)) { |
| 870 ++num_db_errors_; | 1114 ++num_db_errors_; |
| 871 return false; | 1115 return false; |
| 872 } | 1116 } |
| 873 for (history::URLRow& url : local_typed_urls) { | 1117 for (URLRow& url : local_typed_urls) { |
| 874 DCHECK_EQ(0U, url_to_visit->count(url.url())); | 1118 DCHECK_EQ(0U, url_to_visit->count(url.url())); |
| 875 if (!FixupURLAndGetVisits(&url, &((*url_to_visit)[url.url()])) || | 1119 if (!FixupURLAndGetVisits(&url, &((*url_to_visit)[url.url()])) || |
| 876 ShouldIgnoreUrl(url.url()) || | 1120 ShouldIgnoreUrl(url.url()) || |
| 877 ShouldIgnoreVisits((*url_to_visit)[url.url()])) { | 1121 ShouldIgnoreVisits((*url_to_visit)[url.url()])) { |
| 878 // Ignore this URL if we couldn't load the visits or if there's some | 1122 // Ignore this URL if we couldn't load the visits or if there's some |
| 879 // other problem with it (it was empty, or imported and never visited). | 1123 // other problem with it (it was empty, or imported and never visited). |
| 880 } else { | 1124 } else { |
| 881 // Add url to url_to_urlrow. | 1125 // Add url to url_to_urlrow. |
| 882 (*url_to_urlrow)[url.url()] = url; | 1126 (*url_to_urlrow)[url.url()] = url; |
| 883 } | 1127 } |
| (...skipping 10 matching lines...) | |
| 894 | 1138 |
| 895 if (!is_existing_url) { | 1139 if (!is_existing_url) { |
| 896 // The typed url did not save to local history database, so return empty | 1140 // The typed url did not save to local history database, so return empty |
| 897 // string. | 1141 // string. |
| 898 return std::string(); | 1142 return std::string(); |
| 899 } | 1143 } |
| 900 | 1144 |
| 901 return GetStorageKeyFromURLRow(existing_url); | 1145 return GetStorageKeyFromURLRow(existing_url); |
| 902 } | 1146 } |
| 903 | 1147 |
| 1148 void TypedURLSyncBridge::SendTypedURLToProcessor( | |
| 1149 const URLRow& row, | |
| 1150 const VisitVector& visits, | |
| 1151 MetadataChangeList* metadata_change_list) { | |
| 1152 DCHECK(!visits.empty()); | |
| 1153 DCHECK(metadata_change_list); | |
| 1154 | |
| 1155 std::unique_ptr<syncer::EntityData> entity_data = | |
| 1156 CreateEntityData(row, visits); | |
| 1157 if (!entity_data.get()) { | |
| 1158 // Cannot create EntityData, ex. no TYPED visits. | |
| 1159 return; | |
| 1160 } | |
| 1161 | |
| 1162 std::string storage_key = GetStorageKeyFromURLRow(row); | |
| 1163 change_processor()->Put(storage_key, std::move(entity_data), | |
| 1164 metadata_change_list); | |
| 1165 } | |
| 1166 | |
| 904 } // namespace history | 1167 } // namespace history |
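For readers following the storage-key handling in this change: the key is the `sizeof(URLID)` bytes (8, for the `int64_t` `URLID`) of the row id written most-significant byte first. `GetStorageKeyFromURLRow()` builds it with `base::WriteBigEndian<URLID>`, and the sync metadata database's `StorageKeyToURLID()` reverses it in `ApplySyncChanges()` and `GetData()`. The snippet below is a plain-C++ stand-in for that round trip; `URLIDToStorageKey` and the local `StorageKeyToURLID` are illustrative helpers, not the Chromium functions.

```cpp
// Plain-C++ stand-in for the storage-key round trip (illustrative helpers,
// not base::WriteBigEndian or the metadata database's StorageKeyToURLID).
#include <cassert>
#include <cstddef>
#include <cstdint>
#include <iostream>
#include <string>

using URLID = int64_t;  // history::URLID is an int64_t row id.

std::string URLIDToStorageKey(URLID id) {
  std::string key(sizeof(URLID), 0);
  for (std::size_t i = 0; i < sizeof(URLID); ++i) {
    // Most significant byte first (big-endian), like WriteBigEndian.
    key[i] = static_cast<char>(
        (static_cast<uint64_t>(id) >> (8 * (sizeof(URLID) - 1 - i))) & 0xFF);
  }
  return key;
}

URLID StorageKeyToURLID(const std::string& key) {
  assert(key.size() == sizeof(URLID));
  uint64_t value = 0;
  for (std::size_t i = 0; i < sizeof(URLID); ++i)
    value = (value << 8) | static_cast<uint8_t>(key[i]);
  return static_cast<URLID>(value);
}

int main() {
  const URLID id = 42;
  const std::string key = URLIDToStorageKey(id);
  std::cout << "key size: " << key.size()                           // 8
            << ", round-trips: " << (StorageKeyToURLID(key) == id)  // 1
            << std::endl;
  return 0;
}
```

A convenient property of the big-endian layout is that lexicographic ordering of the keys matches numeric ordering of non-negative row ids.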