Chromium Code Reviews| Index: components/omnibox/browser/url_index_private_data.cc |
| diff --git a/components/omnibox/browser/url_index_private_data.cc b/components/omnibox/browser/url_index_private_data.cc |
| index 8088389cd365abc0af51a37e3658a86b14058399..e9b0b7e9930bb3bb8b6a5551853ea45066d5269d 100644 |
| --- a/components/omnibox/browser/url_index_private_data.cc |
| +++ b/components/omnibox/browser/url_index_private_data.cc |
| @@ -37,6 +37,8 @@ |
| #include "third_party/protobuf/src/google/protobuf/repeated_field.h" |
| #endif |
| +namespace { |
| + |
| using google::protobuf::RepeatedField; |
| using google::protobuf::RepeatedPtrField; |
| using in_memory_url_index::InMemoryURLIndexCacheItem; |
| @@ -77,6 +79,29 @@ bool LengthGreater(const base::string16& string_a, |
| return string_a.length() > string_b.length(); |
| } |
| +template <typename ResultingSet, typename I, typename Transformation> |
|
Peter Kasting
2017/02/18 01:46:31
Nit: I -> Iter?
dyaroshev
2017/02/18 11:48:13
Done.
|
| +// Requires ResultingSet is a Container, I is an InputIterator, Transformation |
|
Peter Kasting
2017/02/18 01:46:31
Nit: template<...> is part of the function declara
dyaroshev
2017/02/18 11:48:14
This comment describes concepts. Used concepts her
Peter Kasting
2017/02/20 10:02:54
You can leave them if you want, but don't insert t
|
| +// is a UnaryFunction on ValueType<I> that returns a Container. |
| +ResultingSet IntersectSets(I first, I last, Transformation get_set) { |
|
Peter Kasting
2017/02/18 01:46:31
Nit: GetTransformIntersection()? IntersectConstru
dyaroshev
2017/02/18 11:48:13
Done.
|
| + if (first == last) |
| + return ResultingSet(); |
| + |
| + auto tmp_set = get_set(*first); |
| + ResultingSet res(tmp_set.begin(), tmp_set.end()); |
|
dyaroshev
2017/02/18 01:22:14
If we keep this, I should write smth like Contain
dyaroshev
2017/02/18 11:48:14
Done.
@pkasting: there are a number of places here w
Peter Kasting
2017/02/20 10:02:54
I find things more readable without ContainerCast.
|
| + |
| + // Like std::accumulate but short circuits if set is empty. |
| + for (++first; first != last && !res.empty(); ++first) { |
| + auto tmp_set = get_set(*first); |
| + if (tmp_set.empty()) |
| + break; |
|
Peter Kasting
2017/02/18 01:46:31
Shouldn't this return the empty set instead of bre
dyaroshev
2017/02/18 11:48:14
Done.
One of the advantages of keeping algorithms
Peter Kasting
2017/02/20 10:02:54
If there's really a need to test them, they can be
|
| + |
| + res = base::STLSetIntersection<ResultingSet>(res, tmp_set); |
| + } |
| + |
| + return res; |
| +} |
| + |
| +} // namespace |
| // UpdateRecentVisitsFromHistoryDBTask ----------------------------------------- |
| @@ -194,41 +219,24 @@ ScoredHistoryMatches URLIndexPrivateData::HistoryItemsForTerms( |
| net::UnescapeRule::URL_SPECIAL_CHARS_EXCEPT_PATH_SEPARATORS); |
| // Extract individual 'words' (as opposed to 'terms'; see comment in |
| - // HistoryIdSetToScoredMatches()) from the search string. When the user |
| + // HistoryIdsToScoredMatches()) from the search string. When the user |
| // types "colspec=ID%20Mstone Release" we get four 'words': "colspec", "id", |
| // "mstone" and "release". |
| String16Vector lower_words( |
| String16VectorFromString16(lower_unescaped_string, false, nullptr)); |
| if (lower_words.empty()) |
| continue; |
| - HistoryIDSet history_id_set = HistoryIDSetFromWords(lower_words); |
| - pre_filter_item_count_ += history_id_set.size(); |
| - // Trim the candidate pool if it is large. Note that we do not filter out |
| - // items that do not contain the search terms as proper substrings -- |
| - // doing so is the performance-costly operation we are trying to avoid in |
| - // order to maintain omnibox responsiveness. |
| - const size_t kItemsToScoreLimit = 500; |
| - if (history_id_set.size() > kItemsToScoreLimit) { |
| - HistoryIDVector history_ids; |
| - std::copy(history_id_set.begin(), history_id_set.end(), |
| - std::back_inserter(history_ids)); |
| - // Trim down the set by sorting by typed-count, visit-count, and last |
| - // visit. |
| - HistoryItemFactorGreater item_factor_functor(history_info_map_); |
| - std::partial_sort(history_ids.begin(), |
| - history_ids.begin() + kItemsToScoreLimit, |
| - history_ids.end(), item_factor_functor); |
| - history_id_set.clear(); |
| - std::copy(history_ids.begin(), history_ids.begin() + kItemsToScoreLimit, |
| - std::inserter(history_id_set, history_id_set.end())); |
| - post_filter_item_count_ += history_id_set.size(); |
| - } else { |
| - post_filter_item_count_ += pre_filter_item_count_; |
| - } |
| + |
| + HistoryIDVector history_ids = HistoryIDsFromWords(lower_words); |
| + |
| + pre_filter_item_count_ += history_ids.size(); |
| + TrimHistoryIdsPool(&history_ids); |
| + post_filter_item_count_ += history_ids.size(); |
|
Peter Kasting
2017/02/18 01:46:31
This line seems like a functional change (it was a
dyaroshev
2017/02/18 11:48:14
Moved this logic into TrimHistoryIdsPool and rever
Peter Kasting
2017/02/20 10:02:54
Ugh, don't do that. Now the code is as wrong as b
dyaroshev
2017/02/20 22:21:31
Other than creating a separate CL with this fix se
dyaroshev
2017/02/22 22:57:25
New CL: https://codereview.chromium.org/2702413007
|
| + |
| ScoredHistoryMatches temp_scored_items; |
| - HistoryIdSetToScoredMatches(history_id_set, lower_raw_string, |
| - template_url_service, bookmark_model, |
| - &temp_scored_items); |
| + HistoryIdsToScoredMatches(std::move(history_ids), lower_raw_string, |
| + template_url_service, bookmark_model, |
| + &temp_scored_items); |
| scored_items.insert(scored_items.end(), temp_scored_items.begin(), |
| temp_scored_items.end()); |
| } |
| @@ -484,7 +492,7 @@ bool URLIndexPrivateData::Empty() const { |
| void URLIndexPrivateData::Clear() { |
| last_time_rebuilt_from_history_ = base::Time(); |
| word_list_.clear(); |
| - available_words_.clear(); |
| + available_words_ = {}; |
| word_map_.clear(); |
| char_word_map_.clear(); |
| word_id_history_map_.clear(); |
| @@ -495,37 +503,43 @@ void URLIndexPrivateData::Clear() { |
| URLIndexPrivateData::~URLIndexPrivateData() {} |
| -HistoryIDSet URLIndexPrivateData::HistoryIDSetFromWords( |
| +HistoryIDVector URLIndexPrivateData::HistoryIDsFromWords( |
| const String16Vector& unsorted_words) { |
| + // The histogram wasn't renamed to match HistoryIdsFromWords so that |
| + // previously recorded data remains valid. |
|
Peter Kasting
2017/02/18 01:46:31
Should we worry about this? Maybe it's OK to just
dyaroshev
2017/02/18 11:48:14
Well, it's for you mostly - I don't have access to
Mark P
2017/02/24 00:34:16
Might as well keep the name of the histogram to ke
|
| SCOPED_UMA_HISTOGRAM_TIMER("Omnibox.HistoryQuickHistoryIDSetFromWords"); |
| // Break the terms down into individual terms (words), get the candidate |
| // set for each term, and intersect each to get a final candidate list. |
| // Note that a single 'term' from the user's perspective might be |
| // a string like "http://www.somewebsite.com" which, from our perspective, |
| // is four words: 'http', 'www', 'somewebsite', and 'com'. |
| - HistoryIDSet history_id_set; |
| + HistoryIDVector history_ids; |
|
dyaroshev
2017/02/18 01:22:14
Unused variable.
dyaroshev
2017/02/18 11:48:14
Done.
|
| String16Vector words(unsorted_words); |
| // Sort the words into the longest first as such are likely to narrow down |
| // the results quicker. Also, single character words are the most expensive |
| // to process so save them for last. |
| std::sort(words.begin(), words.end(), LengthGreater); |
| - for (String16Vector::iterator iter = words.begin(); iter != words.end(); |
| - ++iter) { |
| - base::string16 uni_word = *iter; |
| - HistoryIDSet term_history_set = HistoryIDsForTerm(uni_word); |
| - if (term_history_set.empty()) { |
| - history_id_set.clear(); |
| - break; |
| - } |
| - if (iter == words.begin()) { |
| - history_id_set.swap(term_history_set); |
| - } else { |
| - HistoryIDSet new_history_id_set = base::STLSetIntersection<HistoryIDSet>( |
| - history_id_set, term_history_set); |
| - history_id_set.swap(new_history_id_set); |
| - } |
| - } |
| - return history_id_set; |
| + |
| + return IntersectSets<HistoryIDVector>( |
| + words.begin(), words.end(), |
| + [this](const base::string16& word) { return HistoryIDsForTerm(word); }); |
| +} |
| + |
| +void URLIndexPrivateData::TrimHistoryIdsPool( |
| + HistoryIDVector* history_ids) const { |
| + constexpr size_t kItemsToScoreLimit = 500; |
| + |
|
Peter Kasting
2017/02/18 01:46:31
Nit: No blank line
dyaroshev
2017/02/18 11:48:14
Done.
|
| + if (history_ids->size() < kItemsToScoreLimit) |
| + return; |
| + |
| + // Trim down the set by partitioning it by typed-count, visit-count, and |
| + // last visit, keeping only the kItemsToScoreLimit best items. |
| + auto new_end = history_ids->begin() + kItemsToScoreLimit; |
| + HistoryItemFactorGreater item_factor_functor(history_info_map_); |
| + |
| + std::nth_element(history_ids->begin(), new_end, history_ids->end(), |
| + item_factor_functor); |
| + history_ids->erase(new_end, history_ids->end()); |
| } |
| HistoryIDSet URLIndexPrivateData::HistoryIDsForTerm( |
| @@ -592,13 +606,9 @@ HistoryIDSet URLIndexPrivateData::HistoryIDsForTerm( |
| return HistoryIDSet(); |
| } |
| // Or there may not have been a prefix from which to start. |
| - if (prefix_chars.empty()) { |
| - word_id_set.swap(leftover_set); |
| - } else { |
| - WordIDSet new_word_id_set = base::STLSetIntersection<WordIDSet>( |
| - word_id_set, leftover_set); |
| - word_id_set.swap(new_word_id_set); |
| - } |
| + word_id_set = prefix_chars.empty() ? std::move(leftover_set) |
| + : base::STLSetIntersection<WordIDSet>( |
| + word_id_set, leftover_set); |
| } |
| // We must filter the word list because the resulting word set surely |
| @@ -618,9 +628,7 @@ HistoryIDSet URLIndexPrivateData::HistoryIDsForTerm( |
| // the sets from each word. |
| HistoryIDSet history_id_set; |
| if (!word_id_set.empty()) { |
| - for (WordIDSet::iterator word_id_iter = word_id_set.begin(); |
| - word_id_iter != word_id_set.end(); ++word_id_iter) { |
| - WordID word_id = *word_id_iter; |
| + for (WordID word_id : word_id_set) { |
| WordIDHistoryMap::iterator word_iter = word_id_history_map_.find(word_id); |
| if (word_iter != word_id_history_map_.end()) { |
| HistoryIDSet& word_history_id_set(word_iter->second); |
| @@ -640,43 +648,22 @@ HistoryIDSet URLIndexPrivateData::HistoryIDsForTerm( |
| WordIDSet URLIndexPrivateData::WordIDSetForTermChars( |
| const Char16Set& term_chars) { |
| - WordIDSet word_id_set; |
| - for (Char16Set::const_iterator c_iter = term_chars.begin(); |
| - c_iter != term_chars.end(); ++c_iter) { |
| - CharWordIDMap::iterator char_iter = char_word_map_.find(*c_iter); |
| - if (char_iter == char_word_map_.end()) { |
| - // A character was not found so there are no matching results: bail. |
| - word_id_set.clear(); |
| - break; |
| - } |
| - WordIDSet& char_word_id_set(char_iter->second); |
| - // It is possible for there to no longer be any words associated with |
| - // a particular character. Give up in that case. |
| - if (char_word_id_set.empty()) { |
| - word_id_set.clear(); |
| - break; |
| - } |
| - |
| - if (c_iter == term_chars.begin()) { |
| - // First character results becomes base set of results. |
| - word_id_set = char_word_id_set; |
| - } else { |
| - // Subsequent character results get intersected in. |
| - WordIDSet new_word_id_set = base::STLSetIntersection<WordIDSet>( |
| - word_id_set, char_word_id_set); |
| - word_id_set.swap(new_word_id_set); |
| - } |
| - } |
| - return word_id_set; |
| + return IntersectSets<WordIDSet>(term_chars.begin(), term_chars.end(), |
| + [&](base::char16 c) { |
| + auto char_iter = char_word_map_.find(c); |
| + if (char_iter == char_word_map_.end()) |
| + return WordIDSet(); |
| + return char_iter->second; |
|
Peter Kasting
2017/02/18 01:46:31
Nit: Shorter:
auto GetSet = [this](base::char16
dyaroshev
2017/02/18 11:48:13
Done.
|
| + }); |
| } |
| -void URLIndexPrivateData::HistoryIdSetToScoredMatches( |
| - HistoryIDSet history_id_set, |
| +void URLIndexPrivateData::HistoryIdsToScoredMatches( |
| + HistoryIDVector history_ids, |
| const base::string16& lower_raw_string, |
| const TemplateURLService* template_url_service, |
| bookmarks::BookmarkModel* bookmark_model, |
| ScoredHistoryMatches* scored_items) const { |
| - if (history_id_set.empty()) |
| + if (history_ids.empty()) |
| return; |
| // Break up the raw search string (complete with escaped URL elements) into |
| @@ -704,22 +691,19 @@ void URLIndexPrivateData::HistoryIdSetToScoredMatches( |
| &lower_terms_to_word_starts_offsets); |
| // Filter bad matches and other matches we don't want to display. |
| - for (auto it = history_id_set.begin();;) { |
| - it = std::find_if(it, history_id_set.end(), |
| - [this, template_url_service](const HistoryID history_id) { |
| - return ShouldFilter(history_id, template_url_service); |
| - }); |
| - if (it == history_id_set.end()) |
| - break; |
| - it = history_id_set.erase(it); |
| - } |
| + auto filter = [this, template_url_service](const HistoryID history_id) { |
| + return ShouldFilter(history_id, template_url_service); |
| + }; |
| + history_ids.erase( |
| + std::remove_if(history_ids.begin(), history_ids.end(), filter), |
| + history_ids.end()); |
|
Peter Kasting
2017/02/18 01:46:31
Nit: Pulling history_ids.end() out into a temp |en
dyaroshev
2017/02/18 11:48:14
I don't like invalid iterators lying around and it
Peter Kasting
2017/02/20 10:02:54
That's a reasonable objection to have.
|
| // Score the matches. |
| - const size_t num_matches = history_id_set.size(); |
| + const size_t num_matches = history_ids.size(); |
| const base::Time now = base::Time::Now(); |
| std::transform( |
| - history_id_set.begin(), history_id_set.end(), |
| - std::back_inserter(*scored_items), [&](const HistoryID history_id) { |
| + history_ids.begin(), history_ids.end(), std::back_inserter(*scored_items), |
| + [&](const HistoryID history_id) { |
| auto hist_pos = history_info_map_.find(history_id); |
| const history::URLRow& hist_item = hist_pos->second.url_row; |
| auto starts_pos = word_starts_map_.find(history_id); |
| @@ -830,80 +814,44 @@ void URLIndexPrivateData::AddRowWordsToIndex(const history::URLRow& row, |
| const base::string16& title = bookmarks::CleanUpTitleForMatching(row.title()); |
| String16Set title_words = String16SetFromString16(title, |
| word_starts ? &word_starts->title_word_starts_ : nullptr); |
| - String16Set words = base::STLSetUnion<String16Set>(url_words, title_words); |
| - for (String16Set::iterator word_iter = words.begin(); |
| - word_iter != words.end(); ++word_iter) |
| - AddWordToIndex(*word_iter, history_id); |
| + for (const auto& word : |
| + base::STLSetUnion<String16Set>(url_words, title_words)) |
| + AddWordToIndex(word, history_id); |
| search_term_cache_.clear(); // Invalidate the term cache. |
| } |
| void URLIndexPrivateData::AddWordToIndex(const base::string16& term, |
| HistoryID history_id) { |
| - WordMap::iterator word_pos = word_map_.find(term); |
| - if (word_pos != word_map_.end()) |
| - UpdateWordHistory(word_pos->second, history_id); |
| - else |
| - AddWordHistory(term, history_id); |
| -} |
| + WordMap::iterator word_pos = word_map_.lower_bound(term); |
| -void URLIndexPrivateData::AddWordHistory(const base::string16& term, |
| - HistoryID history_id) { |
| - WordID word_id = word_list_.size(); |
| - if (available_words_.empty()) { |
| - word_list_.push_back(term); |
| - } else { |
| - word_id = *(available_words_.begin()); |
| - word_list_[word_id] = term; |
| - available_words_.erase(word_id); |
| - } |
| - word_map_[term] = word_id; |
| + // Adding a new word (i.e. a word that is not already in the word index). |
| + if (word_pos->first != term) { |
| + word_pos = |
| + word_map_.emplace_hint(word_pos, term, AddNewWordToWordList(term)); |
| - HistoryIDSet history_id_set; |
| - history_id_set.insert(history_id); |
| - word_id_history_map_[word_id] = history_id_set; |
| - AddToHistoryIDWordMap(history_id, word_id); |
| - |
| - // For each character in the newly added word (i.e. a word that is not |
| - // already in the word index), add the word to the character index. |
| - Char16Set characters = Char16SetFromString16(term); |
| - for (Char16Set::iterator uni_char_iter = characters.begin(); |
| - uni_char_iter != characters.end(); ++uni_char_iter) { |
| - base::char16 uni_char = *uni_char_iter; |
| - CharWordIDMap::iterator char_iter = char_word_map_.find(uni_char); |
| - if (char_iter != char_word_map_.end()) { |
| - // Update existing entry in the char/word index. |
| - WordIDSet& word_id_set(char_iter->second); |
| - word_id_set.insert(word_id); |
| - } else { |
| - // Create a new entry in the char/word index. |
| - WordIDSet word_id_set; |
| - word_id_set.insert(word_id); |
| - char_word_map_[uni_char] = word_id_set; |
| - } |
| + // For each character in the newly added word add the word to the character |
| + // index. |
| + for (base::char16 uni_char : Char16SetFromString16(term)) |
| + char_word_map_[uni_char].insert(word_pos->second); |
| } |
| -} |
| -void URLIndexPrivateData::UpdateWordHistory(WordID word_id, |
| - HistoryID history_id) { |
| - WordIDHistoryMap::iterator history_pos = word_id_history_map_.find(word_id); |
| - DCHECK(history_pos != word_id_history_map_.end()); |
| - HistoryIDSet& history_id_set(history_pos->second); |
| - history_id_set.insert(history_id); |
| - AddToHistoryIDWordMap(history_id, word_id); |
| + DCHECK_EQ(word_pos->first, term); |
| + |
| + word_id_history_map_[word_pos->second].insert(history_id); |
| + history_id_word_map_[history_id].insert(word_pos->second); |
| } |
| -void URLIndexPrivateData::AddToHistoryIDWordMap(HistoryID history_id, |
| - WordID word_id) { |
| - HistoryIDWordMap::iterator iter = history_id_word_map_.find(history_id); |
| - if (iter != history_id_word_map_.end()) { |
| - WordIDSet& word_id_set(iter->second); |
| - word_id_set.insert(word_id); |
| - } else { |
| - WordIDSet word_id_set; |
| - word_id_set.insert(word_id); |
| - history_id_word_map_[history_id] = word_id_set; |
| +WordID URLIndexPrivateData::AddNewWordToWordList(const base::string16& term) { |
| + WordID word_id = word_list_.size(); |
| + if (available_words_.empty()) { |
| + word_list_.push_back(term); |
| + return word_id; |
| } |
| + |
| + word_id = available_words_.top(); |
| + available_words_.pop(); |
| + return word_id; |
| } |
| void URLIndexPrivateData::RemoveRowFromIndex(const history::URLRow& row) { |
| @@ -921,36 +869,34 @@ void URLIndexPrivateData::RemoveRowWordsFromIndex(const history::URLRow& row) { |
| history_id_word_map_.erase(history_id); |
| // Reconcile any changes to word usage. |
| - for (WordIDSet::iterator word_id_iter = word_id_set.begin(); |
| - word_id_iter != word_id_set.end(); ++word_id_iter) { |
| - WordID word_id = *word_id_iter; |
| - word_id_history_map_[word_id].erase(history_id); |
| - if (!word_id_history_map_[word_id].empty()) |
| - continue; // The word is still in use. |
| + for (WordID word_id : word_id_set) { |
| + auto word_id_history_map_iter = word_id_history_map_.find(word_id); |
| + DCHECK(word_id_history_map_iter != word_id_history_map_.end()); |
| + |
| + word_id_history_map_iter->second.erase(history_id); |
| + if (!word_id_history_map_iter->second.empty()) |
| + continue; |
| // The word is no longer in use. Reconcile any changes to character usage. |
| base::string16 word = word_list_[word_id]; |
| - Char16Set characters = Char16SetFromString16(word); |
| - for (Char16Set::iterator uni_char_iter = characters.begin(); |
| - uni_char_iter != characters.end(); ++uni_char_iter) { |
| - base::char16 uni_char = *uni_char_iter; |
| - char_word_map_[uni_char].erase(word_id); |
| - if (char_word_map_[uni_char].empty()) |
| - char_word_map_.erase(uni_char); // No longer in use. |
| + for (base::char16 uni_char : Char16SetFromString16(word)) { |
| + auto char_word_map_iter = char_word_map_.find(uni_char); |
| + char_word_map_iter->second.erase(word_id); |
| + if (char_word_map_iter->second.empty()) |
| + char_word_map_.erase(char_word_map_iter); |
| } |
| // Complete the removal of references to the word. |
| - word_id_history_map_.erase(word_id); |
| + word_id_history_map_.erase(word_id_history_map_iter); |
| word_map_.erase(word); |
| word_list_[word_id] = base::string16(); |
| - available_words_.insert(word_id); |
| + available_words_.push(word_id); |
| } |
| } |
| void URLIndexPrivateData::ResetSearchTermCache() { |
| - for (SearchTermCacheMap::iterator iter = search_term_cache_.begin(); |
| - iter != search_term_cache_.end(); ++iter) |
| - iter->second.used_ = false; |
| + for (auto& item : search_term_cache_) |
| + item.second.used_ = false; |
| } |
| bool URLIndexPrivateData::SaveToFile(const base::FilePath& file_path) { |
| @@ -995,9 +941,8 @@ void URLIndexPrivateData::SaveWordList(InMemoryURLIndexCacheItem* cache) const { |
| return; |
| WordListItem* list_item = cache->mutable_word_list(); |
| list_item->set_word_count(word_list_.size()); |
| - for (String16Vector::const_iterator iter = word_list_.begin(); |
| - iter != word_list_.end(); ++iter) |
| - list_item->add_word(base::UTF16ToUTF8(*iter)); |
| + for (const base::string16& word : word_list_) |
| + list_item->add_word(base::UTF16ToUTF8(word)); |
| } |
| void URLIndexPrivateData::SaveWordMap(InMemoryURLIndexCacheItem* cache) const { |
| @@ -1005,11 +950,10 @@ void URLIndexPrivateData::SaveWordMap(InMemoryURLIndexCacheItem* cache) const { |
| return; |
| WordMapItem* map_item = cache->mutable_word_map(); |
| map_item->set_item_count(word_map_.size()); |
| - for (WordMap::const_iterator iter = word_map_.begin(); |
| - iter != word_map_.end(); ++iter) { |
| + for (const auto& elem : word_map_) { |
| WordMapEntry* map_entry = map_item->add_word_map_entry(); |
| - map_entry->set_word(base::UTF16ToUTF8(iter->first)); |
| - map_entry->set_word_id(iter->second); |
| + map_entry->set_word(base::UTF16ToUTF8(elem.first)); |
| + map_entry->set_word_id(elem.second); |
| } |
| } |
| @@ -1019,15 +963,13 @@ void URLIndexPrivateData::SaveCharWordMap( |
| return; |
| CharWordMapItem* map_item = cache->mutable_char_word_map(); |
| map_item->set_item_count(char_word_map_.size()); |
| - for (CharWordIDMap::const_iterator iter = char_word_map_.begin(); |
| - iter != char_word_map_.end(); ++iter) { |
| + for (const auto& entry : char_word_map_) { |
| CharWordMapEntry* map_entry = map_item->add_char_word_map_entry(); |
| - map_entry->set_char_16(iter->first); |
| - const WordIDSet& word_id_set(iter->second); |
| + map_entry->set_char_16(entry.first); |
| + const WordIDSet& word_id_set(entry.second); |
| map_entry->set_item_count(word_id_set.size()); |
| - for (WordIDSet::const_iterator set_iter = word_id_set.begin(); |
| - set_iter != word_id_set.end(); ++set_iter) |
| - map_entry->add_word_id(*set_iter); |
| + for (WordID word_id : word_id_set) |
| + map_entry->add_word_id(word_id); |
| } |
| } |
| @@ -1037,16 +979,14 @@ void URLIndexPrivateData::SaveWordIDHistoryMap( |
| return; |
| WordIDHistoryMapItem* map_item = cache->mutable_word_id_history_map(); |
| map_item->set_item_count(word_id_history_map_.size()); |
| - for (WordIDHistoryMap::const_iterator iter = word_id_history_map_.begin(); |
| - iter != word_id_history_map_.end(); ++iter) { |
| + for (const auto& entry : word_id_history_map_) { |
| WordIDHistoryMapEntry* map_entry = |
| map_item->add_word_id_history_map_entry(); |
| - map_entry->set_word_id(iter->first); |
| - const HistoryIDSet& history_id_set(iter->second); |
| + map_entry->set_word_id(entry.first); |
| + const HistoryIDSet& history_id_set(entry.second); |
| map_entry->set_item_count(history_id_set.size()); |
| - for (HistoryIDSet::const_iterator set_iter = history_id_set.begin(); |
| - set_iter != history_id_set.end(); ++set_iter) |
| - map_entry->add_history_id(*set_iter); |
| + for (HistoryID history_id : history_id_set) |
| + map_entry->add_history_id(history_id); |
| } |
| } |
| @@ -1056,11 +996,10 @@ void URLIndexPrivateData::SaveHistoryInfoMap( |
| return; |
| HistoryInfoMapItem* map_item = cache->mutable_history_info_map(); |
| map_item->set_item_count(history_info_map_.size()); |
| - for (HistoryInfoMap::const_iterator iter = history_info_map_.begin(); |
| - iter != history_info_map_.end(); ++iter) { |
| + for (const auto& entry : history_info_map_) { |
| HistoryInfoMapEntry* map_entry = map_item->add_history_info_map_entry(); |
| - map_entry->set_history_id(iter->first); |
| - const history::URLRow& url_row(iter->second.url_row); |
| + map_entry->set_history_id(entry.first); |
| + const history::URLRow& url_row(entry.second.url_row); |
| // Note: We only save information that contributes to the index so there |
| // is no need to save search_term_cache_ (not persistent). |
| map_entry->set_visit_count(url_row.visit_count()); |
| @@ -1068,12 +1007,10 @@ void URLIndexPrivateData::SaveHistoryInfoMap( |
| map_entry->set_last_visit(url_row.last_visit().ToInternalValue()); |
| map_entry->set_url(url_row.url().spec()); |
| map_entry->set_title(base::UTF16ToUTF8(url_row.title())); |
| - const VisitInfoVector& visits(iter->second.visits); |
| - for (VisitInfoVector::const_iterator visit_iter = visits.begin(); |
| - visit_iter != visits.end(); ++visit_iter) { |
| + for (const auto& visit : entry.second.visits) { |
| HistoryInfoMapEntry_VisitInfo* visit_info = map_entry->add_visits(); |
| - visit_info->set_visit_time(visit_iter->first.ToInternalValue()); |
| - visit_info->set_transition_type(visit_iter->second); |
| + visit_info->set_visit_time(visit.first.ToInternalValue()); |
| + visit_info->set_transition_type(visit.second); |
| } |
| } |
| } |
| @@ -1092,17 +1029,14 @@ void URLIndexPrivateData::SaveWordStartsMap( |
| WordStartsMapItem* map_item = cache->mutable_word_starts_map(); |
| map_item->set_item_count(word_starts_map_.size()); |
| - for (WordStartsMap::const_iterator iter = word_starts_map_.begin(); |
| - iter != word_starts_map_.end(); ++iter) { |
| + for (const auto& entry : word_starts_map_) { |
| WordStartsMapEntry* map_entry = map_item->add_word_starts_map_entry(); |
| - map_entry->set_history_id(iter->first); |
| - const RowWordStarts& word_starts(iter->second); |
| - for (WordStarts::const_iterator i = word_starts.url_word_starts_.begin(); |
| - i != word_starts.url_word_starts_.end(); ++i) |
| - map_entry->add_url_word_starts(*i); |
| - for (WordStarts::const_iterator i = word_starts.title_word_starts_.begin(); |
| - i != word_starts.title_word_starts_.end(); ++i) |
| - map_entry->add_title_word_starts(*i); |
| + map_entry->set_history_id(entry.first); |
| + const RowWordStarts& word_starts(entry.second); |
| + for (auto url_word_start : word_starts.url_word_starts_) |
| + map_entry->add_url_word_starts(url_word_start); |
| + for (auto title_word_start : word_starts.title_word_starts_) |
| + map_entry->add_title_word_starts(title_word_start); |
| } |
| } |
| @@ -1145,9 +1079,10 @@ bool URLIndexPrivateData::RestoreWordList( |
| if (actual_item_count == 0 || actual_item_count != expected_item_count) |
| return false; |
| const RepeatedPtrField<std::string>& words(list_item.word()); |
| - for (RepeatedPtrField<std::string>::const_iterator iter = words.begin(); |
| - iter != words.end(); ++iter) |
| - word_list_.push_back(base::UTF8ToUTF16(*iter)); |
| + word_list_.reserve(words.size()); |
| + std::transform( |
| + words.begin(), words.end(), std::back_inserter(word_list_), |
| + [](const std::string& word) { return base::UTF8ToUTF16(word); }); |
| return true; |
| } |
| @@ -1160,10 +1095,9 @@ bool URLIndexPrivateData::RestoreWordMap( |
| uint32_t actual_item_count = list_item.word_map_entry_size(); |
| if (actual_item_count == 0 || actual_item_count != expected_item_count) |
| return false; |
| - const RepeatedPtrField<WordMapEntry>& entries(list_item.word_map_entry()); |
| - for (RepeatedPtrField<WordMapEntry>::const_iterator iter = entries.begin(); |
| - iter != entries.end(); ++iter) |
| - word_map_[base::UTF8ToUTF16(iter->word())] = iter->word_id(); |
| + for (const auto& entry : list_item.word_map_entry()) |
| + word_map_[base::UTF8ToUTF16(entry.word())] = entry.word_id(); |
| + |
| return true; |
| } |
| @@ -1176,21 +1110,15 @@ bool URLIndexPrivateData::RestoreCharWordMap( |
| uint32_t actual_item_count = list_item.char_word_map_entry_size(); |
| if (actual_item_count == 0 || actual_item_count != expected_item_count) |
| return false; |
| - const RepeatedPtrField<CharWordMapEntry>& |
| - entries(list_item.char_word_map_entry()); |
| - for (RepeatedPtrField<CharWordMapEntry>::const_iterator iter = |
| - entries.begin(); iter != entries.end(); ++iter) { |
| - expected_item_count = iter->item_count(); |
| - actual_item_count = iter->word_id_size(); |
| + |
| + for (const auto& entry : list_item.char_word_map_entry()) { |
| + expected_item_count = entry.item_count(); |
| + actual_item_count = entry.word_id_size(); |
| if (actual_item_count == 0 || actual_item_count != expected_item_count) |
| return false; |
| - base::char16 uni_char = static_cast<base::char16>(iter->char_16()); |
| - WordIDSet word_id_set; |
| - const RepeatedField<int32_t>& word_ids(iter->word_id()); |
| - for (RepeatedField<int32_t>::const_iterator jiter = word_ids.begin(); |
| - jiter != word_ids.end(); ++jiter) |
| - word_id_set.insert(*jiter); |
| - char_word_map_[uni_char] = word_id_set; |
| + base::char16 uni_char = static_cast<base::char16>(entry.char_16()); |
| + const RepeatedField<int32_t>& word_ids(entry.word_id()); |
| + char_word_map_[uni_char] = {word_ids.begin(), word_ids.end()}; |
| } |
| return true; |
| } |
| @@ -1204,23 +1132,16 @@ bool URLIndexPrivateData::RestoreWordIDHistoryMap( |
| uint32_t actual_item_count = list_item.word_id_history_map_entry_size(); |
| if (actual_item_count == 0 || actual_item_count != expected_item_count) |
| return false; |
| - const RepeatedPtrField<WordIDHistoryMapEntry>& |
| - entries(list_item.word_id_history_map_entry()); |
| - for (RepeatedPtrField<WordIDHistoryMapEntry>::const_iterator iter = |
| - entries.begin(); iter != entries.end(); ++iter) { |
| - expected_item_count = iter->item_count(); |
| - actual_item_count = iter->history_id_size(); |
| + for (const auto& entry : list_item.word_id_history_map_entry()) { |
| + expected_item_count = entry.item_count(); |
| + actual_item_count = entry.history_id_size(); |
| if (actual_item_count == 0 || actual_item_count != expected_item_count) |
| return false; |
| - WordID word_id = iter->word_id(); |
| - HistoryIDSet history_id_set; |
| - const RepeatedField<int64_t>& history_ids(iter->history_id()); |
| - for (RepeatedField<int64_t>::const_iterator jiter = history_ids.begin(); |
| - jiter != history_ids.end(); ++jiter) { |
| - history_id_set.insert(*jiter); |
| - AddToHistoryIDWordMap(*jiter, word_id); |
| - } |
| - word_id_history_map_[word_id] = history_id_set; |
| + WordID word_id = entry.word_id(); |
| + const RepeatedField<int64_t>& history_ids(entry.history_id()); |
| + word_id_history_map_[word_id] = {history_ids.begin(), history_ids.end()}; |
| + for (HistoryID history_id : history_ids) |
| + history_id_word_map_[history_id].insert(word_id); |
| } |
| return true; |
| } |
| @@ -1234,31 +1155,26 @@ bool URLIndexPrivateData::RestoreHistoryInfoMap( |
| uint32_t actual_item_count = list_item.history_info_map_entry_size(); |
| if (actual_item_count == 0 || actual_item_count != expected_item_count) |
| return false; |
| - const RepeatedPtrField<HistoryInfoMapEntry>& |
| - entries(list_item.history_info_map_entry()); |
| - for (RepeatedPtrField<HistoryInfoMapEntry>::const_iterator iter = |
| - entries.begin(); iter != entries.end(); ++iter) { |
| - HistoryID history_id = iter->history_id(); |
| - GURL url(iter->url()); |
| - history::URLRow url_row(url, history_id); |
| - url_row.set_visit_count(iter->visit_count()); |
| - url_row.set_typed_count(iter->typed_count()); |
| - url_row.set_last_visit(base::Time::FromInternalValue(iter->last_visit())); |
| - if (iter->has_title()) { |
| - base::string16 title(base::UTF8ToUTF16(iter->title())); |
| - url_row.set_title(title); |
| - } |
| - history_info_map_[history_id].url_row = url_row; |
| + |
| + for (const auto& entry : list_item.history_info_map_entry()) { |
| + HistoryID history_id = entry.history_id(); |
| + history::URLRow url_row(GURL(entry.url()), history_id); |
| + url_row.set_visit_count(entry.visit_count()); |
| + url_row.set_typed_count(entry.typed_count()); |
| + url_row.set_last_visit(base::Time::FromInternalValue(entry.last_visit())); |
| + if (entry.has_title()) |
| + url_row.set_title(base::UTF8ToUTF16(entry.title())); |
| + history_info_map_[history_id].url_row = std::move(url_row); |
| // Restore visits list. |
| VisitInfoVector visits; |
| - visits.reserve(iter->visits_size()); |
| - for (int i = 0; i < iter->visits_size(); ++i) { |
| - visits.push_back(std::make_pair( |
| - base::Time::FromInternalValue(iter->visits(i).visit_time()), |
| - ui::PageTransitionFromInt(iter->visits(i).transition_type()))); |
| + visits.reserve(entry.visits_size()); |
| + for (const auto& entry_visit : entry.visits()) { |
| + visits.emplace_back( |
| + base::Time::FromInternalValue(entry_visit.visit_time()), |
| + ui::PageTransitionFromInt(entry_visit.transition_type())); |
| } |
| - history_info_map_[history_id].visits = visits; |
| + history_info_map_[history_id].visits = std::move(visits); |
| } |
| return true; |
| } |
| @@ -1274,38 +1190,33 @@ bool URLIndexPrivateData::RestoreWordStartsMap( |
| uint32_t actual_item_count = list_item.word_starts_map_entry_size(); |
| if (actual_item_count == 0 || actual_item_count != expected_item_count) |
| return false; |
| - const RepeatedPtrField<WordStartsMapEntry>& |
| - entries(list_item.word_starts_map_entry()); |
| - for (RepeatedPtrField<WordStartsMapEntry>::const_iterator iter = |
| - entries.begin(); iter != entries.end(); ++iter) { |
| - HistoryID history_id = iter->history_id(); |
| + for (const auto& entry : list_item.word_starts_map_entry()) { |
| + HistoryID history_id = entry.history_id(); |
| RowWordStarts word_starts; |
| // Restore the URL word starts. |
| - const RepeatedField<int32_t>& url_starts(iter->url_word_starts()); |
| - for (RepeatedField<int32_t>::const_iterator jiter = url_starts.begin(); |
| - jiter != url_starts.end(); ++jiter) |
| - word_starts.url_word_starts_.push_back(*jiter); |
| + const RepeatedField<int32_t>& url_starts(entry.url_word_starts()); |
| + word_starts.url_word_starts_ = {url_starts.begin(), url_starts.end()}; |
| + |
| // Restore the page title word starts. |
| - const RepeatedField<int32_t>& title_starts(iter->title_word_starts()); |
| - for (RepeatedField<int32_t>::const_iterator jiter = title_starts.begin(); |
| - jiter != title_starts.end(); ++jiter) |
| - word_starts.title_word_starts_.push_back(*jiter); |
| - word_starts_map_[history_id] = word_starts; |
| + const RepeatedField<int32_t>& title_starts(entry.title_word_starts()); |
| + word_starts.title_word_starts_ = {title_starts.begin(), |
| + title_starts.end()}; |
| + |
| + word_starts_map_[history_id] = std::move(word_starts); |
| } |
| } else { |
| // Since the cache did not contain any word starts we must rebuild them from |
| // the URL and page titles. |
| - for (HistoryInfoMap::const_iterator iter = history_info_map_.begin(); |
| - iter != history_info_map_.end(); ++iter) { |
| + for (const auto& entry : history_info_map_) { |
| RowWordStarts word_starts; |
| - const history::URLRow& row(iter->second.url_row); |
| + const history::URLRow& row(entry.second.url_row); |
| const base::string16& url = |
| bookmarks::CleanUpUrlForMatching(row.url(), nullptr); |
| String16VectorFromString16(url, false, &word_starts.url_word_starts_); |
| const base::string16& title = |
| bookmarks::CleanUpTitleForMatching(row.title()); |
| String16VectorFromString16(title, false, &word_starts.title_word_starts_); |
| - word_starts_map_[iter->first] = word_starts; |
| + word_starts_map_[entry.first] = std::move(word_starts); |
| } |
| } |
| return true; |