Chromium Code Reviews

Unified Diff: components/omnibox/browser/url_index_private_data.cc

Issue 2690303012: Cleaning up url_index_private_data and in_memory_url_index_types. (Closed)
Patch Set: Rough sketch. Created 3 years, 10 months ago
Index: components/omnibox/browser/url_index_private_data.cc
diff --git a/components/omnibox/browser/url_index_private_data.cc b/components/omnibox/browser/url_index_private_data.cc
index 8088389cd365abc0af51a37e3658a86b14058399..274876ede4d35aab2db3365b527ff000d3496ac1 100644
--- a/components/omnibox/browser/url_index_private_data.cc
+++ b/components/omnibox/browser/url_index_private_data.cc
@@ -209,18 +209,17 @@ ScoredHistoryMatches URLIndexPrivateData::HistoryItemsForTerms(
// order to maintain omnibox responsiveness.
const size_t kItemsToScoreLimit = 500;
if (history_id_set.size() > kItemsToScoreLimit) {
- HistoryIDVector history_ids;
- std::copy(history_id_set.begin(), history_id_set.end(),
- std::back_inserter(history_ids));
+ HistoryIDVector history_ids = {history_id_set.begin(),
+ history_id_set.end()};
Peter Kasting 2017/02/17 01:33:46 Nit: Prefer the direct constructor: History
dyaroshev 2017/02/17 20:17:22 Done.
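[Annotation] The two spellings compared in this nit are equivalent; the direct constructor just avoids the braced copy-list-initialization. A minimal standalone sketch with plain std types (not the Chromium HistoryIDVector/HistoryIDSet typedefs):

  #include <set>
  #include <vector>

  int main() {
    std::set<int> id_set = {3, 1, 2};
    // Copy-list-initialization from an iterator range, as in the patch:
    std::vector<int> ids_a = {id_set.begin(), id_set.end()};
    // The direct range constructor the reviewer prefers:
    std::vector<int> ids_b(id_set.begin(), id_set.end());
    return ids_a == ids_b ? 0 : 1;  // Both yield {1, 2, 3}.
  }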
+
// Trim down the set by sorting by typed-count, visit-count, and last
// visit.
HistoryItemFactorGreater item_factor_functor(history_info_map_);
- std::partial_sort(history_ids.begin(),
- history_ids.begin() + kItemsToScoreLimit,
- history_ids.end(), item_factor_functor);
- history_id_set.clear();
- std::copy(history_ids.begin(), history_ids.begin() + kItemsToScoreLimit,
- std::inserter(history_id_set, history_id_set.end()));
+ std::nth_element(history_ids.begin(),
Peter Kasting 2017/02/17 01:33:46 I've never seen this algorithm before. Good use o
dyaroshev 2017/02/17 20:17:22 thx) While we are here - I'm not 100% sure, but it
Peter Kasting 2017/02/17 21:52:26 Correct, we're going to score them and then sort t
+ history_ids.begin() + kItemsToScoreLimit,
+ history_ids.end(), item_factor_functor);
+ history_id_set = {history_ids.begin(),
+ history_ids.begin() + kItemsToScoreLimit};
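[Annotation] The exchange above is about std::nth_element doing strictly less work than std::partial_sort: it only guarantees that the first N elements are the top N per the comparator, in unspecified order, which is enough when the survivors are scored and sorted later anyway. A hedged standalone sketch, with ints and std::greater standing in for HistoryID and HistoryItemFactorGreater:

  #include <algorithm>
  #include <cstddef>
  #include <functional>
  #include <vector>

  // Keep the n "largest" items without ordering them among themselves.
  std::vector<int> TrimToTopN(std::vector<int> items, std::size_t n) {
    if (items.size() > n) {
      // After this call, items[0..n) hold the n largest values, but in
      // unspecified relative order.
      std::nth_element(items.begin(), items.begin() + n, items.end(),
                       std::greater<int>());
      items.resize(n);
    }
    return items;
  }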
post_filter_item_count_ += history_id_set.size();
} else {
post_filter_item_count_ += pre_filter_item_count_;
@@ -511,18 +510,15 @@ HistoryIDSet URLIndexPrivateData::HistoryIDSetFromWords(
std::sort(words.begin(), words.end(), LengthGreater);
for (String16Vector::iterator iter = words.begin(); iter != words.end();
++iter) {
- base::string16 uni_word = *iter;
- HistoryIDSet term_history_set = HistoryIDsForTerm(uni_word);
+ HistoryIDSet term_history_set = HistoryIDsForTerm(*iter);
if (term_history_set.empty()) {
Peter Kasting 2017/02/17 01:33:46 Nit: No {}
dyaroshev 2017/02/17 20:17:22 Done.
- history_id_set.clear();
- break;
+ return {};
Peter Kasting 2017/02/17 01:33:46 Nit: I'd return HistoryIDSet(); (3 similar places)
dyaroshev 2017/02/17 20:17:22 Done.
}
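[Annotation] The "return HistoryIDSet();" nit is purely about spelling: both forms below return a default-constructed (empty) set, the second simply names the type at the return site. Illustrative std alias, not the Chromium HistoryIDSet:

  #include <set>

  using IDSet = std::set<long long>;

  IDSet EmptyBraces() { return {}; }         // Form used in the patch.
  IDSet EmptyExplicit() { return IDSet(); }  // Form the reviewer prefers.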
if (iter == words.begin()) {
- history_id_set.swap(term_history_set);
+ history_id_set = std::move(term_history_set);
} else {
- HistoryIDSet new_history_id_set = base::STLSetIntersection<HistoryIDSet>(
- history_id_set, term_history_set);
- history_id_set.swap(new_history_id_set);
+ history_id_set = base::STLSetIntersection<HistoryIDSet>(history_id_set,
+ term_history_set);
}
}
return history_id_set;
@@ -592,43 +588,40 @@ HistoryIDSet URLIndexPrivateData::HistoryIDsForTerm(
return HistoryIDSet();
}
// Or there may not have been a prefix from which to start.
- if (prefix_chars.empty()) {
- word_id_set.swap(leftover_set);
- } else {
- WordIDSet new_word_id_set = base::STLSetIntersection<WordIDSet>(
- word_id_set, leftover_set);
- word_id_set.swap(new_word_id_set);
- }
+ word_id_set = prefix_chars.empty() ? leftover_set
+ : base::STLSetIntersection<WordIDSet>(
+ word_id_set, leftover_set);
}
// We must filter the word list because the resulting word set surely
// contains words which do not have the search term as a proper subset.
- for (WordIDSet::iterator word_set_iter = word_id_set.begin();
- word_set_iter != word_id_set.end(); ) {
- if (word_list_[*word_set_iter].find(term) == base::string16::npos)
- word_set_iter = word_id_set.erase(word_set_iter);
- else
- ++word_set_iter;
- }
+ word_id_set.erase(std::remove_if(word_id_set.begin(), word_id_set.end(),
+ [&](WordID word_id) {
+ return word_list_[word_id].find(term) ==
+ base::string16::npos;
+ }),
+ word_id_set.end());
Peter Kasting 2017/02/17 01:33:46 Nit: Slightly shorter, reads a bit less awkwardly
+
} else {
word_id_set = WordIDSetForTermChars(Char16SetFromString16(term));
}
// If any words resulted then we can compose a set of history IDs by unioning
// the sets from each word.
- HistoryIDSet history_id_set;
- if (!word_id_set.empty()) {
- for (WordIDSet::iterator word_id_iter = word_id_set.begin();
- word_id_iter != word_id_set.end(); ++word_id_iter) {
- WordID word_id = *word_id_iter;
+ auto history_id_set = [&]() -> HistoryIDSet {
Peter Kasting 2017/02/17 01:33:46 Why do this in a lambda? Why not just "inline" th
+ HistoryIDVector buffer;
Peter Kasting 2017/02/17 01:33:46 Nit: Deserves a comment about why it's important t
+
Peter Kasting 2017/02/17 01:33:46 Nit: No blank line
+ for (WordID word_id : word_id_set) {
WordIDHistoryMap::iterator word_iter = word_id_history_map_.find(word_id);
- if (word_iter != word_id_history_map_.end()) {
- HistoryIDSet& word_history_id_set(word_iter->second);
- history_id_set.insert(word_history_id_set.begin(),
- word_history_id_set.end());
- }
+ if (word_iter == word_id_history_map_.end())
+ continue;
Peter Kasting 2017/02/17 01:33:46 Nit: I probably wouldn't change the old form here
+ HistoryIDSet& word_history_id_set(word_iter->second);
+ buffer.insert(buffer.end(), word_history_id_set.begin(),
+ word_history_id_set.end());
}
- }
+
+ return {buffer.begin(), buffer.end()};
+ }();
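[Annotation] The lambda builds the union of per-word history-ID sets by appending everything to a plain vector first and constructing the set once at the end (the buffer the reviewer asks to see documented). A hedged sketch of that pattern with std containers, assuming the point of the buffer is that one bulk build of a sorted container is cheaper than inserting element by element:

  #include <algorithm>
  #include <vector>

  // Union several ID lists: append everything, then sort + de-duplicate once.
  std::vector<int> UnionAsFlatSet(const std::vector<std::vector<int>>& rows) {
    std::vector<int> buffer;
    for (const auto& row : rows)
      buffer.insert(buffer.end(), row.begin(), row.end());
    std::sort(buffer.begin(), buffer.end());
    buffer.erase(std::unique(buffer.begin(), buffer.end()), buffer.end());
    return buffer;  // Sorted and unique: a "flat set" built in one pass.
  }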
// Record a new cache entry for this word if the term is longer than
// a single character.
@@ -641,32 +634,20 @@ HistoryIDSet URLIndexPrivateData::HistoryIDsForTerm(
WordIDSet URLIndexPrivateData::WordIDSetForTermChars(
const Char16Set& term_chars) {
WordIDSet word_id_set;
- for (Char16Set::const_iterator c_iter = term_chars.begin();
- c_iter != term_chars.end(); ++c_iter) {
- CharWordIDMap::iterator char_iter = char_word_map_.find(*c_iter);
- if (char_iter == char_word_map_.end()) {
- // A character was not found so there are no matching results: bail.
- word_id_set.clear();
- break;
- }
- WordIDSet& char_word_id_set(char_iter->second);
- // It is possible for there to no longer be any words associated with
- // a particular character. Give up in that case.
- if (char_word_id_set.empty()) {
- word_id_set.clear();
- break;
- }
- if (c_iter == term_chars.begin()) {
- // First character results becomes base set of results.
- word_id_set = char_word_id_set;
- } else {
- // Subsequent character results get intersected in.
- WordIDSet new_word_id_set = base::STLSetIntersection<WordIDSet>(
- word_id_set, char_word_id_set);
- word_id_set.swap(new_word_id_set);
- }
+ for (base::char16 c : term_chars) {
+ CharWordIDMap::iterator char_iter = char_word_map_.find(c);
+ if (char_iter == char_word_map_.end())
Peter Kasting 2017/02/17 01:33:46 Nit: Preserve the old comments
dyaroshev 2017/02/17 20:17:23 Done.
+ return {};
+
+ const WordIDSet& char_word_id_set(char_iter->second);
+ if (char_word_id_set.empty())
+ return {};
+
+ word_id_set =
+ base::STLSetIntersection<WordIDSet>(word_id_set, char_word_id_set);
Peter Kasting 2017/02/17 01:33:46 Does dropping the assignment the old code used in
dyaroshev 2017/02/17 20:17:23 Yes, my mistake. Done. Reverted almost everything
}
+
return word_id_set;
}
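[Annotation] The last exchange in this function appears to point at a real behavioral difference: the old code seeded word_id_set from the first character's results and only intersected from the second character on, whereas always intersecting into an initially empty set yields nothing ("Yes, my mistake. Done."). A standalone sketch of the corrected shape, with std types in place of WordIDSet and base::STLSetIntersection:

  #include <algorithm>
  #include <cstddef>
  #include <iterator>
  #include <set>
  #include <utility>
  #include <vector>

  std::set<int> IntersectAll(const std::vector<std::set<int>>& sets) {
    std::set<int> result;
    for (std::size_t i = 0; i < sets.size(); ++i) {
      if (sets[i].empty())
        return {};          // Any empty input empties the whole result.
      if (i == 0) {
        result = sets[i];   // First input seeds the result...
      } else {
        std::set<int> next; // ...later inputs are intersected in.
        std::set_intersection(result.begin(), result.end(),
                              sets[i].begin(), sets[i].end(),
                              std::inserter(next, next.end()));
        result = std::move(next);
      }
    }
    return result;
  }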
@@ -704,15 +685,12 @@ void URLIndexPrivateData::HistoryIdSetToScoredMatches(
&lower_terms_to_word_starts_offsets);
// Filter bad matches and other matches we don't want to display.
- for (auto it = history_id_set.begin();;) {
- it = std::find_if(it, history_id_set.end(),
- [this, template_url_service](const HistoryID history_id) {
- return ShouldFilter(history_id, template_url_service);
- });
- if (it == history_id_set.end())
- break;
- it = history_id_set.erase(it);
- }
+ history_id_set.erase(
+ std::remove_if(history_id_set.begin(), history_id_set.end(),
+ [this, template_url_service](const HistoryID history_id) {
+ return ShouldFilter(history_id, template_url_service);
+ }),
+ history_id_set.end());
// Score the matches.
const size_t num_matches = history_id_set.size();
@@ -830,10 +808,9 @@ void URLIndexPrivateData::AddRowWordsToIndex(const history::URLRow& row,
const base::string16& title = bookmarks::CleanUpTitleForMatching(row.title());
String16Set title_words = String16SetFromString16(title,
word_starts ? &word_starts->title_word_starts_ : nullptr);
- String16Set words = base::STLSetUnion<String16Set>(url_words, title_words);
- for (String16Set::iterator word_iter = words.begin();
- word_iter != words.end(); ++word_iter)
- AddWordToIndex(*word_iter, history_id);
+ for (const auto& word :
+ base::STLSetUnion<String16Set>(url_words, title_words))
+ AddWordToIndex(word, history_id);
search_term_cache_.clear(); // Invalidate the term cache.
}
@@ -866,21 +843,8 @@ void URLIndexPrivateData::AddWordHistory(const base::string16& term,
// For each character in the newly added word (i.e. a word that is not
// already in the word index), add the word to the character index.
- Char16Set characters = Char16SetFromString16(term);
- for (Char16Set::iterator uni_char_iter = characters.begin();
- uni_char_iter != characters.end(); ++uni_char_iter) {
- base::char16 uni_char = *uni_char_iter;
- CharWordIDMap::iterator char_iter = char_word_map_.find(uni_char);
- if (char_iter != char_word_map_.end()) {
- // Update existing entry in the char/word index.
- WordIDSet& word_id_set(char_iter->second);
- word_id_set.insert(word_id);
- } else {
- // Create a new entry in the char/word index.
- WordIDSet word_id_set;
- word_id_set.insert(word_id);
- char_word_map_[uni_char] = word_id_set;
- }
+ for (base::char16 uni_char : Char16SetFromString16(term)) {
Peter Kasting 2017/02/17 01:33:46 Nit: No {}
+ char_word_map_[uni_char].insert(word_id);
Peter Kasting 2017/02/17 01:33:46 Nice simplification!
dyaroshev 2017/02/17 20:17:23 I rewrote AddWordToIndex, AddWordHistory and Updat
}
}
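[Annotation] The simplification praised above relies on map operator[] default-constructing the mapped value when the key is missing (assuming CharWordIDMap has the usual map-like operator[] semantics), so the "create new entry" and "update existing entry" branches collapse into one line. A minimal std-type sketch:

  #include <map>
  #include <set>

  // operator[] creates an empty set for a new key, so no explicit branch is
  // needed for "first word for this char" vs. "char already known".
  void AddWordForChar(std::map<char, std::set<int>>* char_word_map,
                      char c, int word_id) {
    (*char_word_map)[c].insert(word_id);
  }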
@@ -895,15 +859,7 @@ void URLIndexPrivateData::UpdateWordHistory(WordID word_id,
void URLIndexPrivateData::AddToHistoryIDWordMap(HistoryID history_id,
WordID word_id) {
- HistoryIDWordMap::iterator iter = history_id_word_map_.find(history_id);
- if (iter != history_id_word_map_.end()) {
- WordIDSet& word_id_set(iter->second);
- word_id_set.insert(word_id);
- } else {
- WordIDSet word_id_set;
- word_id_set.insert(word_id);
- history_id_word_map_[history_id] = word_id_set;
- }
+ history_id_word_map_[history_id].insert(word_id);
}
void URLIndexPrivateData::RemoveRowFromIndex(const history::URLRow& row) {
@@ -921,26 +877,27 @@ void URLIndexPrivateData::RemoveRowWordsFromIndex(const history::URLRow& row) {
history_id_word_map_.erase(history_id);
// Reconcile any changes to word usage.
- for (WordIDSet::iterator word_id_iter = word_id_set.begin();
- word_id_iter != word_id_set.end(); ++word_id_iter) {
- WordID word_id = *word_id_iter;
- word_id_history_map_[word_id].erase(history_id);
- if (!word_id_history_map_[word_id].empty())
- continue; // The word is still in use.
+ for (WordID word_id : word_id_set) {
+ auto word_id_history_map_iter = word_id_history_map_.find(word_id);
+
Peter Kasting 2017/02/17 01:33:46 Nit: No blank line
dyaroshev 2017/02/17 20:17:22 Done.
+ if (word_id_history_map_iter == word_id_history_map_.end())
Peter Kasting 2017/02/17 01:33:46 Can this conditional succeed? The old code doesn'
dyaroshev 2017/02/17 20:17:22 Seems like no, we always add word to this map. And
+ continue;
+
+ word_id_history_map_iter->second.erase(history_id);
+ if (!word_id_history_map_iter->second.empty())
+ continue;
// The word is no longer in use. Reconcile any changes to character usage.
base::string16 word = word_list_[word_id];
- Char16Set characters = Char16SetFromString16(word);
- for (Char16Set::iterator uni_char_iter = characters.begin();
- uni_char_iter != characters.end(); ++uni_char_iter) {
- base::char16 uni_char = *uni_char_iter;
- char_word_map_[uni_char].erase(word_id);
- if (char_word_map_[uni_char].empty())
- char_word_map_.erase(uni_char); // No longer in use.
+ for (base::char16 uni_char : Char16SetFromString16(word)) {
+ auto char_word_map_iter = char_word_map_.find(uni_char);
+ char_word_map_iter->second.erase(word_id);
+ if (char_word_map_iter->second.empty())
+ char_word_map_.erase(char_word_map_iter);
}
// Complete the removal of references to the word.
- word_id_history_map_.erase(word_id);
+ word_id_history_map_.erase(word_id_history_map_iter);
word_map_.erase(word);
word_list_[word_id] = base::string16();
available_words_.insert(word_id);
@@ -948,9 +905,8 @@ void URLIndexPrivateData::RemoveRowWordsFromIndex(const history::URLRow& row) {
}
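[Annotation] The rewrite in RemoveRowWordsFromIndex replaces repeated map[key] lookups with a single find() whose iterator is reused for the membership check, the erase of the value, and, if the bucket empties, the erase of the map entry itself. A standalone sketch of that pattern:

  #include <map>
  #include <set>

  void RemoveValue(std::map<int, std::set<int>>* index, int key, int value) {
    auto it = index->find(key);   // Look the key up exactly once.
    if (it == index->end())
      return;
    it->second.erase(value);
    if (it->second.empty())
      index->erase(it);           // Erase via iterator: no second lookup.
  }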
void URLIndexPrivateData::ResetSearchTermCache() {
- for (SearchTermCacheMap::iterator iter = search_term_cache_.begin();
- iter != search_term_cache_.end(); ++iter)
- iter->second.used_ = false;
+ for (auto& item : search_term_cache_)
+ item.second.used_ = false;
}
bool URLIndexPrivateData::SaveToFile(const base::FilePath& file_path) {
@@ -995,9 +951,8 @@ void URLIndexPrivateData::SaveWordList(InMemoryURLIndexCacheItem* cache) const {
return;
WordListItem* list_item = cache->mutable_word_list();
list_item->set_word_count(word_list_.size());
- for (String16Vector::const_iterator iter = word_list_.begin();
- iter != word_list_.end(); ++iter)
- list_item->add_word(base::UTF16ToUTF8(*iter));
+ for (const base::string16& word : word_list_)
+ list_item->add_word(base::UTF16ToUTF8(word));
}
void URLIndexPrivateData::SaveWordMap(InMemoryURLIndexCacheItem* cache) const {
@@ -1005,11 +960,10 @@ void URLIndexPrivateData::SaveWordMap(InMemoryURLIndexCacheItem* cache) const {
return;
WordMapItem* map_item = cache->mutable_word_map();
map_item->set_item_count(word_map_.size());
- for (WordMap::const_iterator iter = word_map_.begin();
- iter != word_map_.end(); ++iter) {
+ for (const auto& elem : word_map_) {
WordMapEntry* map_entry = map_item->add_word_map_entry();
- map_entry->set_word(base::UTF16ToUTF8(iter->first));
- map_entry->set_word_id(iter->second);
+ map_entry->set_word(base::UTF16ToUTF8(elem.first));
+ map_entry->set_word_id(elem.second);
}
}
@@ -1019,15 +973,13 @@ void URLIndexPrivateData::SaveCharWordMap(
return;
CharWordMapItem* map_item = cache->mutable_char_word_map();
map_item->set_item_count(char_word_map_.size());
- for (CharWordIDMap::const_iterator iter = char_word_map_.begin();
- iter != char_word_map_.end(); ++iter) {
+ for (const auto& elem : char_word_map_) {
CharWordMapEntry* map_entry = map_item->add_char_word_map_entry();
- map_entry->set_char_16(iter->first);
- const WordIDSet& word_id_set(iter->second);
+ map_entry->set_char_16(elem.first);
+ const WordIDSet& word_id_set(elem.second);
map_entry->set_item_count(word_id_set.size());
- for (WordIDSet::const_iterator set_iter = word_id_set.begin();
- set_iter != word_id_set.end(); ++set_iter)
- map_entry->add_word_id(*set_iter);
+ for (WordID word_id : word_id_set)
+ map_entry->add_word_id(word_id);
}
}
@@ -1037,16 +989,14 @@ void URLIndexPrivateData::SaveWordIDHistoryMap(
return;
WordIDHistoryMapItem* map_item = cache->mutable_word_id_history_map();
map_item->set_item_count(word_id_history_map_.size());
- for (WordIDHistoryMap::const_iterator iter = word_id_history_map_.begin();
- iter != word_id_history_map_.end(); ++iter) {
+ for (const auto& elem : word_id_history_map_) {
WordIDHistoryMapEntry* map_entry =
map_item->add_word_id_history_map_entry();
- map_entry->set_word_id(iter->first);
- const HistoryIDSet& history_id_set(iter->second);
+ map_entry->set_word_id(elem.first);
+ const HistoryIDSet& history_id_set(elem.second);
map_entry->set_item_count(history_id_set.size());
- for (HistoryIDSet::const_iterator set_iter = history_id_set.begin();
- set_iter != history_id_set.end(); ++set_iter)
- map_entry->add_history_id(*set_iter);
+ for (HistoryID history_id : history_id_set)
+ map_entry->add_history_id(history_id);
}
}
@@ -1056,11 +1006,10 @@ void URLIndexPrivateData::SaveHistoryInfoMap(
return;
HistoryInfoMapItem* map_item = cache->mutable_history_info_map();
map_item->set_item_count(history_info_map_.size());
- for (HistoryInfoMap::const_iterator iter = history_info_map_.begin();
- iter != history_info_map_.end(); ++iter) {
+ for (const auto& elem : history_info_map_) {
HistoryInfoMapEntry* map_entry = map_item->add_history_info_map_entry();
- map_entry->set_history_id(iter->first);
- const history::URLRow& url_row(iter->second.url_row);
+ map_entry->set_history_id(elem.first);
+ const history::URLRow& url_row(elem.second.url_row);
// Note: We only save information that contributes to the index so there
// is no need to save search_term_cache_ (not persistent).
map_entry->set_visit_count(url_row.visit_count());
@@ -1068,12 +1017,10 @@ void URLIndexPrivateData::SaveHistoryInfoMap(
map_entry->set_last_visit(url_row.last_visit().ToInternalValue());
map_entry->set_url(url_row.url().spec());
map_entry->set_title(base::UTF16ToUTF8(url_row.title()));
- const VisitInfoVector& visits(iter->second.visits);
- for (VisitInfoVector::const_iterator visit_iter = visits.begin();
- visit_iter != visits.end(); ++visit_iter) {
+ for (const auto& visit : elem.second.visits) {
HistoryInfoMapEntry_VisitInfo* visit_info = map_entry->add_visits();
- visit_info->set_visit_time(visit_iter->first.ToInternalValue());
- visit_info->set_transition_type(visit_iter->second);
+ visit_info->set_visit_time(visit.first.ToInternalValue());
+ visit_info->set_transition_type(visit.second);
}
}
}
@@ -1092,17 +1039,14 @@ void URLIndexPrivateData::SaveWordStartsMap(
WordStartsMapItem* map_item = cache->mutable_word_starts_map();
map_item->set_item_count(word_starts_map_.size());
- for (WordStartsMap::const_iterator iter = word_starts_map_.begin();
- iter != word_starts_map_.end(); ++iter) {
+ for (const auto& entrie : word_starts_map_) {
Peter Kasting 2017/02/17 01:33:46 Nit: Did you mean "entry"? (many places)
dyaroshev 2017/02/17 20:17:22 Oops, spelling( Done.
WordStartsMapEntry* map_entry = map_item->add_word_starts_map_entry();
- map_entry->set_history_id(iter->first);
- const RowWordStarts& word_starts(iter->second);
- for (WordStarts::const_iterator i = word_starts.url_word_starts_.begin();
- i != word_starts.url_word_starts_.end(); ++i)
- map_entry->add_url_word_starts(*i);
- for (WordStarts::const_iterator i = word_starts.title_word_starts_.begin();
- i != word_starts.title_word_starts_.end(); ++i)
- map_entry->add_title_word_starts(*i);
+ map_entry->set_history_id(entrie.first);
+ const RowWordStarts& word_starts(entrie.second);
+ for (auto url_word_start : word_starts.url_word_starts_)
+ map_entry->add_url_word_starts(url_word_start);
+ for (auto title_word_start : word_starts.title_word_starts_)
+ map_entry->add_title_word_starts(title_word_start);
}
}
@@ -1145,9 +1089,10 @@ bool URLIndexPrivateData::RestoreWordList(
if (actual_item_count == 0 || actual_item_count != expected_item_count)
return false;
const RepeatedPtrField<std::string>& words(list_item.word());
- for (RepeatedPtrField<std::string>::const_iterator iter = words.begin();
- iter != words.end(); ++iter)
- word_list_.push_back(base::UTF8ToUTF16(*iter));
+ word_list_.reserve(words.size());
+ std::transform(
+ words.begin(), words.end(), std::back_inserter(word_list_),
+ [](const std::string& word) { return base::UTF8ToUTF16(word); });
return true;
}
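[Annotation] RestoreWordList now reserves the destination up front and fills it with std::transform through a back_inserter. A self-contained sketch of the same shape, with a stand-in conversion in place of base::UTF8ToUTF16:

  #include <algorithm>
  #include <iterator>
  #include <string>
  #include <vector>

  std::vector<std::wstring> WidenAll(const std::vector<std::string>& words) {
    std::vector<std::wstring> out;
    out.reserve(words.size());  // One allocation up front.
    std::transform(words.begin(), words.end(), std::back_inserter(out),
                   [](const std::string& w) {
                     return std::wstring(w.begin(), w.end());
                   });
    return out;
  }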
@@ -1160,10 +1105,9 @@ bool URLIndexPrivateData::RestoreWordMap(
uint32_t actual_item_count = list_item.word_map_entry_size();
if (actual_item_count == 0 || actual_item_count != expected_item_count)
return false;
- const RepeatedPtrField<WordMapEntry>& entries(list_item.word_map_entry());
- for (RepeatedPtrField<WordMapEntry>::const_iterator iter = entries.begin();
- iter != entries.end(); ++iter)
- word_map_[base::UTF8ToUTF16(iter->word())] = iter->word_id();
+ for (const auto& entrie : list_item.word_map_entry())
+ word_map_[base::UTF8ToUTF16(entrie.word())] = entrie.word_id();
+
return true;
}
@@ -1176,21 +1120,15 @@ bool URLIndexPrivateData::RestoreCharWordMap(
uint32_t actual_item_count = list_item.char_word_map_entry_size();
if (actual_item_count == 0 || actual_item_count != expected_item_count)
return false;
- const RepeatedPtrField<CharWordMapEntry>&
- entries(list_item.char_word_map_entry());
- for (RepeatedPtrField<CharWordMapEntry>::const_iterator iter =
- entries.begin(); iter != entries.end(); ++iter) {
- expected_item_count = iter->item_count();
- actual_item_count = iter->word_id_size();
+
+ for (const auto& entrie : list_item.char_word_map_entry()) {
+ expected_item_count = entrie.item_count();
+ actual_item_count = entrie.word_id_size();
if (actual_item_count == 0 || actual_item_count != expected_item_count)
return false;
- base::char16 uni_char = static_cast<base::char16>(iter->char_16());
- WordIDSet word_id_set;
- const RepeatedField<int32_t>& word_ids(iter->word_id());
- for (RepeatedField<int32_t>::const_iterator jiter = word_ids.begin();
- jiter != word_ids.end(); ++jiter)
- word_id_set.insert(*jiter);
- char_word_map_[uni_char] = word_id_set;
+ base::char16 uni_char = static_cast<base::char16>(entrie.char_16());
+ const RepeatedField<int32_t>& word_ids(entrie.word_id());
+ char_word_map_[uni_char] = {word_ids.begin(), word_ids.end()};
}
return true;
}
@@ -1204,23 +1142,16 @@ bool URLIndexPrivateData::RestoreWordIDHistoryMap(
uint32_t actual_item_count = list_item.word_id_history_map_entry_size();
if (actual_item_count == 0 || actual_item_count != expected_item_count)
return false;
- const RepeatedPtrField<WordIDHistoryMapEntry>&
- entries(list_item.word_id_history_map_entry());
- for (RepeatedPtrField<WordIDHistoryMapEntry>::const_iterator iter =
- entries.begin(); iter != entries.end(); ++iter) {
- expected_item_count = iter->item_count();
- actual_item_count = iter->history_id_size();
+ for (const auto& entrie : list_item.word_id_history_map_entry()) {
+ expected_item_count = entrie.item_count();
+ actual_item_count = entrie.history_id_size();
if (actual_item_count == 0 || actual_item_count != expected_item_count)
return false;
- WordID word_id = iter->word_id();
- HistoryIDSet history_id_set;
- const RepeatedField<int64_t>& history_ids(iter->history_id());
- for (RepeatedField<int64_t>::const_iterator jiter = history_ids.begin();
- jiter != history_ids.end(); ++jiter) {
- history_id_set.insert(*jiter);
- AddToHistoryIDWordMap(*jiter, word_id);
- }
- word_id_history_map_[word_id] = history_id_set;
+ WordID word_id = entrie.word_id();
+ const RepeatedField<int64_t>& history_ids(entrie.history_id());
+ word_id_history_map_[word_id] = {history_ids.begin(), history_ids.end()};
+ for (HistoryID history_id : history_ids)
+ AddToHistoryIDWordMap(history_id, word_id);
}
return true;
}
@@ -1234,31 +1165,28 @@ bool URLIndexPrivateData::RestoreHistoryInfoMap(
uint32_t actual_item_count = list_item.history_info_map_entry_size();
if (actual_item_count == 0 || actual_item_count != expected_item_count)
return false;
- const RepeatedPtrField<HistoryInfoMapEntry>&
- entries(list_item.history_info_map_entry());
- for (RepeatedPtrField<HistoryInfoMapEntry>::const_iterator iter =
- entries.begin(); iter != entries.end(); ++iter) {
- HistoryID history_id = iter->history_id();
- GURL url(iter->url());
+
+ for (const auto& entrie : list_item.history_info_map_entry()) {
+ HistoryID history_id = entrie.history_id();
+ GURL url(entrie.url());
Peter Kasting 2017/02/17 01:33:46 Nit: Prefer = to () here
dyaroshev 2017/02/17 20:17:22 Removed the temporary altogether.
history::URLRow url_row(url, history_id);
- url_row.set_visit_count(iter->visit_count());
- url_row.set_typed_count(iter->typed_count());
- url_row.set_last_visit(base::Time::FromInternalValue(iter->last_visit()));
- if (iter->has_title()) {
- base::string16 title(base::UTF8ToUTF16(iter->title()));
- url_row.set_title(title);
+ url_row.set_visit_count(entrie.visit_count());
+ url_row.set_typed_count(entrie.typed_count());
+ url_row.set_last_visit(base::Time::FromInternalValue(entrie.last_visit()));
+ if (entrie.has_title()) {
Peter Kasting 2017/02/17 01:33:46 Nit: No {}
dyaroshev 2017/02/17 20:17:22 Done.
+ url_row.set_title(base::UTF8ToUTF16(entrie.title()));
}
- history_info_map_[history_id].url_row = url_row;
+ history_info_map_[history_id].url_row = std::move(url_row);
// Restore visits list.
VisitInfoVector visits;
- visits.reserve(iter->visits_size());
- for (int i = 0; i < iter->visits_size(); ++i) {
- visits.push_back(std::make_pair(
- base::Time::FromInternalValue(iter->visits(i).visit_time()),
- ui::PageTransitionFromInt(iter->visits(i).transition_type())));
+ visits.reserve(entrie.visits_size());
+ for (const auto& entrie_visit : entrie.visits()) {
+ visits.emplace_back(
+ base::Time::FromInternalValue(entrie_visit.visit_time()),
+ ui::PageTransitionFromInt(entrie_visit.transition_type()));
}
- history_info_map_[history_id].visits = visits;
+ history_info_map_[history_id].visits = std::move(visits);
}
return true;
}
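[Annotation] The RestoreHistoryInfoMap changes are about avoiding copies: emplace_back constructs each visit pair in place from its arguments, and std::move hands the finished URLRow and visits vector to the map instead of copying them. A small std-type sketch of the same moves (the pair members stand in for visit time and transition type):

  #include <map>
  #include <utility>
  #include <vector>

  using Visit = std::pair<long long, int>;  // (time, transition) stand-in.

  void StoreVisits(std::map<int, std::vector<Visit>>* info, int history_id,
                   const std::vector<Visit>& raw) {
    std::vector<Visit> visits;
    visits.reserve(raw.size());
    for (const Visit& v : raw)
      visits.emplace_back(v.first, v.second);  // Construct the pair in place.
    (*info)[history_id] = std::move(visits);   // Move, don't copy, into the map.
  }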
@@ -1276,36 +1204,33 @@ bool URLIndexPrivateData::RestoreWordStartsMap(
return false;
const RepeatedPtrField<WordStartsMapEntry>&
entries(list_item.word_starts_map_entry());
- for (RepeatedPtrField<WordStartsMapEntry>::const_iterator iter =
- entries.begin(); iter != entries.end(); ++iter) {
- HistoryID history_id = iter->history_id();
+ for (const auto& entrie : entries) {
+ HistoryID history_id = entrie.history_id();
RowWordStarts word_starts;
// Restore the URL word starts.
- const RepeatedField<int32_t>& url_starts(iter->url_word_starts());
- for (RepeatedField<int32_t>::const_iterator jiter = url_starts.begin();
- jiter != url_starts.end(); ++jiter)
- word_starts.url_word_starts_.push_back(*jiter);
+ const RepeatedField<int32_t>& url_starts(entrie.url_word_starts());
+ word_starts.url_word_starts_ = {url_starts.begin(), url_starts.end()};
+
// Restore the page title word starts.
- const RepeatedField<int32_t>& title_starts(iter->title_word_starts());
- for (RepeatedField<int32_t>::const_iterator jiter = title_starts.begin();
- jiter != title_starts.end(); ++jiter)
- word_starts.title_word_starts_.push_back(*jiter);
- word_starts_map_[history_id] = word_starts;
+ const RepeatedField<int32_t>& title_starts(entrie.title_word_starts());
+ word_starts.title_word_starts_ = {title_starts.begin(),
+ title_starts.end()};
+
+ word_starts_map_[history_id] = std::move(word_starts);
}
} else {
// Since the cache did not contain any word starts we must rebuild then from
// the URL and page titles.
- for (HistoryInfoMap::const_iterator iter = history_info_map_.begin();
- iter != history_info_map_.end(); ++iter) {
+ for (const auto& entrie : history_info_map_) {
RowWordStarts word_starts;
- const history::URLRow& row(iter->second.url_row);
+ const history::URLRow& row(entrie.second.url_row);
const base::string16& url =
bookmarks::CleanUpUrlForMatching(row.url(), nullptr);
String16VectorFromString16(url, false, &word_starts.url_word_starts_);
const base::string16& title =
bookmarks::CleanUpTitleForMatching(row.title());
String16VectorFromString16(title, false, &word_starts.title_word_starts_);
- word_starts_map_[iter->first] = word_starts;
+ word_starts_map_[entrie.first] = std::move(word_starts);
}
}
return true;
