OLD | NEW |
---|---|
1 // Copyright 2014 The Chromium Authors. All rights reserved. | 1 // Copyright 2014 The Chromium Authors. All rights reserved. |
2 // Use of this source code is governed by a BSD-style license that can be | 2 // Use of this source code is governed by a BSD-style license that can be |
3 // found in the LICENSE file. | 3 // found in the LICENSE file. |
4 | 4 |
5 #include "components/bookmarks/browser/bookmark_index.h" | 5 #include "components/bookmarks/browser/bookmark_index.h" |
6 | 6 |
7 #include <stdint.h> | 7 #include <stdint.h> |
8 | 8 |
9 #include <algorithm> | |
10 #include <functional> | |
11 #include <iterator> | |
12 #include <list> | |
13 | |
14 #include "base/i18n/case_conversion.h" | 9 #include "base/i18n/case_conversion.h" |
15 #include "base/logging.h" | 10 #include "base/logging.h" |
16 #include "base/stl_util.h" | 11 #include "base/stl_util.h" |
17 #include "base/strings/utf_offset_string_conversions.h" | 12 #include "base/strings/utf_offset_string_conversions.h" |
18 #include "build/build_config.h" | 13 #include "build/build_config.h" |
19 #include "components/bookmarks/browser/bookmark_client.h" | 14 #include "components/bookmarks/browser/bookmark_client.h" |
20 #include "components/bookmarks/browser/bookmark_match.h" | 15 #include "components/bookmarks/browser/bookmark_match.h" |
21 #include "components/bookmarks/browser/bookmark_node.h" | |
22 #include "components/bookmarks/browser/bookmark_utils.h" | 16 #include "components/bookmarks/browser/bookmark_utils.h" |
17 #include "components/bookmarks/browser/titled_url_node.h" | |
23 #include "components/query_parser/snippet.h" | 18 #include "components/query_parser/snippet.h" |
24 #include "third_party/icu/source/common/unicode/normalizer2.h" | 19 #include "third_party/icu/source/common/unicode/normalizer2.h" |
25 #include "third_party/icu/source/common/unicode/utypes.h" | 20 #include "third_party/icu/source/common/unicode/utypes.h" |
26 | 21 |
27 namespace bookmarks { | 22 namespace bookmarks { |
28 | 23 |
29 using UrlTypedCountMap = BookmarkClient::UrlTypedCountMap; | 24 using UrlTypedCountMap = BookmarkClient::UrlTypedCountMap; |
30 | 25 |
31 namespace { | 26 namespace { |
32 | 27 |
33 using UrlNodeMap = std::unordered_map<const GURL*, const BookmarkNode*>; | 28 using UrlNodeMap = std::unordered_map<const GURL*, const TitledUrlNode*>; |
34 using UrlTypedCountPair = std::pair<const GURL*, int>; | 29 using UrlTypedCountPair = std::pair<const GURL*, int>; |
35 using UrlTypedCountPairs = std::vector<UrlTypedCountPair>; | 30 using UrlTypedCountPairs = std::vector<UrlTypedCountPair>; |
36 | 31 |
37 // Returns a normalized version of the UTF16 string |text|. If it fails to | 32 // Returns a normalized version of the UTF16 string |text|. If it fails to |
38 // normalize the string, returns |text| itself as a best-effort. | 33 // normalize the string, returns |text| itself as a best-effort. |
39 base::string16 Normalize(const base::string16& text) { | 34 base::string16 Normalize(const base::string16& text) { |
40 UErrorCode status = U_ZERO_ERROR; | 35 UErrorCode status = U_ZERO_ERROR; |
41 const icu::Normalizer2* normalizer2 = | 36 const icu::Normalizer2* normalizer2 = |
42 icu::Normalizer2::getInstance(nullptr, "nfkc", UNORM2_COMPOSE, status); | 37 icu::Normalizer2::getInstance(nullptr, "nfkc", UNORM2_COMPOSE, status); |
43 if (U_FAILURE(status)) { | 38 if (U_FAILURE(status)) { |
44 // Log and crash right away to capture the error code in the crash report. | 39 // Log and crash right away to capture the error code in the crash report. |
45 LOG(FATAL) << "failed to create a normalizer: " << u_errorName(status); | 40 LOG(FATAL) << "failed to create a normalizer: " << u_errorName(status); |
46 } | 41 } |
47 icu::UnicodeString unicode_text( | 42 icu::UnicodeString unicode_text( |
48 text.data(), static_cast<int32_t>(text.length())); | 43 text.data(), static_cast<int32_t>(text.length())); |
49 icu::UnicodeString unicode_normalized_text; | 44 icu::UnicodeString unicode_normalized_text; |
50 normalizer2->normalize(unicode_text, unicode_normalized_text, status); | 45 normalizer2->normalize(unicode_text, unicode_normalized_text, status); |
51 if (U_FAILURE(status)) { | 46 if (U_FAILURE(status)) { |
52 // This should not happen. Log the error and fall back. | 47 // This should not happen. Log the error and fall back. |
53 LOG(ERROR) << "normalization failed: " << u_errorName(status); | 48 LOG(ERROR) << "normalization failed: " << u_errorName(status); |
54 return text; | 49 return text; |
55 } | 50 } |
56 return base::string16(unicode_normalized_text.getBuffer(), | 51 return base::string16(unicode_normalized_text.getBuffer(), |
57 unicode_normalized_text.length()); | 52 unicode_normalized_text.length()); |
58 } | 53 } |
59 | 54 |
60 // Sort functor for sorting bookmark URLs by typed count. We sort in decreasing | 55 // Sort functor for UrlTypedCountPairs. We sort in decreasing order of typed |
61 // order of typed count so that the best matches will always be added to the | 56 // count so that the best matches will always be added to the results. |
62 // results. | |
63 struct UrlTypedCountPairSortFunctor { | 57 struct UrlTypedCountPairSortFunctor { |
64 bool operator()(const UrlTypedCountPair& a, | 58 bool operator()(const UrlTypedCountPair& a, |
65 const UrlTypedCountPair& b) const { | 59 const UrlTypedCountPair& b) const { |
66 return a.second > b.second; | 60 return a.second > b.second; |
67 } | 61 } |
68 }; | 62 }; |
69 | 63 |
70 // Extract the GURL stored in a BookmarkClient::UrlTypedCountPair and use it to | 64 // Extract the GURL stored in an UrlTypedCountPair and use it to look up the |
71 // look up the corresponding BookmarkNode. | 65 // corresponding TitledUrlNode. |
72 class UrlTypedCountPairNodeLookupFunctor { | 66 class UrlTypedCountPairNodeLookupFunctor { |
73 public: | 67 public: |
74 explicit UrlTypedCountPairNodeLookupFunctor(UrlNodeMap& url_node_map) | 68 explicit UrlTypedCountPairNodeLookupFunctor(UrlNodeMap& url_node_map) |
75 : url_node_map_(url_node_map) { | 69 : url_node_map_(url_node_map) { |
76 } | 70 } |
77 | 71 |
78 const BookmarkNode* operator()(const UrlTypedCountPair& pair) const { | 72 const TitledUrlNode* operator()(const UrlTypedCountPair& pair) const { |
79 return url_node_map_[pair.first]; | 73 return url_node_map_[pair.first]; |
80 } | 74 } |
81 | 75 |
82 private: | 76 private: |
83 UrlNodeMap& url_node_map_; | 77 UrlNodeMap& url_node_map_; |
84 }; | 78 }; |
85 | 79 |
86 } // namespace | 80 } // namespace |
87 | 81 |
88 BookmarkIndex::BookmarkIndex(BookmarkClient* client) | 82 BookmarkIndex::BookmarkIndex(BookmarkClient* client) |
89 : client_(client) { | 83 : client_(client) { |
90 DCHECK(client_); | 84 DCHECK(client_); |
91 } | 85 } |
92 | 86 |
93 BookmarkIndex::~BookmarkIndex() { | 87 BookmarkIndex::~BookmarkIndex() { |
94 } | 88 } |
95 | 89 |
96 void BookmarkIndex::Add(const BookmarkNode* node) { | 90 void BookmarkIndex::Add(const TitledUrlNode* node) { |
97 if (!node->is_url()) | 91 if (!node->GetTitledUrlNodeUrl().is_valid()) |
98 return; | 92 return; |
99 std::vector<base::string16> terms = | 93 std::vector<base::string16> terms = |
100 ExtractQueryWords(Normalize(node->GetTitle())); | 94 ExtractQueryWords(Normalize(node->GetTitledUrlNodeTitle())); |
101 for (size_t i = 0; i < terms.size(); ++i) | 95 for (size_t i = 0; i < terms.size(); ++i) |
102 RegisterNode(terms[i], node); | 96 RegisterNode(terms[i], node); |
103 terms = | 97 terms = ExtractQueryWords( |
104 ExtractQueryWords(CleanUpUrlForMatching(node->url(), nullptr)); | 98 CleanUpUrlForMatching(node->GetTitledUrlNodeUrl(), nullptr)); |
105 for (size_t i = 0; i < terms.size(); ++i) | 99 for (size_t i = 0; i < terms.size(); ++i) |
106 RegisterNode(terms[i], node); | 100 RegisterNode(terms[i], node); |
107 } | 101 } |
108 | 102 |
109 void BookmarkIndex::Remove(const BookmarkNode* node) { | 103 void BookmarkIndex::Remove(const TitledUrlNode* node) { |
110 if (!node->is_url()) | 104 if (!node->GetTitledUrlNodeUrl().is_valid()) |
sky (2016/12/08 23:01:07):
This bothered me last time, but I'll ask now. The …
mattreynolds (2016/12/09 02:01:14):
BookmarkModel::AddNodeToInternalMaps already check…
111 return; | 105 return; |
112 | 106 |
113 std::vector<base::string16> terms = | 107 std::vector<base::string16> terms = |
114 ExtractQueryWords(Normalize(node->GetTitle())); | 108 ExtractQueryWords(Normalize(node->GetTitledUrlNodeTitle())); |
115 for (size_t i = 0; i < terms.size(); ++i) | 109 for (size_t i = 0; i < terms.size(); ++i) |
116 UnregisterNode(terms[i], node); | 110 UnregisterNode(terms[i], node); |
117 terms = | 111 terms = ExtractQueryWords( |
118 ExtractQueryWords(CleanUpUrlForMatching(node->url(), nullptr)); | 112 CleanUpUrlForMatching(node->GetTitledUrlNodeUrl(), nullptr)); |
119 for (size_t i = 0; i < terms.size(); ++i) | 113 for (size_t i = 0; i < terms.size(); ++i) |
120 UnregisterNode(terms[i], node); | 114 UnregisterNode(terms[i], node); |
121 } | 115 } |
122 | 116 |
123 void BookmarkIndex::GetBookmarksMatching( | 117 void BookmarkIndex::GetResultsMatching( |
124 const base::string16& input_query, | 118 const base::string16& input_query, |
125 size_t max_count, | 119 size_t max_count, |
126 query_parser::MatchingAlgorithm matching_algorithm, | 120 query_parser::MatchingAlgorithm matching_algorithm, |
127 std::vector<BookmarkMatch>* results) { | 121 std::vector<BookmarkMatch>* results) { |
128 const base::string16 query = Normalize(input_query); | 122 const base::string16 query = Normalize(input_query); |
129 std::vector<base::string16> terms = ExtractQueryWords(query); | 123 std::vector<base::string16> terms = ExtractQueryWords(query); |
130 if (terms.empty()) | 124 if (terms.empty()) |
131 return; | 125 return; |
132 | 126 |
133 NodeSet matches; | 127 TitledUrlNodeSet matches; |
134 for (size_t i = 0; i < terms.size(); ++i) { | 128 for (size_t i = 0; i < terms.size(); ++i) { |
135 if (!GetBookmarksMatchingTerm( | 129 if (!GetResultsMatchingTerm(terms[i], i == 0, matching_algorithm, |
136 terms[i], i == 0, matching_algorithm, &matches)) { | 130 &matches)) { |
137 return; | 131 return; |
138 } | 132 } |
139 } | 133 } |
140 | 134 |
141 Nodes sorted_nodes; | 135 TitledUrlNodes sorted_nodes; |
142 SortMatches(matches, &sorted_nodes); | 136 SortMatches(matches, &sorted_nodes); |
143 | 137 |
144 // We use a QueryParser to fill in match positions for us. It's not the most | 138 // We use a QueryParser to fill in match positions for us. It's not the most |
145 // efficient way to go about this, but by the time we get here we know what | 139 // efficient way to go about this, but by the time we get here we know what |
146 // matches and so this shouldn't be performance critical. | 140 // matches and so this shouldn't be performance critical. |
147 query_parser::QueryParser parser; | 141 query_parser::QueryParser parser; |
148 query_parser::QueryNodeVector query_nodes; | 142 query_parser::QueryNodeVector query_nodes; |
149 parser.ParseQueryNodes(query, matching_algorithm, &query_nodes); | 143 parser.ParseQueryNodes(query, matching_algorithm, &query_nodes); |
150 | 144 |
151 // The highest typed counts should be at the beginning of the results vector | 145 // The highest typed counts should be at the beginning of the results vector |
152 // so that the best matches will always be included in the results. The loop | 146 // so that the best matches will always be included in the results. The loop |
153 // that calculates result relevance in HistoryContentsProvider::ConvertResults | 147 // that calculates result relevance in HistoryContentsProvider::ConvertResults |
154 // will run backwards to assure higher relevance will be attributed to the | 148 // will run backwards to assure higher relevance will be attributed to the |
155 // best matches. | 149 // best matches. |
156 for (Nodes::const_iterator i = sorted_nodes.begin(); | 150 for (TitledUrlNodes::const_iterator i = sorted_nodes.begin(); |
157 i != sorted_nodes.end() && results->size() < max_count; | 151 i != sorted_nodes.end() && results->size() < max_count; |
158 ++i) | 152 ++i) |
159 AddMatchToResults(*i, &parser, query_nodes, results); | 153 AddMatchToResults(*i, &parser, query_nodes, results); |
160 } | 154 } |
161 | 155 |
162 void BookmarkIndex::SortMatches(const NodeSet& matches, | 156 void BookmarkIndex::SortMatches(const TitledUrlNodeSet& matches, |
163 Nodes* sorted_nodes) const { | 157 TitledUrlNodes* sorted_nodes) const { |
164 sorted_nodes->reserve(matches.size()); | 158 sorted_nodes->reserve(matches.size()); |
165 if (client_->SupportsTypedCountForUrls()) { | 159 if (client_->SupportsTypedCountForUrls()) { |
166 UrlNodeMap url_node_map; | 160 UrlNodeMap url_node_map; |
167 UrlTypedCountMap url_typed_count_map; | 161 UrlTypedCountMap url_typed_count_map; |
168 for (auto node : matches) { | 162 for (auto node : matches) { |
169 url_node_map.insert(std::make_pair(&node->url(), node)); | 163 const GURL& url = node->GetTitledUrlNodeUrl(); |
170 url_typed_count_map.insert(std::make_pair(&node->url(), 0)); | 164 url_node_map.insert(std::make_pair(&url, node)); |
| 165 url_typed_count_map.insert(std::make_pair(&url, 0)); |
171 } | 166 } |
172 | 167 |
173 client_->GetTypedCountForUrls(&url_typed_count_map); | 168 client_->GetTypedCountForUrls(&url_typed_count_map); |
174 | 169 |
175 UrlTypedCountPairs url_typed_counts; | 170 UrlTypedCountPairs url_typed_counts; |
176 std::copy(url_typed_count_map.begin(), | 171 std::copy(url_typed_count_map.begin(), |
177 url_typed_count_map.end(), | 172 url_typed_count_map.end(), |
178 std::back_inserter(url_typed_counts)); | 173 std::back_inserter(url_typed_counts)); |
179 std::sort(url_typed_counts.begin(), | 174 std::sort(url_typed_counts.begin(), |
180 url_typed_counts.end(), | 175 url_typed_counts.end(), |
181 UrlTypedCountPairSortFunctor()); | 176 UrlTypedCountPairSortFunctor()); |
182 std::transform(url_typed_counts.begin(), | 177 std::transform(url_typed_counts.begin(), |
183 url_typed_counts.end(), | 178 url_typed_counts.end(), |
184 std::back_inserter(*sorted_nodes), | 179 std::back_inserter(*sorted_nodes), |
185 UrlTypedCountPairNodeLookupFunctor(url_node_map)); | 180 UrlTypedCountPairNodeLookupFunctor(url_node_map)); |
186 } else { | 181 } else { |
187 sorted_nodes->insert(sorted_nodes->end(), matches.begin(), matches.end()); | 182 sorted_nodes->insert(sorted_nodes->end(), matches.begin(), matches.end()); |
188 } | 183 } |
189 } | 184 } |
190 | 185 |
191 void BookmarkIndex::AddMatchToResults( | 186 void BookmarkIndex::AddMatchToResults( |
192 const BookmarkNode* node, | 187 const TitledUrlNode* node, |
193 query_parser::QueryParser* parser, | 188 query_parser::QueryParser* parser, |
194 const query_parser::QueryNodeVector& query_nodes, | 189 const query_parser::QueryNodeVector& query_nodes, |
195 std::vector<BookmarkMatch>* results) { | 190 std::vector<BookmarkMatch>* results) { |
196 if (!node) { | 191 if (!node) { |
197 return; | 192 return; |
198 } | 193 } |
199 // Check that the result matches the query. The previous search | 194 // Check that the result matches the query. The previous search |
200 // was a simple per-word search, while the more complex matching | 195 // was a simple per-word search, while the more complex matching |
201 // of QueryParser may filter it out. For example, the query | 196 // of QueryParser may filter it out. For example, the query |
202 // ["thi"] will match the bookmark titled [Thinking], but since | 197 // ["thi"] will match the title [Thinking], but since |
203 // ["thi"] is quoted we don't want to do a prefix match. | 198 // ["thi"] is quoted we don't want to do a prefix match. |
204 query_parser::QueryWordVector title_words, url_words; | 199 query_parser::QueryWordVector title_words, url_words; |
205 const base::string16 lower_title = | 200 const base::string16 lower_title = |
206 base::i18n::ToLower(Normalize(node->GetTitle())); | 201 base::i18n::ToLower(Normalize(node->GetTitledUrlNodeTitle())); |
207 parser->ExtractQueryWords(lower_title, &title_words); | 202 parser->ExtractQueryWords(lower_title, &title_words); |
208 base::OffsetAdjuster::Adjustments adjustments; | 203 base::OffsetAdjuster::Adjustments adjustments; |
209 parser->ExtractQueryWords( | 204 parser->ExtractQueryWords( |
210 CleanUpUrlForMatching(node->url(), &adjustments), | 205 CleanUpUrlForMatching(node->GetTitledUrlNodeUrl(), &adjustments), |
211 &url_words); | 206 &url_words); |
212 query_parser::Snippet::MatchPositions title_matches, url_matches; | 207 query_parser::Snippet::MatchPositions title_matches, url_matches; |
213 for (const auto& node : query_nodes) { | 208 for (const auto& node : query_nodes) { |
214 const bool has_title_matches = | 209 const bool has_title_matches = |
215 node->HasMatchIn(title_words, &title_matches); | 210 node->HasMatchIn(title_words, &title_matches); |
216 const bool has_url_matches = node->HasMatchIn(url_words, &url_matches); | 211 const bool has_url_matches = node->HasMatchIn(url_words, &url_matches); |
217 if (!has_title_matches && !has_url_matches) | 212 if (!has_title_matches && !has_url_matches) |
218 return; | 213 return; |
219 query_parser::QueryParser::SortAndCoalesceMatchPositions(&title_matches); | 214 query_parser::QueryParser::SortAndCoalesceMatchPositions(&title_matches); |
220 query_parser::QueryParser::SortAndCoalesceMatchPositions(&url_matches); | 215 query_parser::QueryParser::SortAndCoalesceMatchPositions(&url_matches); |
221 } | 216 } |
222 BookmarkMatch match; | 217 BookmarkMatch match; |
223 if (lower_title.length() == node->GetTitle().length()) { | 218 if (lower_title.length() == node->GetTitledUrlNodeTitle().length()) { |
224 // Only use title matches if the lowercase string is the same length | 219 // Only use title matches if the lowercase string is the same length |
225 // as the original string, otherwise the matches are meaningless. | 220 // as the original string, otherwise the matches are meaningless. |
226 // TODO(mpearson): revise match positions appropriately. | 221 // TODO(mpearson): revise match positions appropriately. |
227 match.title_match_positions.swap(title_matches); | 222 match.title_match_positions.swap(title_matches); |
228 } | 223 } |
229 // Now that we're done processing this entry, correct the offsets of the | 224 // Now that we're done processing this entry, correct the offsets of the |
230 // matches in |url_matches| so they point to offsets in the original URL | 225 // matches in |url_matches| so they point to offsets in the original URL |
231 // spec, not the cleaned-up URL string that we used for matching. | 226 // spec, not the cleaned-up URL string that we used for matching. |
232 std::vector<size_t> offsets = | 227 std::vector<size_t> offsets = |
233 BookmarkMatch::OffsetsFromMatchPositions(url_matches); | 228 BookmarkMatch::OffsetsFromMatchPositions(url_matches); |
234 base::OffsetAdjuster::UnadjustOffsets(adjustments, &offsets); | 229 base::OffsetAdjuster::UnadjustOffsets(adjustments, &offsets); |
235 url_matches = | 230 url_matches = |
236 BookmarkMatch::ReplaceOffsetsInMatchPositions(url_matches, offsets); | 231 BookmarkMatch::ReplaceOffsetsInMatchPositions(url_matches, offsets); |
237 match.url_match_positions.swap(url_matches); | 232 match.url_match_positions.swap(url_matches); |
238 match.node = node; | 233 match.node = node; |
239 results->push_back(match); | 234 results->push_back(match); |
240 } | 235 } |
241 | 236 |
242 bool BookmarkIndex::GetBookmarksMatchingTerm( | 237 bool BookmarkIndex::GetResultsMatchingTerm( |
243 const base::string16& term, | 238 const base::string16& term, |
244 bool first_term, | 239 bool first_term, |
245 query_parser::MatchingAlgorithm matching_algorithm, | 240 query_parser::MatchingAlgorithm matching_algorithm, |
246 NodeSet* matches) { | 241 TitledUrlNodeSet* matches) { |
247 Index::const_iterator i = index_.lower_bound(term); | 242 Index::const_iterator i = index_.lower_bound(term); |
248 if (i == index_.end()) | 243 if (i == index_.end()) |
249 return false; | 244 return false; |
250 | 245 |
251 if (!query_parser::QueryParser::IsWordLongEnoughForPrefixSearch( | 246 if (!query_parser::QueryParser::IsWordLongEnoughForPrefixSearch( |
252 term, matching_algorithm)) { | 247 term, matching_algorithm)) { |
253 // Term is too short for prefix match, compare using exact match. | 248 // Term is too short for prefix match, compare using exact match. |
254 if (i->first != term) | 249 if (i->first != term) |
255 return false; // No bookmarks with this term. | 250 return false; // No title/URL pairs with this term. |
256 | 251 |
257 if (first_term) { | 252 if (first_term) { |
258 (*matches) = i->second; | 253 (*matches) = i->second; |
259 return true; | 254 return true; |
260 } | 255 } |
261 *matches = base::STLSetIntersection<NodeSet>(i->second, *matches); | 256 *matches = base::STLSetIntersection<TitledUrlNodeSet>(i->second, *matches); |
262 } else { | 257 } else { |
263 // Loop through index adding all entries that start with term to | 258 // Loop through index adding all entries that start with term to |
264 // |prefix_matches|. | 259 // |prefix_matches|. |
265 NodeSet tmp_prefix_matches; | 260 TitledUrlNodeSet tmp_prefix_matches; |
266 // If this is the first term, then store the result directly in |matches| | 261 // If this is the first term, then store the result directly in |matches| |
267 // to avoid calling stl intersection (which requires a copy). | 262 // to avoid calling stl intersection (which requires a copy). |
268 NodeSet* prefix_matches = first_term ? matches : &tmp_prefix_matches; | 263 TitledUrlNodeSet* prefix_matches = |
| 264 first_term ? matches : &tmp_prefix_matches; |
269 while (i != index_.end() && | 265 while (i != index_.end() && |
270 i->first.size() >= term.size() && | 266 i->first.size() >= term.size() && |
271 term.compare(0, term.size(), i->first, 0, term.size()) == 0) { | 267 term.compare(0, term.size(), i->first, 0, term.size()) == 0) { |
272 #if !defined(OS_ANDROID) | 268 #if !defined(OS_ANDROID) |
273 prefix_matches->insert(i->second.begin(), i->second.end()); | 269 prefix_matches->insert(i->second.begin(), i->second.end()); |
274 #else | 270 #else |
275 // Work around a bug in the implementation of std::set::insert in the STL | 271 // Work around a bug in the implementation of std::set::insert in the STL |
276 // used on android (http://crbug.com/367050). | 272 // used on android (http://crbug.com/367050). |
277 for (NodeSet::const_iterator n = i->second.begin(); n != i->second.end(); | 273 for (TitledUrlNodeSet::const_iterator n = i->second.begin(); |
| 274 n != i->second.end(); |
278 ++n) | 275 ++n) |
279 prefix_matches->insert(prefix_matches->end(), *n); | 276 prefix_matches->insert(prefix_matches->end(), *n); |
280 #endif | 277 #endif |
281 ++i; | 278 ++i; |
282 } | 279 } |
283 if (!first_term) | 280 if (!first_term) { |
284 *matches = base::STLSetIntersection<NodeSet>(*prefix_matches, *matches); | 281 *matches = |
| 282 base::STLSetIntersection<TitledUrlNodeSet>(*prefix_matches, *matches); |
| 283 } |
285 } | 284 } |
286 return !matches->empty(); | 285 return !matches->empty(); |
287 } | 286 } |
288 | 287 |
289 std::vector<base::string16> BookmarkIndex::ExtractQueryWords( | 288 std::vector<base::string16> BookmarkIndex::ExtractQueryWords( |
290 const base::string16& query) { | 289 const base::string16& query) { |
291 std::vector<base::string16> terms; | 290 std::vector<base::string16> terms; |
292 if (query.empty()) | 291 if (query.empty()) |
293 return std::vector<base::string16>(); | 292 return std::vector<base::string16>(); |
294 query_parser::QueryParser parser; | 293 query_parser::QueryParser parser; |
295 parser.ParseQueryWords(base::i18n::ToLower(query), | 294 parser.ParseQueryWords(base::i18n::ToLower(query), |
296 query_parser::MatchingAlgorithm::DEFAULT, | 295 query_parser::MatchingAlgorithm::DEFAULT, |
297 &terms); | 296 &terms); |
298 return terms; | 297 return terms; |
299 } | 298 } |
300 | 299 |
301 void BookmarkIndex::RegisterNode(const base::string16& term, | 300 void BookmarkIndex::RegisterNode(const base::string16& term, |
302 const BookmarkNode* node) { | 301 const TitledUrlNode* node) { |
303 index_[term].insert(node); | 302 index_[term].insert(node); |
304 } | 303 } |
305 | 304 |
306 void BookmarkIndex::UnregisterNode(const base::string16& term, | 305 void BookmarkIndex::UnregisterNode(const base::string16& term, |
307 const BookmarkNode* node) { | 306 const TitledUrlNode* node) { |
308 Index::iterator i = index_.find(term); | 307 Index::iterator i = index_.find(term); |
309 if (i == index_.end()) { | 308 if (i == index_.end()) { |
310 // We can get here if the node has the same term more than once. For | 309 // We can get here if the node has the same term more than once. For |
311 // example, a bookmark with the title 'foo foo' would end up here. | 310 // example, a node with the title 'foo foo' would end up here. |
312 return; | 311 return; |
313 } | 312 } |
314 i->second.erase(node); | 313 i->second.erase(node); |
315 if (i->second.empty()) | 314 if (i->second.empty()) |
316 index_.erase(i); | 315 index_.erase(i); |
317 } | 316 } |
318 | 317 |
319 } // namespace bookmarks | 318 } // namespace bookmarks |
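
For readers following the BookmarkNode-to-TitledUrlNode change above: judging only from the calls this file now makes (GetTitledUrlNodeTitle() and GetTitledUrlNodeUrl()), the index appears to depend on an interface roughly like the minimal sketch below. This is a hedged reconstruction, not the actual header; the real declaration lives in components/bookmarks/browser/titled_url_node.h, and the exact return types and the protected destructor are assumptions.

// Hypothetical sketch of the TitledUrlNode interface as consumed by
// BookmarkIndex in this change. Return types and the protected destructor
// are assumptions inferred from the call sites above; see
// components/bookmarks/browser/titled_url_node.h for the real declaration.
#include "base/strings/string16.h"
#include "url/gurl.h"

namespace bookmarks {

class TitledUrlNode {
 public:
  // Title text that BookmarkIndex::Add() normalizes and indexes.
  virtual base::string16 GetTitledUrlNodeTitle() const = 0;

  // URL that is cleaned up and indexed; Add()/Remove() skip invalid URLs.
  virtual const GURL& GetTitledUrlNodeUrl() const = 0;

 protected:
  virtual ~TitledUrlNode() {}
};

}  // namespace bookmarks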