OLD | NEW |
1 // Copyright (c) 2012 The Chromium Authors. All rights reserved. | 1 // Copyright (c) 2012 The Chromium Authors. All rights reserved. |
2 // Use of this source code is governed by a BSD-style license that can be | 2 // Use of this source code is governed by a BSD-style license that can be |
3 // found in the LICENSE file. | 3 // found in the LICENSE file. |
4 | 4 |
5 #include "chrome/browser/history/scored_history_match.h" | 5 #include "chrome/browser/autocomplete/scored_history_match_builder.h" |
| 6 |
| 7 #include <math.h> |
6 | 8 |
7 #include <algorithm> | 9 #include <algorithm> |
8 #include <functional> | |
9 #include <iterator> | |
10 #include <numeric> | |
11 #include <set> | |
12 | |
13 #include <math.h> | |
14 | 10 |
15 #include "base/logging.h" | 11 #include "base/logging.h" |
16 #include "base/metrics/histogram.h" | 12 #include "base/metrics/histogram.h" |
17 #include "base/strings/string_util.h" | 13 #include "base/strings/string_util.h" |
18 #include "base/strings/utf_string_conversions.h" | 14 #include "base/strings/utf_string_conversions.h" |
19 #include "chrome/browser/autocomplete/history_url_provider.h" | 15 #include "chrome/browser/autocomplete/history_url_provider.h" |
| 16 #include "components/bookmarks/browser/bookmark_model.h" |
20 #include "components/bookmarks/browser/bookmark_utils.h" | 17 #include "components/bookmarks/browser/bookmark_utils.h" |
21 #include "components/history/core/browser/history_client.h" | 18 #include "components/history/core/browser/history_client.h" |
22 #include "components/omnibox/omnibox_field_trial.h" | 19 #include "components/omnibox/omnibox_field_trial.h" |
23 #include "components/omnibox/url_prefix.h" | 20 #include "components/omnibox/url_prefix.h" |
24 #include "content/public/browser/browser_thread.h" | 21 #include "content/public/browser/browser_thread.h" |
25 | 22 |
26 namespace history { | 23 namespace { |
27 | 24 |
28 // ScoredHistoryMatch ---------------------------------------------------------- | 25 // The number of days of recency scores to precompute. |
| 26 const int kDaysToPrecomputeRecencyScoresFor = 366; |
| 27 |
 | 28 // The number of raw term score buckets to use; raw term scores |
 | 29 // greater than this are capped at the score of the largest bucket. |
| 30 const int kMaxRawTermScore = 30; |
| 31 |
 | 32 // If true, assign raw scores to be max(whatever it normally would be, |
 | 33 // a score that's similar to the score the HistoryURL provider would |
 | 34 // assign). This is currently a compile-time constant rather than a |
 | 35 // value read from the field trial state. |
| 36 const bool kAlsoDoHupLikeScoring = false; |
| 37 |
| 38 // Pre-computed information to speed up calculating recency scores. |
 | 39 // |days_ago_to_recency_score| is a simple array mapping how long |
 | 40 // ago a page was visited (in days) to the recency score we should |
 | 41 // assign it. This allows easy lookups of scores without requiring |
 | 42 // math. It is filled in by InitDaysAgoToRecencyScoreArray(), which |
 | 43 // is called from ScoredHistoryMatchBuilder::Init(). |
| 44 const float* days_ago_to_recency_score = nullptr; |
| 45 |
| 46 // Pre-computed information to speed up calculating topicality |
 | 47 // scores. |raw_term_score_to_topicality_score| is a simple array |
 | 48 // mapping raw term scores (a weighted sum of the number of hits |
 | 49 // for the term, weighted by how important the hit is: hostname, |
 | 50 // path, etc.) to the topicality score we should assign it. This |
 | 51 // allows easy lookups of scores without requiring math. It is |
 | 52 // filled in by InitRawTermScoreToTopicalityScoreArray(), which is |
 | 53 // called from ScoredHistoryMatchBuilder::Init(). |
| 54 const float* raw_term_score_to_topicality_score = nullptr; |
| 55 |
| 56 // The maximum score that can be assigned to non-inlineable matches. |
| 57 // This is useful because often we want inlineable matches to come |
 | 58 // first (even if they sometimes don't score as well as non-inlineable |
 | 59 // matches), because if a non-inlineable match comes first then all |
 | 60 // matches get demoted later in HistoryQuickProvider to non-inlineable scores. |
| 61 // Set to -1 to indicate no maximum score. |
| 62 int max_assigned_score_for_non_inlineable_matches = -1; |
| 63 |
| 64 // Precalculates raw_term_score_to_topicality_score, used in |
| 65 // GetTopicalityScore(). |
| 66 void InitRawTermScoreToTopicalityScoreArray() { |
| 67 DCHECK(!raw_term_score_to_topicality_score); |
| 68 float* new_raw_term_score_to_topicality_score = new float[kMaxRawTermScore]; |
| 69 for (int term_score = 0; term_score < kMaxRawTermScore; ++term_score) { |
| 70 float topicality_score; |
| 71 if (term_score < 10) { |
| 72 // If the term scores less than 10 points (no full-credit hit, or |
| 73 // no combination of hits that score that well), then the topicality |
| 74 // score is linear in the term score. |
| 75 topicality_score = 0.1 * term_score; |
| 76 } else { |
| 77 // For term scores of at least ten points, pass them through a log |
| 78 // function so a score of 10 points gets a 1.0 (to meet up exactly |
| 79 // with the linear component) and increases logarithmically until |
 | 80 // maxing out at 30 points, which computes to a score around 2.1. |
| 81 topicality_score = (1.0 + 2.25 * log10(0.1 * term_score)); |
| 82 } |
| 83 new_raw_term_score_to_topicality_score[term_score] = topicality_score; |
| 84 } |
| 85 raw_term_score_to_topicality_score = new_raw_term_score_to_topicality_score; |
| 86 } |
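
For reference, the transform above is easy to check by hand: a raw term score of 10 maps to exactly 1.0 and the capped maximum of 29 maps to roughly 2.04. The standalone sketch below is illustrative only and not part of this CL (the helper name TopicalityForRawTermScore is made up for the example); it reproduces the same piecewise linear/log mapping:

#include <math.h>
#include <stdio.h>

// Mirrors InitRawTermScoreToTopicalityScoreArray(): linear below 10 points,
// logarithmic from 10 points up to the cap of kMaxRawTermScore - 1.
double TopicalityForRawTermScore(int term_score) {
  const int kMaxRawTermScore = 30;
  if (term_score >= kMaxRawTermScore)
    term_score = kMaxRawTermScore - 1;
  if (term_score < 10)
    return 0.1 * term_score;
  return 1.0 + 2.25 * log10(0.1 * term_score);
}

int main() {
  const int samples[] = {5, 10, 20, 29};
  // Prints approximately 0.50, 1.00, 1.68 and 2.04.
  for (int raw : samples)
    printf("raw=%d -> topicality=%.2f\n", raw, TopicalityForRawTermScore(raw));
  return 0;
}
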
| 87 |
| 88 // Pre-calculates days_ago_to_recency_score, used in GetRecencyScore(). |
| 89 void InitDaysAgoToRecencyScoreArray() { |
| 90 DCHECK(!days_ago_to_recency_score); |
| 91 float* new_days_ago_to_recency_score = |
| 92 new float[kDaysToPrecomputeRecencyScoresFor]; |
| 93 for (int days_ago = 0; days_ago < kDaysToPrecomputeRecencyScoresFor; |
| 94 days_ago++) { |
| 95 int unnormalized_recency_score; |
| 96 if (days_ago <= 4) { |
| 97 unnormalized_recency_score = 100; |
| 98 } else if (days_ago <= 14) { |
| 99 // Linearly extrapolate between 4 and 14 days so 14 days has a score |
| 100 // of 70. |
| 101 unnormalized_recency_score = 70 + (14 - days_ago) * (100 - 70) / (14 - 4); |
| 102 } else if (days_ago <= 31) { |
| 103 // Linearly extrapolate between 14 and 31 days so 31 days has a score |
| 104 // of 50. |
| 105 unnormalized_recency_score = 50 + (31 - days_ago) * (70 - 50) / (31 - 14); |
| 106 } else if (days_ago <= 90) { |
| 107 // Linearly extrapolate between 30 and 90 days so 90 days has a score |
| 108 // of 30. |
| 109 unnormalized_recency_score = 30 + (90 - days_ago) * (50 - 30) / (90 - 30); |
| 110 } else { |
| 111 // Linearly extrapolate between 90 and 365 days so 365 days has a score |
| 112 // of 10. |
| 113 unnormalized_recency_score = |
| 114 10 + (365 - days_ago) * (20 - 10) / (365 - 90); |
| 115 } |
| 116 new_days_ago_to_recency_score[days_ago] = |
| 117 unnormalized_recency_score / 100.0; |
| 118 if (days_ago > 0) { |
| 119 DCHECK_LE(new_days_ago_to_recency_score[days_ago], |
| 120 new_days_ago_to_recency_score[days_ago - 1]); |
| 121 } |
| 122 } |
| 123 days_ago_to_recency_score = new_days_ago_to_recency_score; |
| 124 } |
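
Similarly, the recency buckets above can be sanity-checked with a standalone sketch (again illustrative only; RecencyForDaysAgo is a made-up name, not part of the CL). Note that the interpolation is done in integer arithmetic before normalizing by 100, exactly as in InitDaysAgoToRecencyScoreArray():

#include <algorithm>
#include <stdio.h>

// Mirrors InitDaysAgoToRecencyScoreArray(): piecewise-linear integer
// interpolation between the 4/14/31/90/365-day bucket boundaries,
// normalized to [0, 1] at the end.
double RecencyForDaysAgo(int days_ago) {
  days_ago = std::max(0, std::min(days_ago, 365));
  int unnormalized;
  if (days_ago <= 4)
    unnormalized = 100;
  else if (days_ago <= 14)
    unnormalized = 70 + (14 - days_ago) * (100 - 70) / (14 - 4);
  else if (days_ago <= 31)
    unnormalized = 50 + (31 - days_ago) * (70 - 50) / (31 - 14);
  else if (days_ago <= 90)
    unnormalized = 30 + (90 - days_ago) * (50 - 30) / (90 - 30);
  else
    unnormalized = 10 + (365 - days_ago) * (20 - 10) / (365 - 90);
  return unnormalized / 100.0;
}

int main() {
  const int samples[] = {3, 10, 20, 60, 200};
  // Prints 1.00, 0.82, 0.62, 0.40 and 0.16.
  for (int days : samples)
    printf("days_ago=%d -> recency=%.2f\n", days, RecencyForDaysAgo(days));
  return 0;
}
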
| 125 |
| 126 } // namespace |
29 | 127 |
30 // static | 128 // static |
31 const size_t ScoredHistoryMatch::kMaxVisitsToScore = 10; | 129 int ScoredHistoryMatchBuilder::bookmark_value_ = 1; |
32 const int ScoredHistoryMatch::kDaysToPrecomputeRecencyScoresFor = 366; | 130 bool ScoredHistoryMatchBuilder::allow_tld_matches_ = false; |
33 const int ScoredHistoryMatch::kMaxRawTermScore = 30; | 131 bool ScoredHistoryMatchBuilder::allow_scheme_matches_ = false; |
34 float* ScoredHistoryMatch::raw_term_score_to_topicality_score_ = NULL; | 132 |
35 float* ScoredHistoryMatch::days_ago_to_recency_score_ = NULL; | 133 ScoredHistoryMatchBuilder::ScoredHistoryMatchBuilder( |
36 bool ScoredHistoryMatch::initialized_ = false; | 134 const IsBookmarkedCallback& is_bookmarked) |
37 int ScoredHistoryMatch::bookmark_value_ = 1; | 135 : is_bookmarked_(is_bookmarked) { |
38 bool ScoredHistoryMatch::allow_tld_matches_ = false; | |
39 bool ScoredHistoryMatch::allow_scheme_matches_ = false; | |
40 bool ScoredHistoryMatch::also_do_hup_like_scoring_ = false; | |
41 int ScoredHistoryMatch::max_assigned_score_for_non_inlineable_matches_ = -1; | |
42 | |
43 ScoredHistoryMatch::ScoredHistoryMatch() | |
44 : raw_score_(0), | |
45 can_inline_(false) { | |
46 Init(); | 136 Init(); |
47 } | 137 } |
48 | 138 |
49 ScoredHistoryMatch::ScoredHistoryMatch( | 139 ScoredHistoryMatchBuilder::~ScoredHistoryMatchBuilder() { |
50 const URLRow& row, | |
51 const VisitInfoVector& visits, | |
52 const std::string& languages, | |
53 const base::string16& lower_string, | |
54 const String16Vector& terms, | |
55 const WordStarts& terms_to_word_starts_offsets, | |
56 const RowWordStarts& word_starts, | |
57 const base::Time now, | |
58 HistoryClient* history_client) | |
59 : HistoryMatch(row, 0, false, false), | |
60 raw_score_(0), | |
61 can_inline_(false) { | |
62 Init(); | |
63 | |
64 GURL gurl = row.url(); | |
65 if (!gurl.is_valid()) | |
66 return; | |
67 | |
68 // Figure out where each search term appears in the URL and/or page title | |
69 // so that we can score as well as provide autocomplete highlighting. | |
70 base::OffsetAdjuster::Adjustments adjustments; | |
71 base::string16 url = | |
72 bookmarks::CleanUpUrlForMatching(gurl, languages, &adjustments); | |
73 base::string16 title = bookmarks::CleanUpTitleForMatching(row.title()); | |
74 int term_num = 0; | |
75 for (String16Vector::const_iterator iter = terms.begin(); iter != terms.end(); | |
76 ++iter, ++term_num) { | |
77 base::string16 term = *iter; | |
78 TermMatches url_term_matches = MatchTermInString(term, url, term_num); | |
79 TermMatches title_term_matches = MatchTermInString(term, title, term_num); | |
80 if (url_term_matches.empty() && title_term_matches.empty()) | |
81 return; // A term was not found in either URL or title - reject. | |
82 url_matches_.insert(url_matches_.end(), url_term_matches.begin(), | |
83 url_term_matches.end()); | |
84 title_matches_.insert(title_matches_.end(), title_term_matches.begin(), | |
85 title_term_matches.end()); | |
86 } | |
87 | |
88 // Sort matches by offset and eliminate any which overlap. | |
89 // TODO(mpearson): Investigate whether this has any meaningful | |
90 // effect on scoring. (It's necessary at some point: removing | |
91 // overlaps and sorting is needed to decide what to highlight in the | |
92 // suggestion string. But this sort and de-overlap doesn't have to | |
93 // be done before scoring.) | |
94 url_matches_ = SortAndDeoverlapMatches(url_matches_); | |
95 title_matches_ = SortAndDeoverlapMatches(title_matches_); | |
96 | |
97 // We can inline autocomplete a match if: | |
98 // 1) there is only one search term | |
99 // 2) AND the match begins immediately after one of the prefixes in | |
100 // URLPrefix such as http://www and https:// (note that one of these | |
101 // is the empty prefix, for cases where the user has typed the scheme) | |
102 // 3) AND the search string does not end in whitespace (making it look to | |
103 // the IMUI as though there is a single search term when actually there | |
104 // is a second, empty term). | |
105 // |best_inlineable_prefix| stores the inlineable prefix computed in | |
106 // clause (2) or NULL if no such prefix exists. (The URL is not inlineable.) | |
107 // Note that using the best prefix here means that when multiple | |
108 // prefixes match, we'll choose to inline following the longest one. | |
109 // For a URL like "http://www.washingtonmutual.com", this means | |
110 // typing "w" will inline "ashington..." instead of "ww.washington...". | |
111 const URLPrefix* best_inlineable_prefix = | |
112 (!url_matches_.empty() && (terms.size() == 1)) ? | |
113 URLPrefix::BestURLPrefix(base::UTF8ToUTF16(gurl.spec()), terms[0]) : | |
114 NULL; | |
115 can_inline_ = (best_inlineable_prefix != NULL) && | |
116 !IsWhitespace(*(lower_string.rbegin())); | |
117 if (can_inline_) { | |
118 // Initialize innermost_match. | |
119 // The idea here is that matches that occur in the scheme or | |
120 // "www." are worse than matches which don't. For the URLs | |
121 // "http://www.google.com" and "http://wellsfargo.com", we want | |
122 // the omnibox input "w" to cause the latter URL to rank higher | |
123 // than the former. Note that this is not the same as checking | |
124 // whether one match's inlinable prefix has more components than | |
125 // the other match's, since in this example, both matches would | |
126 // have an inlinable prefix of "http://", which is one component. | |
127 // | |
128 // Instead, we look for the overall best (i.e., most components) | |
129 // prefix of the current URL, and then check whether the inlinable | |
130 // prefix has that many components. If it does, this is an | |
131 // "innermost" match, and should be boosted. In the example | |
132 // above, the best prefixes for the two URLs have two and one | |
133 // components respectively, while the inlinable prefixes each | |
134 // have one component; this means the first match is not innermost | |
135 // and the second match is innermost, resulting in us boosting the | |
136 // second match. | |
137 // | |
138 // Now, the code that implements this. | |
139 // The deepest prefix for this URL regardless of where the match is. | |
140 const URLPrefix* best_prefix = URLPrefix::BestURLPrefix( | |
141 base::UTF8ToUTF16(gurl.spec()), base::string16()); | |
142 DCHECK(best_prefix != NULL); | |
143 const int num_components_in_best_prefix = best_prefix->num_components; | |
144 // If the URL is inlineable, we must have a match. Note the prefix that | |
145 // makes it inlineable may be empty. | |
146 DCHECK(best_inlineable_prefix != NULL); | |
147 const int num_components_in_best_inlineable_prefix = | |
148 best_inlineable_prefix->num_components; | |
149 innermost_match = (num_components_in_best_inlineable_prefix == | |
150 num_components_in_best_prefix); | |
151 } | |
152 | |
153 const float topicality_score = GetTopicalityScore( | |
154 terms.size(), url, terms_to_word_starts_offsets, word_starts); | |
155 const float frequency_score = GetFrequency( | |
156 now, (history_client && history_client->IsBookmarked(gurl)), visits); | |
157 raw_score_ = GetFinalRelevancyScore(topicality_score, frequency_score); | |
158 raw_score_ = | |
159 (raw_score_ <= kint32max) ? static_cast<int>(raw_score_) : kint32max; | |
160 | |
161 if (also_do_hup_like_scoring_ && can_inline_) { | |
162 // HistoryURL-provider-like scoring gives any match that is | |
163 // capable of being inlined a certain minimum score. Some of these | |
164 // are given a higher score that lets them be shown in inline. | |
165 // This test here derives from the test in | |
166 // HistoryURLProvider::PromoteMatchForInlineAutocomplete(). | |
167 const bool promote_to_inline = (row.typed_count() > 1) || | |
168 (IsHostOnly() && (row.typed_count() == 1)); | |
169 int hup_like_score = promote_to_inline ? | |
170 HistoryURLProvider::kScoreForBestInlineableResult : | |
171 HistoryURLProvider::kBaseScoreForNonInlineableResult; | |
172 | |
173 // Also, if the user types the hostname of a host with a typed | |
174 // visit, then everything from that host get given inlineable scores | |
175 // (because the URL-that-you-typed will go first and everything | |
176 // else will be assigned one minus the previous score, as coded | |
177 // at the end of HistoryURLProvider::DoAutocomplete(). | |
178 if (base::UTF8ToUTF16(gurl.host()) == terms[0]) | |
179 hup_like_score = HistoryURLProvider::kScoreForBestInlineableResult; | |
180 | |
181 // HistoryURLProvider has the function PromoteOrCreateShorterSuggestion() | |
182 // that's meant to promote prefixes of the best match (if they've | |
183 // been visited enough related to the best match) or | |
184 // create/promote host-only suggestions (even if they've never | |
185 // been typed). The code is complicated and we don't try to | |
186 // duplicate the logic here. Instead, we handle a simple case: in | |
187 // low-typed-count ranges, give host-only matches (i.e., | |
188 // http://www.foo.com/ vs. http://www.foo.com/bar.html) a boost so | |
189 // that the host-only match outscores all the other matches that | |
190 // would normally have the same base score. This behavior is not | |
191 // identical to what happens in HistoryURLProvider even in these | |
192 // low typed count ranges--sometimes it will create/promote when | |
193 // this test does not (indeed, we cannot create matches like HUP | |
194 // can) and vice versa--but the underlying philosophy is similar. | |
195 if (!promote_to_inline && IsHostOnly()) | |
196 hup_like_score++; | |
197 | |
198 // All the other logic to goes into hup-like-scoring happens in | |
199 // the tie-breaker case of MatchScoreGreater(). | |
200 | |
201 // Incorporate hup_like_score into raw_score. | |
202 raw_score_ = std::max(raw_score_, hup_like_score); | |
203 } | |
204 | |
205 // If this match is not inlineable and there's a cap on the maximum | |
206 // score that can be given to non-inlineable matches, apply the cap. | |
207 if (!can_inline_ && (max_assigned_score_for_non_inlineable_matches_ != -1)) { | |
208 raw_score_ = std::min(max_assigned_score_for_non_inlineable_matches_, | |
209 raw_score_); | |
210 } | |
211 | |
212 // Now that we're done processing this entry, correct the offsets of the | |
213 // matches in |url_matches_| so they point to offsets in the original URL | |
214 // spec, not the cleaned-up URL string that we used for matching. | |
215 std::vector<size_t> offsets = OffsetsFromTermMatches(url_matches_); | |
216 base::OffsetAdjuster::UnadjustOffsets(adjustments, &offsets); | |
217 url_matches_ = ReplaceOffsetsInTermMatches(url_matches_, offsets); | |
218 } | |
219 | |
220 ScoredHistoryMatch::~ScoredHistoryMatch() {} | |
221 | |
222 // Comparison function for sorting ScoredMatches by their scores with | |
223 // intelligent tie-breaking. | |
224 bool ScoredHistoryMatch::MatchScoreGreater(const ScoredHistoryMatch& m1, | |
225 const ScoredHistoryMatch& m2) { | |
226 if (m1.raw_score_ != m2.raw_score_) | |
227 return m1.raw_score_ > m2.raw_score_; | |
228 | |
229 // This tie-breaking logic is inspired by / largely copied from the | |
230 // ordering logic in history_url_provider.cc CompareHistoryMatch(). | |
231 | |
232 // A URL that has been typed at all is better than one that has never been | |
233 // typed. (Note "!"s on each side.) | |
234 if (!m1.url_info.typed_count() != !m2.url_info.typed_count()) | |
235 return m1.url_info.typed_count() > m2.url_info.typed_count(); | |
236 | |
237 // Innermost matches (matches after any scheme or "www.") are better than | |
238 // non-innermost matches. | |
239 if (m1.innermost_match != m2.innermost_match) | |
240 return m1.innermost_match; | |
241 | |
242 // URLs that have been typed more often are better. | |
243 if (m1.url_info.typed_count() != m2.url_info.typed_count()) | |
244 return m1.url_info.typed_count() > m2.url_info.typed_count(); | |
245 | |
246 // For URLs that have each been typed once, a host (alone) is better | |
247 // than a page inside. | |
248 if (m1.url_info.typed_count() == 1) { | |
249 if (m1.IsHostOnly() != m2.IsHostOnly()) | |
250 return m1.IsHostOnly(); | |
251 } | |
252 | |
253 // URLs that have been visited more often are better. | |
254 if (m1.url_info.visit_count() != m2.url_info.visit_count()) | |
255 return m1.url_info.visit_count() > m2.url_info.visit_count(); | |
256 | |
257 // URLs that have been visited more recently are better. | |
258 return m1.url_info.last_visit() > m2.url_info.last_visit(); | |
259 } | 140 } |
260 | 141 |
261 // static | 142 // static |
262 TermMatches ScoredHistoryMatch::FilterTermMatchesByWordStarts( | 143 history::TermMatches ScoredHistoryMatchBuilder::FilterTermMatchesByWordStarts( |
263 const TermMatches& term_matches, | 144 const history::TermMatches& term_matches, |
264 const WordStarts& terms_to_word_starts_offsets, | 145 const history::WordStarts& terms_to_word_starts_offsets, |
265 const WordStarts& word_starts, | 146 const history::WordStarts& word_starts, |
266 size_t start_pos, | 147 size_t start_pos, |
267 size_t end_pos) { | 148 size_t end_pos) { |
268 // Return early if no filtering is needed. | 149 // Return early if no filtering is needed. |
269 if (start_pos == std::string::npos) | 150 if (start_pos == std::string::npos) |
270 return term_matches; | 151 return term_matches; |
271 TermMatches filtered_matches; | 152 history::TermMatches filtered_matches; |
272 WordStarts::const_iterator next_word_starts = word_starts.begin(); | 153 history::WordStarts::const_iterator next_word_starts = word_starts.begin(); |
273 WordStarts::const_iterator end_word_starts = word_starts.end(); | 154 history::WordStarts::const_iterator end_word_starts = word_starts.end(); |
274 for (TermMatches::const_iterator iter = term_matches.begin(); | 155 for (const auto& term_match : term_matches) { |
275 iter != term_matches.end(); ++iter) { | 156 const size_t term_offset = |
276 const size_t term_offset = terms_to_word_starts_offsets[iter->term_num]; | 157 terms_to_word_starts_offsets[term_match.term_num]; |
277 // Advance next_word_starts until it's >= the position of the term we're | 158 // Advance next_word_starts until it's >= the position of the term we're |
278 // considering (adjusted for where the word begins within the term). | 159 // considering (adjusted for where the word begins within the term). |
279 while ((next_word_starts != end_word_starts) && | 160 while ((next_word_starts != end_word_starts) && |
280 (*next_word_starts < (iter->offset + term_offset))) | 161 (*next_word_starts < (term_match.offset + term_offset))) |
281 ++next_word_starts; | 162 ++next_word_starts; |
282 // Add the match if it's before the position we start filtering at or | 163 // Add the match if it's before the position we start filtering at or |
283 // after the position we stop filtering at (assuming we have a position | 164 // after the position we stop filtering at (assuming we have a position |
284 // to stop filtering at) or if it's at a word boundary. | 165 // to stop filtering at) or if it's at a word boundary. |
285 if ((iter->offset < start_pos) || | 166 if ((term_match.offset < start_pos) || |
286 ((end_pos != std::string::npos) && (iter->offset >= end_pos)) || | 167 ((end_pos != std::string::npos) && (term_match.offset >= end_pos)) || |
287 ((next_word_starts != end_word_starts) && | 168 ((next_word_starts != end_word_starts) && |
288 (*next_word_starts == iter->offset + term_offset))) | 169 (*next_word_starts == term_match.offset + term_offset))) |
289 filtered_matches.push_back(*iter); | 170 filtered_matches.push_back(term_match); |
290 } | 171 } |
291 return filtered_matches; | 172 return filtered_matches; |
292 } | 173 } |
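
The filtering rule above (keep a match if it falls before |start_pos|, at or after |end_pos|, or exactly on a word boundary) can be illustrated with a deliberately simplified sketch. It uses a made-up Match struct and a linear scan over word starts instead of the real history:: types, the advancing iterator, and the terms_to_word_starts_offsets adjustment:

#include <stdio.h>
#include <vector>

// Simplified stand-in for history::TermMatch.
struct Match { int term_num; size_t offset; };

// Same keep/drop rule as FilterTermMatchesByWordStarts(): keep a match if it
// lies before |start_pos|, at or past |end_pos|, or on a word boundary.
std::vector<Match> Filter(const std::vector<Match>& matches,
                          const std::vector<size_t>& word_starts,
                          size_t start_pos, size_t end_pos) {
  std::vector<Match> kept;
  for (const Match& m : matches) {
    bool at_word_boundary = false;
    for (size_t word_start : word_starts)
      at_word_boundary |= (word_start == m.offset);
    if (m.offset < start_pos || m.offset >= end_pos || at_word_boundary)
      kept.push_back(m);
  }
  return kept;
}

int main() {
  // Word boundaries at offsets 0, 4 and 10; filter the range [3, 12).
  const std::vector<size_t> word_starts = {0, 4, 10};
  const std::vector<Match> matches = {{0, 2}, {0, 4}, {0, 6}, {0, 12}};
  // Offsets 2 (before start), 4 (word boundary) and 12 (past end) survive;
  // offset 6 is dropped because it sits mid-word inside the filtered range.
  for (const Match& m : Filter(matches, word_starts, 3, 12))
    printf("kept offset %zu\n", m.offset);
  return 0;
}
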
293 | 174 |
294 float ScoredHistoryMatch::GetTopicalityScore( | 175 void ScoredHistoryMatchBuilder::Init() { |
| 176 // Because the code below is not thread safe, we check that we're only calling |
| 177 // it from one thread: the UI thread. Specifically, we check "if we've heard |
| 178 // of the UI thread then we'd better be on it." The first part is necessary |
| 179 // so unit tests pass. (Many unit tests don't set up the threading naming |
| 180 // system; hence CurrentlyOn(UI thread) will fail.) |
| 181 using content::BrowserThread; |
| 182 DCHECK(!BrowserThread::IsThreadInitialized(BrowserThread::UI) || |
| 183 BrowserThread::CurrentlyOn(BrowserThread::UI)); |
| 184 |
| 185 static bool initialized = false; |
| 186 if (initialized) |
| 187 return; |
| 188 |
| 189 initialized = true; |
| 190 |
| 191 // When doing HUP-like scoring, don't allow a non-inlineable match |
| 192 // to beat the score of good inlineable matches. This is a problem |
| 193 // because if a non-inlineable match ends up with the highest score |
| 194 // from HistoryQuick provider, all HistoryQuick matches get demoted |
| 195 // to non-inlineable scores (scores less than 1200). Without |
| 196 // HUP-like-scoring, these results would actually come from the HUP |
| 197 // and not be demoted, thus outscoring the demoted HQP results. |
| 198 // When the HQP provides these, we need to clamp the non-inlineable |
| 199 // results to preserve this behavior. |
| 200 if (kAlsoDoHupLikeScoring) { |
| 201 max_assigned_score_for_non_inlineable_matches = |
| 202 HistoryURLProvider::kScoreForBestInlineableResult - 1; |
| 203 } |
| 204 bookmark_value_ = OmniboxFieldTrial::HQPBookmarkValue(); |
| 205 allow_tld_matches_ = OmniboxFieldTrial::HQPAllowMatchInTLDValue(); |
| 206 allow_scheme_matches_ = OmniboxFieldTrial::HQPAllowMatchInSchemeValue(); |
| 207 |
| 208 InitRawTermScoreToTopicalityScoreArray(); |
| 209 InitDaysAgoToRecencyScoreArray(); |
| 210 } |
| 211 |
| 212 // static |
| 213 float ScoredHistoryMatchBuilder::GetTopicalityScore( |
295 const int num_terms, | 214 const int num_terms, |
296 const base::string16& url, | 215 const base::string16& url, |
297 const WordStarts& terms_to_word_starts_offsets, | 216 const history::WordStarts& terms_to_word_starts_offsets, |
298 const RowWordStarts& word_starts) { | 217 const history::RowWordStarts& word_starts, |
299 // Because the below thread is not thread safe, we check that we're | 218 history::ScoredHistoryMatch* scored_history_match) { |
300 // only calling it from one thread: the UI thread. Specifically, | 219 DCHECK(raw_term_score_to_topicality_score); |
301 // we check "if we've heard of the UI thread then we'd better | |
302 // be on it." The first part is necessary so unit tests pass. (Many | |
303 // unit tests don't set up the threading naming system; hence | |
304 // CurrentlyOn(UI thread) will fail.) | |
305 DCHECK(!content::BrowserThread::IsThreadInitialized( | |
306 content::BrowserThread::UI) || | |
307 content::BrowserThread::CurrentlyOn(content::BrowserThread::UI)); | |
308 if (raw_term_score_to_topicality_score_ == NULL) { | |
309 raw_term_score_to_topicality_score_ = new float[kMaxRawTermScore]; | |
310 FillInTermScoreToTopicalityScoreArray(); | |
311 } | |
312 // A vector that accumulates per-term scores. The strongest match--a | 220 // A vector that accumulates per-term scores. The strongest match--a |
313 // match in the hostname at a word boundary--is worth 10 points. | 221 // match in the hostname at a word boundary--is worth 10 points. |
314 // Everything else is less. In general, a match that's not at a word | 222 // Everything else is less. In general, a match that's not at a word |
315 // boundary is worth about 1/4th or 1/5th of a match at the word boundary | 223 // boundary is worth about 1/4th or 1/5th of a match at the word boundary |
316 // in the same part of the URL/title. | 224 // in the same part of the URL/title. |
317 DCHECK_GT(num_terms, 0); | 225 DCHECK_GT(num_terms, 0); |
318 std::vector<int> term_scores(num_terms, 0); | 226 std::vector<int> term_scores(num_terms, 0); |
319 WordStarts::const_iterator next_word_starts = | 227 history::WordStarts::const_iterator next_word_starts = |
320 word_starts.url_word_starts_.begin(); | 228 word_starts.url_word_starts_.begin(); |
321 WordStarts::const_iterator end_word_starts = | 229 history::WordStarts::const_iterator end_word_starts = |
322 word_starts.url_word_starts_.end(); | 230 word_starts.url_word_starts_.end(); |
323 const size_t question_mark_pos = url.find('?'); | 231 const size_t question_mark_pos = url.find('?'); |
324 const size_t colon_pos = url.find(':'); | 232 const size_t colon_pos = url.find(':'); |
325 // The + 3 skips the // that probably appears in the protocol | 233 // The + 3 skips the // that probably appears in the protocol |
326 // after the colon. If the protocol doesn't have two slashes after | 234 // after the colon. If the protocol doesn't have two slashes after |
327 // the colon, that's okay--all this ends up doing is starting our | 235 // the colon, that's okay--all this ends up doing is starting our |
328 // search for the next / a few characters into the hostname. The | 236 // search for the next / a few characters into the hostname. The |
329 // only times this can cause problems is if we have a protocol without | 237 // only times this can cause problems is if we have a protocol without |
330 // a // after the colon and the hostname is only one or two characters. | 238 // a // after the colon and the hostname is only one or two characters. |
331 // This isn't worth worrying about. | 239 // This isn't worth worrying about. |
332 const size_t end_of_hostname_pos = (colon_pos != std::string::npos) ? | 240 const size_t end_of_hostname_pos = (colon_pos != std::string::npos) |
333 url.find('/', colon_pos + 3) : url.find('/'); | 241 ? url.find('/', colon_pos + 3) |
334 size_t last_part_of_hostname_pos = | 242 : url.find('/'); |
335 (end_of_hostname_pos != std::string::npos) ? | 243 size_t last_part_of_hostname_pos = (end_of_hostname_pos != std::string::npos) |
336 url.rfind('.', end_of_hostname_pos) : url.rfind('.'); | 244 ? url.rfind('.', end_of_hostname_pos) |
| 245 : url.rfind('.'); |
337 // Loop through all URL matches and score them appropriately. | 246 // Loop through all URL matches and score them appropriately. |
338 // First, filter all matches not at a word boundary and in the path (or | 247 // First, filter all matches not at a word boundary and in the path (or |
339 // later). | 248 // later). |
340 url_matches_ = FilterTermMatchesByWordStarts( | 249 scored_history_match->url_matches = FilterTermMatchesByWordStarts( |
341 url_matches_, terms_to_word_starts_offsets, word_starts.url_word_starts_, | 250 scored_history_match->url_matches, terms_to_word_starts_offsets, |
342 end_of_hostname_pos, | 251 word_starts.url_word_starts_, end_of_hostname_pos, std::string::npos); |
343 std::string::npos); | |
344 if (colon_pos != std::string::npos) { | 252 if (colon_pos != std::string::npos) { |
345 // Also filter matches not at a word boundary and in the scheme. | 253 // Also filter matches not at a word boundary and in the scheme. |
346 url_matches_ = FilterTermMatchesByWordStarts( | 254 scored_history_match->url_matches = FilterTermMatchesByWordStarts( |
347 url_matches_, terms_to_word_starts_offsets, | 255 scored_history_match->url_matches, terms_to_word_starts_offsets, |
348 word_starts.url_word_starts_, 0, colon_pos); | 256 word_starts.url_word_starts_, 0, colon_pos); |
349 } | 257 } |
350 for (TermMatches::const_iterator iter = url_matches_.begin(); | 258 for (const auto& url_match : scored_history_match->url_matches) { |
351 iter != url_matches_.end(); ++iter) { | 259 const size_t term_offset = terms_to_word_starts_offsets[url_match.term_num]; |
352 const size_t term_offset = terms_to_word_starts_offsets[iter->term_num]; | |
353 // Advance next_word_starts until it's >= the position of the term we're | 260 // Advance next_word_starts until it's >= the position of the term we're |
354 // considering (adjusted for where the word begins within the term). | 261 // considering (adjusted for where the word begins within the term). |
355 while ((next_word_starts != end_word_starts) && | 262 while ((next_word_starts != end_word_starts) && |
356 (*next_word_starts < (iter->offset + term_offset))) { | 263 (*next_word_starts < (url_match.offset + term_offset))) { |
357 ++next_word_starts; | 264 ++next_word_starts; |
358 } | 265 } |
359 const bool at_word_boundary = (next_word_starts != end_word_starts) && | 266 const bool at_word_boundary = |
360 (*next_word_starts == iter->offset + term_offset); | 267 (next_word_starts != end_word_starts) && |
| 268 (*next_word_starts == url_match.offset + term_offset); |
361 if ((question_mark_pos != std::string::npos) && | 269 if ((question_mark_pos != std::string::npos) && |
362 (iter->offset > question_mark_pos)) { | 270 (url_match.offset > question_mark_pos)) { |
363 // The match is in a CGI ?... fragment. | 271 // The match is in a CGI ?... fragment. |
364 DCHECK(at_word_boundary); | 272 DCHECK(at_word_boundary); |
365 term_scores[iter->term_num] += 5; | 273 term_scores[url_match.term_num] += 5; |
366 } else if ((end_of_hostname_pos != std::string::npos) && | 274 } else if ((end_of_hostname_pos != std::string::npos) && |
367 (iter->offset > end_of_hostname_pos)) { | 275 (url_match.offset > end_of_hostname_pos)) { |
368 // The match is in the path. | 276 // The match is in the path. |
369 DCHECK(at_word_boundary); | 277 DCHECK(at_word_boundary); |
370 term_scores[iter->term_num] += 8; | 278 term_scores[url_match.term_num] += 8; |
371 } else if ((colon_pos == std::string::npos) || | 279 } else if ((colon_pos == std::string::npos) || |
372 (iter->offset > colon_pos)) { | 280 (url_match.offset > colon_pos)) { |
373 // The match is in the hostname. | 281 // The match is in the hostname. |
374 if ((last_part_of_hostname_pos == std::string::npos) || | 282 if ((last_part_of_hostname_pos == std::string::npos) || |
375 (iter->offset < last_part_of_hostname_pos)) { | 283 (url_match.offset < last_part_of_hostname_pos)) { |
376 // Either there are no dots in the hostname or this match isn't | 284 // Either there are no dots in the hostname or this match isn't |
377 // the last dotted component. | 285 // the last dotted component. |
378 term_scores[iter->term_num] += at_word_boundary ? 10 : 2; | 286 term_scores[url_match.term_num] += at_word_boundary ? 10 : 2; |
379 } else { | 287 } else { |
380 // The match is in the last part of a dotted hostname (usually this | 288 // The match is in the last part of a dotted hostname (usually this |
381 // is the top-level domain .com, .net, etc.). | 289 // is the top-level domain .com, .net, etc.). |
382 if (allow_tld_matches_) | 290 if (allow_tld_matches_) |
383 term_scores[iter->term_num] += at_word_boundary ? 10 : 0; | 291 term_scores[url_match.term_num] += at_word_boundary ? 10 : 0; |
384 } | 292 } |
385 } else { | 293 } else { |
386 // The match is in the protocol (a.k.a. scheme). | 294 // The match is in the protocol (a.k.a. scheme). |
387 // Matches not at a word boundary should have been filtered already. | 295 // Matches not at a word boundary should have been filtered already. |
388 DCHECK(at_word_boundary); | 296 DCHECK(at_word_boundary); |
389 match_in_scheme = true; | 297 scored_history_match->match_in_scheme = true; |
390 if (allow_scheme_matches_) | 298 if (allow_scheme_matches_) |
391 term_scores[iter->term_num] += 10; | 299 term_scores[url_match.term_num] += 10; |
392 } | 300 } |
393 } | 301 } |
394 // Now do the analogous loop over all matches in the title. | 302 // Now do the analogous loop over all matches in the title. |
395 next_word_starts = word_starts.title_word_starts_.begin(); | 303 next_word_starts = word_starts.title_word_starts_.begin(); |
396 end_word_starts = word_starts.title_word_starts_.end(); | 304 end_word_starts = word_starts.title_word_starts_.end(); |
397 int word_num = 0; | 305 int word_num = 0; |
398 title_matches_ = FilterTermMatchesByWordStarts( | 306 scored_history_match->title_matches = FilterTermMatchesByWordStarts( |
399 title_matches_, terms_to_word_starts_offsets, | 307 scored_history_match->title_matches, terms_to_word_starts_offsets, |
400 word_starts.title_word_starts_, 0, std::string::npos); | 308 word_starts.title_word_starts_, 0, std::string::npos); |
401 for (TermMatches::const_iterator iter = title_matches_.begin(); | 309 for (const auto& title_match : scored_history_match->title_matches) { |
402 iter != title_matches_.end(); ++iter) { | 310 const size_t term_offset = |
403 const size_t term_offset = terms_to_word_starts_offsets[iter->term_num]; | 311 terms_to_word_starts_offsets[title_match.term_num]; |
404 // Advance next_word_starts until it's >= the position of the term we're | 312 // Advance next_word_starts until it's >= the position of the term we're |
405 // considering (adjusted for where the word begins within the term). | 313 // considering (adjusted for where the word begins within the term). |
406 while ((next_word_starts != end_word_starts) && | 314 while ((next_word_starts != end_word_starts) && |
407 (*next_word_starts < (iter->offset + term_offset))) { | 315 (*next_word_starts < (title_match.offset + term_offset))) { |
408 ++next_word_starts; | 316 ++next_word_starts; |
409 ++word_num; | 317 ++word_num; |
410 } | 318 } |
411 if (word_num >= 10) break; // only count the first ten words | 319 if (word_num >= 10) |
| 320 break; // only count the first ten words |
412 DCHECK(next_word_starts != end_word_starts); | 321 DCHECK(next_word_starts != end_word_starts); |
413 DCHECK_EQ(*next_word_starts, iter->offset + term_offset) | 322 DCHECK_EQ(*next_word_starts, title_match.offset + term_offset) |
414 << "not at word boundary"; | 323 << "not at word boundary"; |
415 term_scores[iter->term_num] += 8; | 324 term_scores[title_match.term_num] += 8; |
416 } | 325 } |
417 // TODO(mpearson): Restore logic for penalizing out-of-order matches. | 326 // TODO(mpearson): Restore logic for penalizing out-of-order matches. |
418 // (Perhaps discount them by 0.8?) | 327 // (Perhaps discount them by 0.8?) |
419 // TODO(mpearson): Consider: if the earliest match occurs late in the string, | 328 // TODO(mpearson): Consider: if the earliest match occurs late in the string, |
420 // should we discount it? | 329 // should we discount it? |
421 // TODO(mpearson): Consider: do we want to score based on how much of the | 330 // TODO(mpearson): Consider: do we want to score based on how much of the |
422 // input string the input covers? (I'm leaning toward no.) | 331 // input string the input covers? (I'm leaning toward no.) |
423 | 332 |
424 // Compute the topicality_score as the sum of transformed term_scores. | 333 // Compute the topicality_score as the sum of transformed term_scores. |
425 float topicality_score = 0; | 334 float topicality_score = 0; |
426 for (size_t i = 0; i < term_scores.size(); ++i) { | 335 for (size_t i = 0; i < term_scores.size(); ++i) { |
427 // Drop this URL if it seems like a term didn't appear or, more precisely, | 336 // Drop this URL if it seems like a term didn't appear or, more precisely, |
428 // didn't appear in a part of the URL or title that we trust enough | 337 // didn't appear in a part of the URL or title that we trust enough |
429 // to give it credit for. For instance, terms that appear in the middle | 338 // to give it credit for. For instance, terms that appear in the middle |
430 // of a CGI parameter get no credit. Almost all the matches dropped | 339 // of a CGI parameter get no credit. Almost all the matches dropped |
431 // due to this test would look stupid if shown to the user. | 340 // due to this test would look stupid if shown to the user. |
432 if (term_scores[i] == 0) | 341 if (term_scores[i] == 0) |
433 return 0; | 342 return 0; |
434 topicality_score += raw_term_score_to_topicality_score_[ | 343 topicality_score += |
435 (term_scores[i] >= kMaxRawTermScore) ? (kMaxRawTermScore - 1) : | 344 raw_term_score_to_topicality_score[(term_scores[i] >= kMaxRawTermScore) |
436 term_scores[i]]; | 345 ? (kMaxRawTermScore - 1) |
| 346 : term_scores[i]]; |
437 } | 347 } |
438 // TODO(mpearson): If there are multiple terms, consider taking the | 348 // TODO(mpearson): If there are multiple terms, consider taking the |
439 // geometric mean of per-term scores rather than the arithmetic mean. | 349 // geometric mean of per-term scores rather than the arithmetic mean. |
440 | 350 |
441 return topicality_score / num_terms; | 351 return topicality_score / num_terms; |
442 } | 352 } |
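
Putting the pieces together, the per-part weights above (10 for a hostname hit at a word boundary, 8 for a path or title hit, 5 for a query hit, 2 for a mid-word hostname hit) are summed per term, passed through the same transform as InitRawTermScoreToTopicalityScoreArray(), and averaged over the number of terms. A hand-rolled example with hard-coded scores, not the real matching code:

#include <math.h>
#include <stdio.h>

// Hand-rolled topicality example: per-term raw points are summed, capped,
// run through the linear/log transform, and averaged over the terms.
int main() {
  const int kMaxRawTermScore = 30;
  // Two input terms: the first hits the hostname at a word boundary (10)
  // and also the title (8); the second hits only the path (8).
  int term_scores[] = {10 + 8, 8};
  double topicality = 0;
  for (int raw : term_scores) {
    if (raw >= kMaxRawTermScore)
      raw = kMaxRawTermScore - 1;
    topicality += (raw < 10) ? 0.1 * raw : 1.0 + 2.25 * log10(0.1 * raw);
  }
  printf("topicality = %.2f\n", topicality / 2);  // (1.57 + 0.80) / 2 = 1.19
  return 0;
}
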
443 | 353 |
444 // static | 354 // static |
445 void ScoredHistoryMatch::FillInTermScoreToTopicalityScoreArray() { | 355 float ScoredHistoryMatchBuilder::GetRecencyScore(int last_visit_days_ago) { |
446 for (int term_score = 0; term_score < kMaxRawTermScore; ++term_score) { | 356 DCHECK(days_ago_to_recency_score); |
447 float topicality_score; | |
448 if (term_score < 10) { | |
449 // If the term scores less than 10 points (no full-credit hit, or | |
450 // no combination of hits that score that well), then the topicality | |
451 // score is linear in the term score. | |
452 topicality_score = 0.1 * term_score; | |
453 } else { | |
454 // For term scores of at least ten points, pass them through a log | |
455 // function so a score of 10 points gets a 1.0 (to meet up exactly | |
456 // with the linear component) and increases logarithmically until | |
457 // maxing out at 30 points, with computes to a score around 2.1. | |
458 topicality_score = (1.0 + 2.25 * log10(0.1 * term_score)); | |
459 } | |
460 raw_term_score_to_topicality_score_[term_score] = topicality_score; | |
461 } | |
462 } | |
463 | |
464 // static | |
465 float ScoredHistoryMatch::GetRecencyScore(int last_visit_days_ago) { | |
466 // Because the below thread is not thread safe, we check that we're | |
467 // only calling it from one thread: the UI thread. Specifically, | |
468 // we check "if we've heard of the UI thread then we'd better | |
469 // be on it." The first part is necessary so unit tests pass. (Many | |
470 // unit tests don't set up the threading naming system; hence | |
471 // CurrentlyOn(UI thread) will fail.) | |
472 DCHECK(!content::BrowserThread::IsThreadInitialized( | |
473 content::BrowserThread::UI) || | |
474 content::BrowserThread::CurrentlyOn(content::BrowserThread::UI)); | |
475 if (days_ago_to_recency_score_ == NULL) { | |
476 days_ago_to_recency_score_ = new float[kDaysToPrecomputeRecencyScoresFor]; | |
477 FillInDaysAgoToRecencyScoreArray(); | |
478 } | |
479 // Lookup the score in days_ago_to_recency_score, treating | 357 // Lookup the score in days_ago_to_recency_score, treating |
480 // everything older than what we've precomputed as the oldest thing | 358 // everything older than what we've precomputed as the oldest thing |
481 // we've precomputed. The std::max is to protect against corruption | 359 // we've precomputed. The std::max is to protect against corruption |
482 // in the database (in case last_visit_days_ago is negative). | 360 // in the database (in case last_visit_days_ago is negative). |
483 return days_ago_to_recency_score_[ | 361 return days_ago_to_recency_score[std::max( |
484 std::max( | 362 std::min(last_visit_days_ago, kDaysToPrecomputeRecencyScoresFor - 1), 0)]; |
485 std::min(last_visit_days_ago, kDaysToPrecomputeRecencyScoresFor - 1), | |
486 0)]; | |
487 } | |
488 | |
489 void ScoredHistoryMatch::FillInDaysAgoToRecencyScoreArray() { | |
490 for (int days_ago = 0; days_ago < kDaysToPrecomputeRecencyScoresFor; | |
491 days_ago++) { | |
492 int unnormalized_recency_score; | |
493 if (days_ago <= 4) { | |
494 unnormalized_recency_score = 100; | |
495 } else if (days_ago <= 14) { | |
496 // Linearly extrapolate between 4 and 14 days so 14 days has a score | |
497 // of 70. | |
498 unnormalized_recency_score = 70 + (14 - days_ago) * (100 - 70) / (14 - 4); | |
499 } else if (days_ago <= 31) { | |
500 // Linearly extrapolate between 14 and 31 days so 31 days has a score | |
501 // of 50. | |
502 unnormalized_recency_score = 50 + (31 - days_ago) * (70 - 50) / (31 - 14); | |
503 } else if (days_ago <= 90) { | |
504 // Linearly extrapolate between 30 and 90 days so 90 days has a score | |
505 // of 30. | |
506 unnormalized_recency_score = 30 + (90 - days_ago) * (50 - 30) / (90 - 30); | |
507 } else { | |
508 // Linearly extrapolate between 90 and 365 days so 365 days has a score | |
509 // of 10. | |
510 unnormalized_recency_score = | |
511 10 + (365 - days_ago) * (20 - 10) / (365 - 90); | |
512 } | |
513 days_ago_to_recency_score_[days_ago] = unnormalized_recency_score / 100.0; | |
514 if (days_ago > 0) { | |
515 DCHECK_LE(days_ago_to_recency_score_[days_ago], | |
516 days_ago_to_recency_score_[days_ago - 1]); | |
517 } | |
518 } | |
519 } | 363 } |
520 | 364 |
521 // static | 365 // static |
522 float ScoredHistoryMatch::GetFrequency(const base::Time& now, | 366 float ScoredHistoryMatchBuilder::GetFrequency( |
523 const bool bookmarked, | 367 const base::Time& now, |
524 const VisitInfoVector& visits) { | 368 const bool bookmarked, |
| 369 const history::VisitInfoVector& visits) { |
525 // Compute the weighted average |value_of_transition| over the last at | 370 // Compute the weighted average |value_of_transition| over the last at |
526 // most kMaxVisitsToScore visits, where each visit is weighted using | 371 // most kMaxVisitsToScore visits, where each visit is weighted using |
527 // GetRecencyScore() based on how many days ago it happened. Use | 372 // GetRecencyScore() based on how many days ago it happened. Use |
528 // kMaxVisitsToScore as the denominator for the average regardless of | 373 // kMaxVisitsToScore as the denominator for the average regardless of |
529 // how many visits there were in order to penalize a match that has | 374 // how many visits there were in order to penalize a match that has |
530 // fewer visits than kMaxVisitsToScore. | 375 // fewer visits than kMaxVisitsToScore. |
531 float summed_visit_points = 0; | 376 float summed_visit_points = 0; |
532 for (size_t i = 0; i < std::min(visits.size(), kMaxVisitsToScore); ++i) { | 377 size_t max_visit_to_score = |
| 378 std::min(visits.size(), history::ScoredHistoryMatch::kMaxVisitsToScore); |
| 379 for (size_t i = 0; i < max_visit_to_score; ++i) { |
533 int value_of_transition = | 380 int value_of_transition = |
534 (visits[i].second == ui::PAGE_TRANSITION_TYPED) ? 20 : 1; | 381 (visits[i].second == ui::PAGE_TRANSITION_TYPED) ? 20 : 1; |
535 if (bookmarked) | 382 if (bookmarked) |
536 value_of_transition = std::max(value_of_transition, bookmark_value_); | 383 value_of_transition = std::max(value_of_transition, bookmark_value_); |
537 const float bucket_weight = | 384 const float bucket_weight = |
538 GetRecencyScore((now - visits[i].first).InDays()); | 385 GetRecencyScore((now - visits[i].first).InDays()); |
539 summed_visit_points += (value_of_transition * bucket_weight); | 386 summed_visit_points += (value_of_transition * bucket_weight); |
540 } | 387 } |
541 return visits.size() * summed_visit_points / kMaxVisitsToScore; | 388 return visits.size() * summed_visit_points / |
| 389 history::ScoredHistoryMatch::kMaxVisitsToScore; |
542 } | 390 } |
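
A couple of hand-worked cases for GetFrequency() above, using hard-coded visit values rather than a real history::VisitInfoVector and leaving out the bookmark boost: three typed visits within the last four days score 18.0, while three plain visits about twenty days old score roughly 0.56:

#include <stdio.h>

// Worked examples of GetFrequency(): each visit contributes
// value_of_transition (20 if typed, otherwise 1) times its recency weight,
// and the denominator is always kMaxVisitsToScore.
int main() {
  const double kMaxVisitsToScore = 10;
  // Three typed visits, all within the last four days (recency 1.0).
  double summed = 20 * 1.0 + 20 * 1.0 + 20 * 1.0;
  printf("typed, recent:  %.2f\n", 3 * summed / kMaxVisitsToScore);  // 18.00
  // Three plain visits roughly twenty days ago (recency 0.62, see above).
  summed = 1 * 0.62 + 1 * 0.62 + 1 * 0.62;
  printf("untyped, older: %.2f\n", 3 * summed / kMaxVisitsToScore);  // 0.56
  return 0;
}
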
543 | 391 |
544 // static | 392 // static |
545 float ScoredHistoryMatch::GetFinalRelevancyScore(float topicality_score, | 393 float ScoredHistoryMatchBuilder::GetFinalRelevancyScore(float topicality_score, |
546 float frequency_score) { | 394 float frequency_score) { |
547 if (topicality_score == 0) | 395 if (topicality_score == 0) |
548 return 0; | 396 return 0; |
549 // Here's how to interpret intermediate_score: Suppose the omnibox | 397 // Here's how to interpret intermediate_score: Suppose the omnibox |
550 // has one input term. Suppose we have a URL for which the omnibox | 398 // has one input term. Suppose we have a URL for which the omnibox |
551 // input term has a single URL hostname hit at a word boundary. (This | 399 // input term has a single URL hostname hit at a word boundary. (This |
552 // implies topicality_score = 1.0.). Then the intermediate_score for | 400 // implies topicality_score = 1.0.). Then the intermediate_score for |
553 // this URL will depend entirely on the frequency_score with | 401 // this URL will depend entirely on the frequency_score with |
554 // this interpretation: | 402 // this interpretation: |
555 // - a single typed visit more than three months ago, no other visits -> 0.2 | 403 // - a single typed visit more than three months ago, no other visits -> 0.2 |
556 // - a visit every three days, no typed visits -> 0.706 | 404 // - a visit every three days, no typed visits -> 0.706 |
(...skipping 18 matching lines...) |
575 } | 423 } |
576 // Linearly extrapolate so a score of 20 (or more) has a score of 1399. | 424 // Linearly extrapolate so a score of 20 (or more) has a score of 1399. |
577 // (Scores above 20 are possible for URLs that have multiple term hits | 425 // (Scores above 20 are possible for URLs that have multiple term hits |
578 // in the URL and/or title and that are visited practically all | 426 // in the URL and/or title and that are visited practically all |
579 // the time using typed visits. We don't attempt to distinguish | 427 // the time using typed visits. We don't attempt to distinguish |
580 // between these very good results.) | 428 // between these very good results.) |
581 const float slope = (1399 - 1300) / (20.0f - 12.0f); | 429 const float slope = (1399 - 1300) / (20.0f - 12.0f); |
582 return std::min(1399.0, 1300 + slope * (intermediate_score - 12.0)); | 430 return std::min(1399.0, 1300 + slope * (intermediate_score - 12.0)); |
583 } | 431 } |
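
Only the tail of GetFinalRelevancyScore() is visible in this diff (the buckets for intermediate scores below 12 are in the elided lines above), but that tail is simple to reproduce: intermediate scores of 12 or more map linearly onto [1300, 1399] and are capped at 1399:

#include <algorithm>
#include <stdio.h>

// Reproduces only the visible tail of GetFinalRelevancyScore(); the lower
// buckets are not shown in this diff and are not reproduced here.
int main() {
  const float slope = (1399 - 1300) / (20.0f - 12.0f);  // 12.375
  const double samples[] = {12.0, 16.0, 20.0, 25.0};
  // Prints 1300.0, 1349.5, 1399.0 and 1399.0.
  for (double intermediate : samples) {
    double final_score =
        std::min(1399.0, 1300 + slope * (intermediate - 12.0));
    printf("intermediate=%.1f -> relevancy=%.1f\n", intermediate, final_score);
  }
  return 0;
}
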
584 | 432 |
585 void ScoredHistoryMatch::Init() { | 433 history::ScoredHistoryMatch ScoredHistoryMatchBuilder::Build( |
586 if (initialized_) | 434 const history::URLRow& row, |
587 return; | 435 const history::VisitInfoVector& visits, |
588 also_do_hup_like_scoring_ = false; | 436 const std::string& languages, |
589 // When doing HUP-like scoring, don't allow a non-inlineable match | 437 const base::string16& lower_string, |
590 // to beat the score of good inlineable matches. This is a problem | 438 const history::String16Vector& terms, |
591 // because if a non-inlineable match ends up with the highest score | 439 const history::WordStarts& terms_to_word_starts_offsets, |
592 // from HistoryQuick provider, all HistoryQuick matches get demoted | 440 const history::RowWordStarts& word_starts, |
593 // to non-inlineable scores (scores less than 1200). Without | 441 const base::Time now) const { |
594 // HUP-like-scoring, these results would actually come from the HUP | 442 history::ScoredHistoryMatch scored_history_match = |
595 // and not be demoted, thus outscoring the demoted HQP results. | 443 history::ScoredHistoryMatch(row, 0, false, false, 0, |
596 // When the HQP provides these, we need to clamp the non-inlineable | 444 history::TermMatches(), |
597 // results to preserve this behavior. | 445 history::TermMatches(), false); |
598 if (also_do_hup_like_scoring_) { | 446 |
599 max_assigned_score_for_non_inlineable_matches_ = | 447 GURL gurl = row.url(); |
600 HistoryURLProvider::kScoreForBestInlineableResult - 1; | 448 if (!gurl.is_valid()) |
| 449 return scored_history_match; |
| 450 |
| 451 // Figure out where each search term appears in the URL and/or page title |
| 452 // so that we can score as well as provide autocomplete highlighting. |
| 453 base::OffsetAdjuster::Adjustments adjustments; |
| 454 base::string16 url = |
| 455 bookmarks::CleanUpUrlForMatching(gurl, languages, &adjustments); |
| 456 base::string16 title = bookmarks::CleanUpTitleForMatching(row.title()); |
| 457 int term_num = 0; |
| 458 for (const auto& term : terms) { |
| 459 history::TermMatches url_term_matches = |
| 460 history::MatchTermInString(term, url, term_num); |
| 461 history::TermMatches title_term_matches = |
| 462 history::MatchTermInString(term, title, term_num); |
| 463 if (url_term_matches.empty() && title_term_matches.empty()) { |
| 464 // A term was not found in either URL or title - reject. |
| 465 return scored_history_match; |
| 466 } |
| 467 scored_history_match.url_matches.insert( |
| 468 scored_history_match.url_matches.end(), url_term_matches.begin(), |
| 469 url_term_matches.end()); |
| 470 scored_history_match.title_matches.insert( |
| 471 scored_history_match.title_matches.end(), title_term_matches.begin(), |
| 472 title_term_matches.end()); |
| 473 ++term_num; |
601 } | 474 } |
602 bookmark_value_ = OmniboxFieldTrial::HQPBookmarkValue(); | 475 |
603 allow_tld_matches_ = OmniboxFieldTrial::HQPAllowMatchInTLDValue(); | 476 // Sort matches by offset and eliminate any which overlap. |
604 allow_scheme_matches_ = OmniboxFieldTrial::HQPAllowMatchInSchemeValue(); | 477 // TODO(mpearson): Investigate whether this has any meaningful |
605 initialized_ = true; | 478 // effect on scoring. (It's necessary at some point: removing |
| 479 // overlaps and sorting is needed to decide what to highlight in the |
| 480 // suggestion string. But this sort and de-overlap doesn't have to |
| 481 // be done before scoring.) |
| 482 scored_history_match.url_matches = |
| 483 SortAndDeoverlapMatches(scored_history_match.url_matches); |
| 484 scored_history_match.title_matches = |
| 485 SortAndDeoverlapMatches(scored_history_match.title_matches); |
| 486 |
| 487 // We can inline autocomplete a match if: |
| 488 // 1) there is only one search term |
| 489 // 2) AND the match begins immediately after one of the prefixes in |
| 490 // URLPrefix such as http://www and https:// (note that one of these |
| 491 // is the empty prefix, for cases where the user has typed the scheme) |
| 492 // 3) AND the search string does not end in whitespace (making it look to |
| 493 // the IMUI as though there is a single search term when actually there |
| 494 // is a second, empty term). |
| 495 // |best_inlineable_prefix| stores the inlineable prefix computed in |
| 496 // clause (2) or NULL if no such prefix exists. (The URL is not inlineable.) |
| 497 // Note that using the best prefix here means that when multiple |
| 498 // prefixes match, we'll choose to inline following the longest one. |
| 499 // For a URL like "http://www.washingtonmutual.com", this means |
| 500 // typing "w" will inline "ashington..." instead of "ww.washington...". |
| 501 if (!scored_history_match.url_matches.empty() && terms.size() == 1 && |
| 502 !IsWhitespace(*lower_string.rbegin())) { |
| 503 const base::string16 gurl_spec = base::UTF8ToUTF16(gurl.spec()); |
| 504 const URLPrefix* best_inlineable_prefix = |
| 505 URLPrefix::BestURLPrefix(gurl_spec, terms[0]); |
| 506 if (best_inlineable_prefix) { |
| 507 // Initialize innermost_match. |
| 508 // The idea here is that matches that occur in the scheme or |
| 509 // "www." are worse than matches which don't. For the URLs |
| 510 // "http://www.google.com" and "http://wellsfargo.com", we want |
| 511 // the omnibox input "w" to cause the latter URL to rank higher |
| 512 // than the former. Note that this is not the same as checking |
| 513 // whether one match's inlinable prefix has more components than |
| 514 // the other match's, since in this example, both matches would |
| 515 // have an inlinable prefix of "http://", which is one component. |
| 516 // |
| 517 // Instead, we look for the overall best (i.e., most components) |
| 518 // prefix of the current URL, and then check whether the inlinable |
| 519 // prefix has that many components. If it does, this is an |
| 520 // "innermost" match, and should be boosted. In the example |
| 521 // above, the best prefixes for the two URLs have two and one |
| 522 // components respectively, while the inlinable prefixes each |
| 523 // have one component; this means the first match is not innermost |
| 524 // and the second match is innermost, resulting in us boosting the |
| 525 // second match. |
| 526 // |
| 527 // Now, the code that implements this. |
| 528 // The deepest prefix for this URL regardless of where the match is. |
| 529 const URLPrefix* best_prefix = |
| 530 URLPrefix::BestURLPrefix(gurl_spec, base::string16()); |
| 531 DCHECK(best_prefix); |
| 532 // If the URL is inlineable, we must have a match. Note the prefix that |
| 533 // makes it inlineable may be empty. |
| 534 scored_history_match.can_inline = true; |
| 535 scored_history_match.innermost_match = |
| 536 best_inlineable_prefix->num_components == best_prefix->num_components; |
| 537 } |
| 538 } |
| 539 |
| 540 const float topicality_score = |
| 541 GetTopicalityScore(terms.size(), url, terms_to_word_starts_offsets, |
| 542 word_starts, &scored_history_match); |
| 543 const float frequency_score = GetFrequency( |
| 544 now, !is_bookmarked_.is_null() && is_bookmarked_.Run(gurl), visits); |
| 545 scored_history_match.raw_score = std::min<int>( |
| 546 GetFinalRelevancyScore(topicality_score, frequency_score), kint32max); |
| 547 |
| 548 if (kAlsoDoHupLikeScoring && scored_history_match.can_inline) { |
| 549 // HistoryURL-provider-like scoring gives any match that is |
| 550 // capable of being inlined a certain minimum score. Some of these |
 | 551 // are given a higher score that lets them be shown inline. |
| 552 // This test here derives from the test in |
| 553 // HistoryURLProvider::PromoteMatchForInlineAutocomplete(). |
| 554 const bool promote_to_inline = |
| 555 row.typed_count() > 1 || |
| 556 (scored_history_match.IsHostOnly() && row.typed_count() == 1); |
| 557 int hup_like_score = |
| 558 promote_to_inline |
| 559 ? HistoryURLProvider::kScoreForBestInlineableResult |
| 560 : HistoryURLProvider::kBaseScoreForNonInlineableResult; |
| 561 |
| 562 // Also, if the user types the hostname of a host with a typed |
 | 563 // visit, then everything from that host gets given inlineable scores |
 | 564 // (because the URL-that-you-typed will go first and everything |
 | 565 // else will be assigned one minus the previous score, as coded |
 | 566 // at the end of HistoryURLProvider::DoAutocomplete()). |
| 567 if (base::UTF8ToUTF16(gurl.host()) == terms[0]) |
| 568 hup_like_score = HistoryURLProvider::kScoreForBestInlineableResult; |
| 569 |
| 570 // HistoryURLProvider has the function PromoteOrCreateShorterSuggestion() |
| 571 // that's meant to promote prefixes of the best match (if they've |
 | 572 // been visited enough relative to the best match) or |
| 573 // create/promote host-only suggestions (even if they've never |
| 574 // been typed). The code is complicated and we don't try to |
| 575 // duplicate the logic here. Instead, we handle a simple case: in |
| 576 // low-typed-count ranges, give host-only matches (i.e., |
| 577 // http://www.foo.com/ vs. http://www.foo.com/bar.html) a boost so |
| 578 // that the host-only match outscores all the other matches that |
| 579 // would normally have the same base score. This behavior is not |
| 580 // identical to what happens in HistoryURLProvider even in these |
| 581 // low typed count ranges--sometimes it will create/promote when |
| 582 // this test does not (indeed, we cannot create matches like HUP |
| 583 // can) and vice versa--but the underlying philosophy is similar. |
| 584 if (!promote_to_inline && scored_history_match.IsHostOnly()) |
| 585 hup_like_score++; |
| 586 |
 | 587 // All the other logic that goes into hup-like scoring happens in |
| 588 // the tie-breaker case of MatchScoreGreater(). |
| 589 |
| 590 // Incorporate hup_like_score into raw_score. |
| 591 scored_history_match.raw_score = |
| 592 std::max(scored_history_match.raw_score, hup_like_score); |
| 593 } |
| 594 |
| 595 // If this match is not inlineable and there's a cap on the maximum |
| 596 // score that can be given to non-inlineable matches, apply the cap. |
| 597 if (!scored_history_match.can_inline && |
| 598 max_assigned_score_for_non_inlineable_matches != -1) { |
| 599 scored_history_match.raw_score = |
| 600 std::min(scored_history_match.raw_score, |
| 601 max_assigned_score_for_non_inlineable_matches); |
| 602 } |
| 603 |
| 604 // Now that we're done processing this entry, correct the offsets of the |
| 605 // matches in |url_matches| so they point to offsets in the original URL |
| 606 // spec, not the cleaned-up URL string that we used for matching. |
| 607 std::vector<size_t> offsets = |
| 608 OffsetsFromTermMatches(scored_history_match.url_matches); |
| 609 base::OffsetAdjuster::UnadjustOffsets(adjustments, &offsets); |
| 610 scored_history_match.url_matches = |
| 611 ReplaceOffsetsInTermMatches(scored_history_match.url_matches, offsets); |
| 612 |
| 613 return scored_history_match; |
606 } | 614 } |
607 | |
608 } // namespace history | |