Chromium Code Reviews

Side by Side Diff: chrome/browser/autocomplete/scored_history_match.cc

Issue 976423002: Remove ScoreHistoryMatch::Builder (Closed) Base URL: https://chromium.googlesource.com/chromium/src.git@cpplint
Patch Set: Address comments Created 5 years, 9 months ago
1 // Copyright (c) 2012 The Chromium Authors. All rights reserved. 1 // Copyright (c) 2012 The Chromium Authors. All rights reserved.
2 // Use of this source code is governed by a BSD-style license that can be 2 // Use of this source code is governed by a BSD-style license that can be
3 // found in the LICENSE file. 3 // found in the LICENSE file.
4 4
5 #include "chrome/browser/autocomplete/scored_history_match.h" 5 #include "chrome/browser/autocomplete/scored_history_match.h"
6 6
7 #include <math.h>
8
9 #include <algorithm>
10 #include <vector>
11
12 #include "base/logging.h"
13 #include "base/numerics/safe_conversions.h"
14 #include "base/strings/string_number_conversions.h"
15 #include "base/strings/string_split.h"
16 #include "base/strings/string_util.h"
17 #include "base/strings/utf_offset_string_conversions.h"
18 #include "base/strings/utf_string_conversions.h"
19 #include "chrome/browser/autocomplete/history_url_provider.h"
20 #include "components/bookmarks/browser/bookmark_utils.h"
21 #include "components/omnibox/omnibox_field_trial.h"
22 #include "components/omnibox/url_prefix.h"
23 #include "content/public/browser/browser_thread.h"
24
25 namespace {
26
27 // The number of days of recency scores to precompute.
28 const int kDaysToPrecomputeRecencyScoresFor = 366;
29
30 // The number of raw term score buckets to use; raw term scores greater than
31 // this are capped at the score of the largest bucket.
32 const int kMaxRawTermScore = 30;
33
34 // If true, assign raw scores to be max(whatever it normally would be, a score
35 // that's similar to the score HistoryURL provider would assign). This variable
36 // is set in the constructor by examining the field trial state.
37 const bool kAlsoDoHupLikeScoring = false;
38
39 // Pre-computed information to speed up calculating recency scores.
40 // |days_ago_to_recency_score| is a simple array mapping how long ago a page was
41 // visited (in days) to the recency score we should assign it. This allows easy
42 // lookups of scores without requiring math. This is initialized by
43 // InitDaysAgoToRecencyScoreArray called by
44 // ScoredHistoryMatch::Init().
45 float days_ago_to_recency_score[kDaysToPrecomputeRecencyScoresFor];
46
47 // Pre-computed information to speed up calculating topicality scores.
48 // |raw_term_score_to_topicality_score| is a simple array mapping raw term
49 // scores (a weighted sum of the number of hits for the term, weighted by how
50 // important the hit is: hostname, path, etc.) to the topicality score we
51 // should assign. This allows easy lookups of scores without requiring math.
52 // It is initialized by InitRawTermScoreToTopicalityScoreArray(), called from
53 // ScoredHistoryMatch::Init().
54 float raw_term_score_to_topicality_score[kMaxRawTermScore];
55
56 // The maximum score that can be assigned to non-inlineable matches. This is
57 // useful because often we want inlineable matches to come first (even if they
58 // sometimes don't score as well as non-inlineable matches) because if a
59 // non-inlineable match comes first, then all matches will get demoted later in
60 // HistoryQuickProvider to non-inlineable scores. Set to -1 to indicate no
61 // maximum score.
62 int max_assigned_score_for_non_inlineable_matches = -1;
63
64 // Whether ScoredHistoryMatch::Init() has been called.
65 bool initialized = false;
66
67 // Precalculates raw_term_score_to_topicality_score, used in
68 // GetTopicalityScore().
69 void InitRawTermScoreToTopicalityScoreArray() {
70 for (int term_score = 0; term_score < kMaxRawTermScore; ++term_score) {
71 float topicality_score;
72 if (term_score < 10) {
73 // If the term scores less than 10 points (no full-credit hit, or
74 // no combination of hits that score that well), then the topicality
75 // score is linear in the term score.
76 topicality_score = 0.1 * term_score;
77 } else {
78 // For term scores of at least ten points, pass them through a log
79 // function so a score of 10 points gets a 1.0 (to meet up exactly
80 // with the linear component) and increases logarithmically until
81 // maxing out at 30 points, which computes to a score of around 2.1.
82 topicality_score = (1.0 + 2.25 * log10(0.1 * term_score));
83 }
84 raw_term_score_to_topicality_score[term_score] = topicality_score;
85 }
86 }
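For a concrete feel of this transform, here is a minimal standalone sketch; the function name is made up for illustration, but the arithmetic mirrors the linear-then-log mapping above and the kMaxRawTermScore - 1 cap:

#include <algorithm>
#include <cmath>
#include <cstdio>

// Illustrative restatement of raw_term_score_to_topicality_score[].
float TopicalityForRawTermScore(int raw_term_score) {
  raw_term_score = std::min(raw_term_score, 29);  // kMaxRawTermScore - 1
  if (raw_term_score < 10)
    return 0.1f * raw_term_score;  // Linear segment.
  return 1.0f + 2.25f * std::log10(0.1f * raw_term_score);  // Log segment.
}

int main() {
  // Prints roughly: 5 -> 0.50, 10 -> 1.00, 20 -> 1.68, 29 -> 2.04.
  for (int score : {5, 10, 20, 29})
    std::printf("%d -> %.2f\n", score, TopicalityForRawTermScore(score));
  return 0;
}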
87
88 // Pre-calculates days_ago_to_recency_score, used in GetRecencyScore().
89 void InitDaysAgoToRecencyScoreArray() {
90 for (int days_ago = 0; days_ago < kDaysToPrecomputeRecencyScoresFor;
91 days_ago++) {
92 int unnormalized_recency_score;
93 if (days_ago <= 4) {
94 unnormalized_recency_score = 100;
95 } else if (days_ago <= 14) {
96 // Linearly extrapolate between 4 and 14 days so 14 days has a score
97 // of 70.
98 unnormalized_recency_score = 70 + (14 - days_ago) * (100 - 70) / (14 - 4);
99 } else if (days_ago <= 31) {
100 // Linearly extrapolate between 14 and 31 days so 31 days has a score
101 // of 50.
102 unnormalized_recency_score = 50 + (31 - days_ago) * (70 - 50) / (31 - 14);
103 } else if (days_ago <= 90) {
104 // Linearly extrapolate between 30 and 90 days so 90 days has a score
105 // of 30.
106 unnormalized_recency_score = 30 + (90 - days_ago) * (50 - 30) / (90 - 30);
107 } else {
108 // Linearly extrapolate between 90 and 365 days so 365 days has a score
109 // of 10.
110 unnormalized_recency_score =
111 10 + (365 - days_ago) * (20 - 10) / (365 - 90);
112 }
113 days_ago_to_recency_score[days_ago] = unnormalized_recency_score / 100.0;
114 if (days_ago > 0) {
115 DCHECK_LE(days_ago_to_recency_score[days_ago],
116 days_ago_to_recency_score[days_ago - 1]);
117 }
118 }
119 }
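Worked values from the mapping above (after normalizing by 100): 0-4 days ago scores 1.0; 7 days ago scores 70 + (14 - 7) * 30 / 10 = 91, i.e. 0.91; 14 days scores 0.70; 31 days scores 0.50; 90 days scores 0.30; and anything a year old or older bottoms out at 0.10.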
120
121 } // namespace
122
7 // static 123 // static
8 const size_t ScoredHistoryMatch::kMaxVisitsToScore = 10; 124 const size_t ScoredHistoryMatch::kMaxVisitsToScore = 10;
125 int ScoredHistoryMatch::bookmark_value_ = 1;
126 bool ScoredHistoryMatch::allow_tld_matches_ = false;
127 bool ScoredHistoryMatch::allow_scheme_matches_ = false;
128 bool ScoredHistoryMatch::hqp_experimental_scoring_enabled_ = false;
129 float ScoredHistoryMatch::topicality_threshold_ = -1;
130 std::vector<ScoredHistoryMatch::ScoreMaxRelevance>*
131 ScoredHistoryMatch::hqp_relevance_buckets_ = nullptr;
9 132
10 ScoredHistoryMatch::ScoredHistoryMatch() : raw_score(0), can_inline(false) { 133 ScoredHistoryMatch::ScoredHistoryMatch() : raw_score(0), can_inline(false) {
11 } 134 }
12 135
13 ScoredHistoryMatch::ScoredHistoryMatch(const history::URLRow& url_info,
14 size_t input_location,
15 bool match_in_scheme,
16 bool innermost_match,
17 int raw_score,
18 const TermMatches& url_matches,
19 const TermMatches& title_matches,
20 bool can_inline)
21 : HistoryMatch(url_info, input_location, match_in_scheme, innermost_match),
22 raw_score(raw_score),
23 url_matches(url_matches),
24 title_matches(title_matches),
25 can_inline(can_inline) {
136 ScoredHistoryMatch::ScoredHistoryMatch(
137 const history::URLRow& row,
138 const VisitInfoVector& visits,
139 const std::string& languages,
140 const base::string16& lower_string,
141 const String16Vector& terms_vector,
142 const WordStarts& terms_to_word_starts_offsets,
143 const RowWordStarts& word_starts,
144 bool is_url_bookmarked,
145 base::Time now)
146 : HistoryMatch(row, 0, false, false), raw_score(0), can_inline(false) {
147 GURL gurl = row.url();
148 if (!gurl.is_valid())
149 return;
150
151 ScoredHistoryMatch::Init();
152
153 // Figure out where each search term appears in the URL and/or page title
154 // so that we can score as well as provide autocomplete highlighting.
155 base::OffsetAdjuster::Adjustments adjustments;
156 base::string16 url =
157 bookmarks::CleanUpUrlForMatching(gurl, languages, &adjustments);
158 base::string16 title = bookmarks::CleanUpTitleForMatching(row.title());
159 int term_num = 0;
160 for (const auto& term : terms_vector) {
161 TermMatches url_term_matches = MatchTermInString(term, url, term_num);
162 TermMatches title_term_matches = MatchTermInString(term, title, term_num);
163 if (url_term_matches.empty() && title_term_matches.empty()) {
164 // A term was not found in either URL or title - reject.
165 return;
166 }
167 url_matches.insert(url_matches.end(), url_term_matches.begin(),
168 url_term_matches.end());
169 title_matches.insert(title_matches.end(), title_term_matches.begin(),
170 title_term_matches.end());
171 ++term_num;
172 }
173
174 // Sort matches by offset and eliminate any which overlap.
175 // TODO(mpearson): Investigate whether this has any meaningful
176 // effect on scoring. (It's necessary at some point: removing
177 // overlaps and sorting is needed to decide what to highlight in the
178 // suggestion string. But this sort and de-overlap doesn't have to
179 // be done before scoring.)
180 url_matches = SortAndDeoverlapMatches(url_matches);
181 title_matches = SortAndDeoverlapMatches(title_matches);
182
183 // We can inline autocomplete a match if:
184 // 1) there is only one search term
185 // 2) AND the match begins immediately after one of the prefixes in
186 // URLPrefix such as http://www and https:// (note that one of these
187 // is the empty prefix, for cases where the user has typed the scheme)
188 // 3) AND the search string does not end in whitespace (making it look to
189 // the IMUI as though there is a single search term when actually there
190 // is a second, empty term).
191 // |best_inlineable_prefix| stores the inlineable prefix computed in clause
192 // (2), or NULL if no such prefix exists (i.e., the URL is not inlineable).
193 // Note that using the best prefix here means that when multiple
194 // prefixes match, we'll choose to inline following the longest one.
195 // For a URL like "http://www.washingtonmutual.com", this means
196 // typing "w" will inline "ashington..." instead of "ww.washington...".
197 if (!url_matches.empty() && (terms_vector.size() == 1) &&
198 !IsWhitespace(*lower_string.rbegin())) {
199 const base::string16 gurl_spec = base::UTF8ToUTF16(gurl.spec());
200 const URLPrefix* best_inlineable_prefix =
201 URLPrefix::BestURLPrefix(gurl_spec, terms_vector[0]);
202 if (best_inlineable_prefix) {
203 // Initialize innermost_match.
204 // The idea here is that matches that occur in the scheme or
205 // "www." are worse than matches which don't. For the URLs
206 // "http://www.google.com" and "http://wellsfargo.com", we want
207 // the omnibox input "w" to cause the latter URL to rank higher
208 // than the former. Note that this is not the same as checking
209 // whether one match's inlinable prefix has more components than
210 // the other match's, since in this example, both matches would
211 // have an inlinable prefix of "http://", which is one component.
212 //
213 // Instead, we look for the overall best (i.e., most components)
214 // prefix of the current URL, and then check whether the inlinable
215 // prefix has that many components. If it does, this is an
216 // "innermost" match, and should be boosted. In the example
217 // above, the best prefixes for the two URLs have two and one
218 // components respectively, while the inlinable prefixes each
219 // have one component; this means the first match is not innermost
220 // and the second match is innermost, resulting in us boosting the
221 // second match.
222 //
223 // Now, the code that implements this.
224 // The deepest prefix for this URL regardless of where the match is.
225 const URLPrefix* best_prefix =
226 URLPrefix::BestURLPrefix(gurl_spec, base::string16());
227 DCHECK(best_prefix);
228 // If the URL is inlineable, we must have a match. Note the prefix that
229 // makes it inlineable may be empty.
230 can_inline = true;
231 innermost_match =
232 best_inlineable_prefix->num_components == best_prefix->num_components;
233 }
234 }
235
236 const float topicality_score = GetTopicalityScore(
237 terms_vector.size(), url, terms_to_word_starts_offsets, word_starts);
238 const float frequency_score = GetFrequency(now, is_url_bookmarked, visits);
239 raw_score = base::saturated_cast<int>(GetFinalRelevancyScore(
240 topicality_score, frequency_score, *hqp_relevance_buckets_));
241
242 if (kAlsoDoHupLikeScoring && can_inline) {
243 // HistoryURL-provider-like scoring gives any match that is
244 // capable of being inlined a certain minimum score. Some of these
245 // are given a higher score that lets them be shown inline.
246 // This test here derives from the test in
247 // HistoryURLProvider::PromoteMatchForInlineAutocomplete().
248 const bool promote_to_inline =
249 (row.typed_count() > 1) || (IsHostOnly() && (row.typed_count() == 1));
250 int hup_like_score =
251 promote_to_inline
252 ? HistoryURLProvider::kScoreForBestInlineableResult
253 : HistoryURLProvider::kBaseScoreForNonInlineableResult;
254
255 // Also, if the user types the hostname of a host with a typed
256 // visit, then everything from that host gets given inlineable scores
257 // (because the URL-that-you-typed will go first and everything
258 // else will be assigned one minus the previous score, as coded
259 // at the end of HistoryURLProvider::DoAutocomplete()).
260 if (base::UTF8ToUTF16(gurl.host()) == terms_vector[0])
261 hup_like_score = HistoryURLProvider::kScoreForBestInlineableResult;
262
263 // HistoryURLProvider has the function PromoteOrCreateShorterSuggestion()
264 // that's meant to promote prefixes of the best match (if they've
265 // been visited enough related to the best match) or
266 // create/promote host-only suggestions (even if they've never
267 // been typed). The code is complicated and we don't try to
268 // duplicate the logic here. Instead, we handle a simple case: in
269 // low-typed-count ranges, give host-only matches (i.e.,
270 // http://www.foo.com/ vs. http://www.foo.com/bar.html) a boost so
271 // that the host-only match outscores all the other matches that
272 // would normally have the same base score. This behavior is not
273 // identical to what happens in HistoryURLProvider even in these
274 // low typed count ranges--sometimes it will create/promote when
275 // this test does not (indeed, we cannot create matches like HUP
276 // can) and vice versa--but the underlying philosophy is similar.
277 if (!promote_to_inline && IsHostOnly())
278 hup_like_score++;
279
280 // All the other logic that goes into HUP-like scoring happens in
281 // the tie-breaker case of MatchScoreGreater().
282
283 // Incorporate hup_like_score into raw_score.
284 raw_score = std::max(raw_score, hup_like_score);
285 }
286
287 // If this match is not inlineable and there's a cap on the maximum
288 // score that can be given to non-inlineable matches, apply the cap.
289 if (!can_inline && (max_assigned_score_for_non_inlineable_matches != -1)) {
290 raw_score =
291 std::min(raw_score, max_assigned_score_for_non_inlineable_matches);
292 }
293
294 // Now that we're done processing this entry, correct the offsets of the
295 // matches in |url_matches| so they point to offsets in the original URL
296 // spec, not the cleaned-up URL string that we used for matching.
297 std::vector<size_t> offsets = OffsetsFromTermMatches(url_matches);
298 base::OffsetAdjuster::UnadjustOffsets(adjustments, &offsets);
299 url_matches = ReplaceOffsetsInTermMatches(url_matches, offsets);
26 } 300 }
27 301
28 ScoredHistoryMatch::~ScoredHistoryMatch() { 302 ScoredHistoryMatch::~ScoredHistoryMatch() {
29 } 303 }
30 304
31 // Comparison function for sorting ScoredMatches by their scores with 305 // Comparison function for sorting ScoredMatches by their scores with
32 // intelligent tie-breaking. 306 // intelligent tie-breaking.
33 bool ScoredHistoryMatch::MatchScoreGreater(const ScoredHistoryMatch& m1, 307 bool ScoredHistoryMatch::MatchScoreGreater(const ScoredHistoryMatch& m1,
34 const ScoredHistoryMatch& m2) { 308 const ScoredHistoryMatch& m2) {
35 if (m1.raw_score != m2.raw_score) 309 if (m1.raw_score != m2.raw_score)
(...skipping 23 matching lines...)
59 return m1.IsHostOnly(); 333 return m1.IsHostOnly();
60 } 334 }
61 335
62 // URLs that have been visited more often are better. 336 // URLs that have been visited more often are better.
63 if (m1.url_info.visit_count() != m2.url_info.visit_count()) 337 if (m1.url_info.visit_count() != m2.url_info.visit_count())
64 return m1.url_info.visit_count() > m2.url_info.visit_count(); 338 return m1.url_info.visit_count() > m2.url_info.visit_count();
65 339
66 // URLs that have been visited more recently are better. 340 // URLs that have been visited more recently are better.
67 return m1.url_info.last_visit() > m2.url_info.last_visit(); 341 return m1.url_info.last_visit() > m2.url_info.last_visit();
68 } 342 }
343
344 // static
345 TermMatches ScoredHistoryMatch::FilterTermMatchesByWordStarts(
346 const TermMatches& term_matches,
347 const WordStarts& terms_to_word_starts_offsets,
348 const WordStarts& word_starts,
349 size_t start_pos,
350 size_t end_pos) {
351 // Return early if no filtering is needed.
352 if (start_pos == std::string::npos)
353 return term_matches;
354 TermMatches filtered_matches;
355 WordStarts::const_iterator next_word_starts = word_starts.begin();
356 WordStarts::const_iterator end_word_starts = word_starts.end();
357 for (const auto& term_match : term_matches) {
358 const size_t term_offset =
359 terms_to_word_starts_offsets[term_match.term_num];
360 // Advance next_word_starts until it's >= the position of the term we're
361 // considering (adjusted for where the word begins within the term).
362 while ((next_word_starts != end_word_starts) &&
363 (*next_word_starts < (term_match.offset + term_offset)))
364 ++next_word_starts;
365 // Add the match if it's before the position we start filtering at or
366 // after the position we stop filtering at (assuming we have a position
367 // to stop filtering at) or if it's at a word boundary.
368 if ((term_match.offset < start_pos) ||
369 ((end_pos != std::string::npos) && (term_match.offset >= end_pos)) ||
370 ((next_word_starts != end_word_starts) &&
371 (*next_word_starts == term_match.offset + term_offset)))
372 filtered_matches.push_back(term_match);
373 }
374 return filtered_matches;
375 }
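As a toy illustration of this filtering rule, stripped down to plain offsets (the name, example values, and omission of the per-term word-start offsets are all for illustration only):

#include <cstdio>
#include <vector>

// Keep an offset if it is before |start_pos|, at or after |end_pos|, or if it
// lands exactly on a word start; this mirrors the rule documented above.
std::vector<size_t> FilterOffsets(const std::vector<size_t>& offsets,
                                  const std::vector<size_t>& word_starts,
                                  size_t start_pos,
                                  size_t end_pos) {
  std::vector<size_t> kept;
  size_t next_word = 0;
  for (size_t offset : offsets) {
    while (next_word < word_starts.size() && word_starts[next_word] < offset)
      ++next_word;
    const bool at_word_boundary =
        next_word < word_starts.size() && word_starts[next_word] == offset;
    if (offset < start_pos || offset >= end_pos || at_word_boundary)
      kept.push_back(offset);
  }
  return kept;
}

int main() {
  // Words start at 0, 7, and 12; filter the span [7, 20). Offsets 3 (before
  // the span) and 12 (a word start) survive; 9 (mid-word, in the span) does not.
  for (size_t offset : FilterOffsets({3, 9, 12}, {0, 7, 12}, 7, 20))
    std::printf("%zu ", offset);
  std::printf("\n");
  return 0;
}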
376
377 // static
378 void ScoredHistoryMatch::Init() {
379 // Because the code below is not thread safe, we check that we're only calling
380 // it from one thread: the UI thread. Specifically, we check "if we've heard
381 // of the UI thread then we'd better be on it." The first part is necessary
382 // so unit tests pass. (Many unit tests don't set up the threading naming
383 // system; hence CurrentlyOn(UI thread) will fail.)
384 using content::BrowserThread;
385 DCHECK(!BrowserThread::IsThreadInitialized(BrowserThread::UI) ||
386 BrowserThread::CurrentlyOn(BrowserThread::UI));
387
388 if (initialized)
389 return;
390
391 initialized = true;
392
393 // When doing HUP-like scoring, don't allow a non-inlineable match
394 // to beat the score of good inlineable matches. This is a problem
395 // because if a non-inlineable match ends up with the highest score
396 // from HistoryQuick provider, all HistoryQuick matches get demoted
397 // to non-inlineable scores (scores less than 1200). Without
398 // HUP-like-scoring, these results would actually come from the HUP
399 // and not be demoted, thus outscoring the demoted HQP results.
400 // When the HQP provides these, we need to clamp the non-inlineable
401 // results to preserve this behavior.
402 if (kAlsoDoHupLikeScoring) {
403 max_assigned_score_for_non_inlineable_matches =
404 HistoryURLProvider::kScoreForBestInlineableResult - 1;
405 }
406 bookmark_value_ = OmniboxFieldTrial::HQPBookmarkValue();
407 allow_tld_matches_ = OmniboxFieldTrial::HQPAllowMatchInTLDValue();
408 allow_scheme_matches_ = OmniboxFieldTrial::HQPAllowMatchInSchemeValue();
409
410 InitRawTermScoreToTopicalityScoreArray();
411 InitDaysAgoToRecencyScoreArray();
412 InitHQPExperimentalParams();
413 }
414
415 float ScoredHistoryMatch::GetTopicalityScore(
416 const int num_terms,
417 const base::string16& url,
418 const WordStarts& terms_to_word_starts_offsets,
419 const RowWordStarts& word_starts) {
420 ScoredHistoryMatch::Init();
421 // A vector that accumulates per-term scores. The strongest match--a
422 // match in the hostname at a word boundary--is worth 10 points.
423 // Everything else is less. In general, a match that's not at a word
424 // boundary is worth about 1/4th or 1/5th of a match at the word boundary
425 // in the same part of the URL/title.
426 DCHECK_GT(num_terms, 0);
427 std::vector<int> term_scores(num_terms, 0);
428 WordStarts::const_iterator next_word_starts =
429 word_starts.url_word_starts_.begin();
430 WordStarts::const_iterator end_word_starts =
431 word_starts.url_word_starts_.end();
432 const size_t question_mark_pos = url.find('?');
433 const size_t colon_pos = url.find(':');
434 // The + 3 skips the // that probably appears in the protocol
435 // after the colon. If the protocol doesn't have two slashes after
436 // the colon, that's okay--all this ends up doing is starting our
437 // search for the next / a few characters into the hostname. The
438 // only time this can cause problems is if we have a protocol without
439 // a // after the colon and the hostname is only one or two characters.
440 // This isn't worth worrying about.
441 const size_t end_of_hostname_pos = (colon_pos != std::string::npos)
442 ? url.find('/', colon_pos + 3)
443 : url.find('/');
444 size_t last_part_of_hostname_pos = (end_of_hostname_pos != std::string::npos)
445 ? url.rfind('.', end_of_hostname_pos)
446 : url.rfind('.');
447 // Loop through all URL matches and score them appropriately.
448 // First, filter out matches that are in the path (or later) and not at a
449 // word boundary.
450 url_matches = FilterTermMatchesByWordStarts(
451 url_matches, terms_to_word_starts_offsets, word_starts.url_word_starts_,
452 end_of_hostname_pos, std::string::npos);
453 if (colon_pos != std::string::npos) {
454 // Also filter out matches that are in the scheme and not at a word boundary.
455 url_matches = FilterTermMatchesByWordStarts(
456 url_matches, terms_to_word_starts_offsets, word_starts.url_word_starts_,
457 0, colon_pos);
458 }
459 for (const auto& url_match : url_matches) {
460 const size_t term_offset = terms_to_word_starts_offsets[url_match.term_num];
461 // Advance next_word_starts until it's >= the position of the term we're
462 // considering (adjusted for where the word begins within the term).
463 while ((next_word_starts != end_word_starts) &&
464 (*next_word_starts < (url_match.offset + term_offset))) {
465 ++next_word_starts;
466 }
467 const bool at_word_boundary =
468 (next_word_starts != end_word_starts) &&
469 (*next_word_starts == url_match.offset + term_offset);
470 if ((question_mark_pos != std::string::npos) &&
471 (url_match.offset > question_mark_pos)) {
472 // The match is in the CGI (?...) portion of the URL.
473 DCHECK(at_word_boundary);
474 term_scores[url_match.term_num] += 5;
475 } else if ((end_of_hostname_pos != std::string::npos) &&
476 (url_match.offset > end_of_hostname_pos)) {
477 // The match is in the path.
478 DCHECK(at_word_boundary);
479 term_scores[url_match.term_num] += 8;
480 } else if ((colon_pos == std::string::npos) ||
481 (url_match.offset > colon_pos)) {
482 // The match is in the hostname.
483 if ((last_part_of_hostname_pos == std::string::npos) ||
484 (url_match.offset < last_part_of_hostname_pos)) {
485 // Either there are no dots in the hostname or this match isn't
486 // the last dotted component.
487 term_scores[url_match.term_num] += at_word_boundary ? 10 : 2;
488 } else {
489 // The match is in the last part of a dotted hostname (usually this
490 // is the top-level domain .com, .net, etc.).
491 if (allow_tld_matches_)
492 term_scores[url_match.term_num] += at_word_boundary ? 10 : 0;
493 }
494 } else {
495 // The match is in the protocol (a.k.a. scheme).
496 // Matches not at a word boundary should have been filtered already.
497 DCHECK(at_word_boundary);
498 match_in_scheme = true;
499 if (allow_scheme_matches_)
500 term_scores[url_match.term_num] += 10;
501 }
502 }
503 // Now do the analogous loop over all matches in the title.
504 next_word_starts = word_starts.title_word_starts_.begin();
505 end_word_starts = word_starts.title_word_starts_.end();
506 int word_num = 0;
507 title_matches = FilterTermMatchesByWordStarts(
508 title_matches, terms_to_word_starts_offsets,
509 word_starts.title_word_starts_, 0, std::string::npos);
510 for (const auto& title_match : title_matches) {
511 const size_t term_offset =
512 terms_to_word_starts_offsets[title_match.term_num];
513 // Advance next_word_starts until it's >= the position of the term we're
514 // considering (adjusted for where the word begins within the term).
515 while ((next_word_starts != end_word_starts) &&
516 (*next_word_starts < (title_match.offset + term_offset))) {
517 ++next_word_starts;
518 ++word_num;
519 }
520 if (word_num >= 10)
521 break; // only count the first ten words
522 DCHECK(next_word_starts != end_word_starts);
523 DCHECK_EQ(*next_word_starts, title_match.offset + term_offset)
524 << "not at word boundary";
525 term_scores[title_match.term_num] += 8;
526 }
527 // TODO(mpearson): Restore logic for penalizing out-of-order matches.
528 // (Perhaps discount them by 0.8?)
529 // TODO(mpearson): Consider: if the earliest match occurs late in the string,
530 // should we discount it?
531 // TODO(mpearson): Consider: do we want to score based on how much of the
532 // input string the input covers? (I'm leaning toward no.)
533
534 // Compute the topicality_score as the sum of transformed term_scores.
535 float topicality_score = 0;
536 for (int term_score : term_scores) {
537 // Drop this URL if it seems like a term didn't appear or, more precisely,
538 // didn't appear in a part of the URL or title that we trust enough
539 // to give it credit for. For instance, terms that appear in the middle
540 // of a CGI parameter get no credit. Almost all the matches dropped
541 // due to this test would look stupid if shown to the user.
542 if (term_score == 0)
543 return 0;
544 topicality_score += raw_term_score_to_topicality_score[std::min(
545 term_score, kMaxRawTermScore - 1)];
546 }
547 // TODO(mpearson): If there are multiple terms, consider taking the
548 // geometric mean of per-term scores rather than the arithmetic mean.
549
550 const float final_topicality_score = topicality_score / num_terms;
551
552 // Demote the URL if the topicality score is less than threshold.
553 if (hqp_experimental_scoring_enabled_ &&
554 (final_topicality_score < topicality_threshold_)) {
555 return 0.0;
556 }
557
558 return final_topicality_score;
559 }
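A rough worked example of the weights above, assuming a single input term: one hit on the hostname at a word boundary contributes a raw term score of 10, which the raw_term_score_to_topicality_score table maps to a topicality score of 1.0; add a title hit at a word boundary (+8) and the raw score of 18 maps to about 1.0 + 2.25 * log10(1.8) ≈ 1.57. Any term whose raw score stays at 0 drops the whole match to a score of 0, per the early return above.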
560
561 // static
562 float ScoredHistoryMatch::GetRecencyScore(int last_visit_days_ago) {
563 ScoredHistoryMatch::Init();
564 // Look up the score in days_ago_to_recency_score, treating
565 // everything older than what we've precomputed as the oldest thing
566 // we've precomputed. The std::max is to protect against corruption
567 // in the database (in case last_visit_days_ago is negative).
568 return days_ago_to_recency_score[std::max(
569 std::min(last_visit_days_ago, kDaysToPrecomputeRecencyScoresFor - 1), 0)];
570 }
571
572 // static
573 float ScoredHistoryMatch::GetFrequency(const base::Time& now,
574 const bool bookmarked,
575 const VisitInfoVector& visits) {
576 // Compute the weighted average |value_of_transition| over the last at
577 // most kMaxVisitsToScore visits, where each visit is weighted using
578 // GetRecencyScore() based on how many days ago it happened. Use
579 // kMaxVisitsToScore as the denominator for the average regardless of
580 // how many visits there were in order to penalize a match that has
581 // fewer visits than kMaxVisitsToScore.
582 float summed_visit_points = 0;
583 const size_t max_visit_to_score =
584 std::min(visits.size(), ScoredHistoryMatch::kMaxVisitsToScore);
585 for (size_t i = 0; i < max_visit_to_score; ++i) {
586 int value_of_transition =
587 (visits[i].second == ui::PAGE_TRANSITION_TYPED) ? 20 : 1;
588 if (bookmarked)
589 value_of_transition = std::max(value_of_transition, bookmark_value_);
590 const float bucket_weight =
591 GetRecencyScore((now - visits[i].first).InDays());
592 summed_visit_points += (value_of_transition * bucket_weight);
593 }
594 return visits.size() * summed_visit_points /
595 ScoredHistoryMatch::kMaxVisitsToScore;
596 }
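As a worked check of this weighting (names below are illustrative, and the recency weight is hard-coded rather than looked up): a single typed visit from yesterday contributes 20 * ~1.0 points, and with kMaxVisitsToScore = 10 as the denominator the frequency score comes out to about 2.0, matching the "single typed visit yesterday" row in GetFinalRelevancyScore()'s comment.

#include <algorithm>
#include <cstdio>
#include <utility>
#include <vector>

// Illustrative sketch: one (is_typed_visit, recency_weight) pair per visit,
// most recent first, mirroring the weighting in GetFrequency().
float FrequencySketch(const std::vector<std::pair<bool, float>>& visits,
                      bool bookmarked,
                      int bookmark_value) {
  const size_t kMaxVisitsToScore = 10;
  float summed_visit_points = 0;
  const size_t max_visits = std::min(visits.size(), kMaxVisitsToScore);
  for (size_t i = 0; i < max_visits; ++i) {
    int value_of_transition = visits[i].first ? 20 : 1;
    if (bookmarked)
      value_of_transition = std::max(value_of_transition, bookmark_value);
    summed_visit_points += value_of_transition * visits[i].second;
  }
  return visits.size() * summed_visit_points / kMaxVisitsToScore;
}

int main() {
  // A single typed visit yesterday (recency weight ~1.0) -> 2.0.
  std::printf("%.1f\n", FrequencySketch({{true, 1.0f}}, false, 1));
  return 0;
}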
597
598 // static
599 float ScoredHistoryMatch::GetFinalRelevancyScore(
600 float topicality_score,
601 float frequency_score,
602 const std::vector<ScoreMaxRelevance>& hqp_relevance_buckets) {
603 DCHECK(hqp_relevance_buckets.size() > 0);
604 DCHECK_EQ(hqp_relevance_buckets[0].first, 0.0);
605
606 if (topicality_score == 0)
607 return 0;
608 // Here's how to interpret intermediate_score: Suppose the omnibox
609 // has one input term. Suppose we have a URL for which the omnibox
610 // input term has a single URL hostname hit at a word boundary. (This
611 // implies topicality_score = 1.0.) Then the intermediate_score for
612 // this URL will depend entirely on the frequency_score with
613 // this interpretation:
614 // - a single typed visit more than three months ago, no other visits -> 0.2
615 // - a visit every three days, no typed visits -> 0.706
616 // - a visit every day, no typed visits -> 0.916
617 // - a single typed visit yesterday, no other visits -> 2.0
618 // - a typed visit once a week -> 11.77
619 // - a typed visit every three days -> 14.12
620 // - at least ten typed visits today -> 20.0 (maximum score)
621 //
622 // The below code maps intermediate_score to the range [0, 1399].
623 // For example:
624 // HQP default scoring buckets: "0.0:400,1.5:600,12.0:1300,20.0:1399"
625 // We will linearly interpolate the scores between:
626 // 0 to 1.5 --> 400 to 600
627 // 1.5 to 12.0 --> 600 to 1300
628 // 12.0 to 20.0 --> 1300 to 1399
629 // >= 20.0 --> 1399
630 //
631 // The score maxes out at 1399 (i.e., cannot beat a good inlineable result
632 // from HistoryURL provider).
633 const float intermediate_score = topicality_score * frequency_score;
634
635 // Find the first bucket whose threshold exceeds the intermediate score.
636 size_t i = 1;
637 for (; i < hqp_relevance_buckets.size(); ++i) {
638 const ScoreMaxRelevance& hqp_bucket = hqp_relevance_buckets[i];
639 if (intermediate_score >= hqp_bucket.first) {
640 continue;
641 }
642 const ScoreMaxRelevance& previous_bucket = hqp_relevance_buckets[i - 1];
643 const float slope = ((hqp_bucket.second - previous_bucket.second) /
644 (hqp_bucket.first - previous_bucket.first));
645 return (previous_bucket.second +
646 (slope * (intermediate_score - previous_bucket.first)));
647 }
648 // We only reach this point when the intermediate score is >= the highest
649 // bucket's threshold; return that bucket's relevance score.
650 return hqp_relevance_buckets[i - 1].second;
651 }
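To make the interpolation concrete with the default buckets listed above ("0.0:400,1.5:600,12.0:1300,20.0:1399"): an intermediate score of 5.0 falls in the 1.5-12.0 bucket, so it maps to 600 + (1300 - 600) / (12.0 - 1.5) * (5.0 - 1.5) ≈ 833. A standalone sketch of the same piecewise-linear lookup (illustrative names, no error handling):

#include <cstdio>
#include <utility>
#include <vector>

// Buckets are (intermediate-score threshold, relevance) pairs; the first
// threshold must be 0, matching the DCHECK in GetFinalRelevancyScore().
int RelevanceForIntermediateScore(
    float score,
    const std::vector<std::pair<float, int>>& buckets) {
  for (size_t i = 1; i < buckets.size(); ++i) {
    if (score >= buckets[i].first)
      continue;
    const auto& prev = buckets[i - 1];
    const float slope =
        (buckets[i].second - prev.second) / (buckets[i].first - prev.first);
    return static_cast<int>(prev.second + slope * (score - prev.first));
  }
  return buckets.back().second;
}

int main() {
  const std::vector<std::pair<float, int>> kDefaultBuckets = {
      {0.0f, 400}, {1.5f, 600}, {12.0f, 1300}, {20.0f, 1399}};
  // Prints roughly: 0.0 -> 400, 5.0 -> 833, 25.0 -> 1399.
  for (float score : {0.0f, 5.0f, 25.0f})
    std::printf("%.1f -> %d\n", score,
                RelevanceForIntermediateScore(score, kDefaultBuckets));
  return 0;
}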
652
653 // static
654 void ScoredHistoryMatch::InitHQPExperimentalParams() {
655 // These are default HQP relevance scoring buckets.
656 // See GetFinalRelevancyScore() for details.
657 std::string hqp_relevance_buckets_str = "0.0:400,1.5:600,12.0:1300,20.0:1399";
658
659 // Fetch the experiment params, if there are any.
660 hqp_experimental_scoring_enabled_ =
661 OmniboxFieldTrial::HQPExperimentalScoringEnabled();
662
663 if (hqp_experimental_scoring_enabled_) {
664 // Add the topicality threshold from experiment params.
665 float hqp_experimental_topicality_threshold =
666 OmniboxFieldTrial::HQPExperimentalTopicalityThreshold();
667 topicality_threshold_ = hqp_experimental_topicality_threshold;
668
669 // Add the HQP experimental scoring buckets.
670 std::string hqp_experimental_scoring_buckets =
671 OmniboxFieldTrial::HQPExperimentalScoringBuckets();
672 if (!hqp_experimental_scoring_buckets.empty())
673 hqp_relevance_buckets_str = hqp_experimental_scoring_buckets;
674 }
675
676 // Parse the hqp_relevance_buckets_str string once and store the result in a
677 // vector for easy access.
678 hqp_relevance_buckets_ =
679 new std::vector<ScoredHistoryMatch::ScoreMaxRelevance>();
680
681 bool is_valid_bucket_str = GetHQPBucketsFromString(hqp_relevance_buckets_str,
682 hqp_relevance_buckets_);
683 DCHECK(is_valid_bucket_str);
684 }
685
686 // static
687 bool ScoredHistoryMatch::GetHQPBucketsFromString(
688 const std::string& buckets_str,
689 std::vector<ScoreMaxRelevance>* hqp_buckets) {
690 DCHECK(hqp_buckets != NULL);
691 DCHECK(!buckets_str.empty());
692
693 base::StringPairs kv_pairs;
694 if (base::SplitStringIntoKeyValuePairs(buckets_str, ':', ',', &kv_pairs)) {
695 for (base::StringPairs::const_iterator it = kv_pairs.begin();
696 it != kv_pairs.end(); ++it) {
697 ScoreMaxRelevance bucket;
698 bool is_valid_intermediate_score =
699 base::StringToDouble(it->first, &bucket.first);
700 DCHECK(is_valid_intermediate_score);
701 bool is_valid_hqp_score = base::StringToInt(it->second, &bucket.second);
702 DCHECK(is_valid_hqp_score);
703 hqp_buckets->push_back(bucket);
704 }
705 return true;
706 }
707 return false;
708 }
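For reference, the buckets string is just comma-separated score:relevance pairs; a minimal standalone parse of that format without the base:: string helpers might look like this sketch (illustrative only; the real code above keeps the DCHECK-based validation):

#include <cstdio>
#include <sstream>
#include <string>
#include <utility>
#include <vector>

std::vector<std::pair<double, int>> ParseBuckets(const std::string& spec) {
  std::vector<std::pair<double, int>> buckets;
  std::stringstream stream(spec);
  std::string pair_str;
  while (std::getline(stream, pair_str, ',')) {
    const size_t colon = pair_str.find(':');
    if (colon == std::string::npos)
      continue;  // Malformed pair; the real code DCHECKs instead.
    buckets.emplace_back(std::stod(pair_str.substr(0, colon)),
                         std::stoi(pair_str.substr(colon + 1)));
  }
  return buckets;
}

int main() {
  for (const auto& bucket : ParseBuckets("0.0:400,1.5:600,12.0:1300,20.0:1399"))
    std::printf("%.1f -> %d\n", bucket.first, bucket.second);
  return 0;
}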