Chromium Code Reviews| Index: chrome/browser/loader/safe_browsing_resource_throttle.cc |
| diff --git a/chrome/browser/loader/safe_browsing_resource_throttle.cc b/chrome/browser/loader/safe_browsing_resource_throttle.cc |
| index 8fccf4b69c4e8e2b48cc580cdb8751c89e2cab7b..426f0b4ae1366958645c17b4a20e091966c31f6b 100644 |
| --- a/chrome/browser/loader/safe_browsing_resource_throttle.cc |
| +++ b/chrome/browser/loader/safe_browsing_resource_throttle.cc |
| @@ -238,9 +238,10 @@ void SafeBrowsingResourceThrottle::OnCheckBrowseUrlResult( |
| CHECK(url.is_valid()); |
| CHECK(url_being_checked_.is_valid()); |
| if (url != url_being_checked_) { |
| - char buf[2000]; |
| - snprintf(buf, sizeof(buf), "sbtr::ocbur:%s -- %s\n", url.spec().c_str(), |
| - url_being_checked_.spec().c_str()); |
| + bool url_had_timed_out = base::ContainsValue(timed_out_urls_, url); |
|
Nathan Parker
2017/01/04 18:48:18
A thought: If this disproves the theory, we might
vakh (use Gerrit instead)
2017/01/06 00:31:14
With the recent findings that I posted on http://c
|
| + char buf[1000]; |
|
Scott Hess - ex-Googler
2017/01/04 18:52:10
Most of the minidumps I see are ~5M, so reducing t
vakh (use Gerrit instead)
2017/01/06 00:31:14
Yes, but the buffer was too large and having the f
|
| + snprintf(buf, sizeof(buf), "sbtr::ocbur:%d:%s -- %s\n", url_had_timed_out, |
| + url.spec().c_str(), url_being_checked_.spec().c_str()); |
| base::debug::Alias(buf); |
| CHECK(false) << "buf: " << buf; |
| } |
| @@ -398,6 +399,12 @@ bool SafeBrowsingResourceThrottle::CheckUrl(const GURL& url) { |
| BeginNetLogEvent(NetLogEventType::SAFE_BROWSING_CHECKING_URL, url, nullptr, |
| nullptr); |
| + // If the URL had timed out earlier but is being retried, remove it from the |
|
Nathan Parker
2017/01/04 18:48:18
I don't think they can get retried.
vakh (use Gerrit instead)
2017/01/06 00:31:14
Quite possibly, but I don't understand the SBRT's
|
| + // list of URLs that timed out. |
| + auto iter = std::find(timed_out_urls_.begin(), timed_out_urls_.end(), url); |
| + if (iter != timed_out_urls_.end()) { |
| + timed_out_urls_.erase(iter); |
| + } |
|
Scott Hess - ex-Googler
2017/01/04 18:52:10
timed_out_urls_.erase(std::remove(timed_out_urls_.
vakh (use Gerrit instead)
2017/01/06 00:31:15
The STL version is so much harder to understand, IMO.
Scott Hess - ex-Googler
2017/01/06 00:48:53
ACK on the erase(remove()) idiom, but what did you
vakh (use Gerrit instead)
2017/01/06 01:05:50
Sorry, I missed that.
As far as I can tell, ordere
Scott Hess - ex-Googler
2017/01/06 02:42:27
I mean for timed_out_urls_. So that this code can
vakh (use Gerrit instead)
2017/01/06 02:46:59
Gah, I don't know why I am talking nonsense.
You'r
Scott Hess - ex-Googler
2017/01/06 04:27:18
That's an even better solution!
Scott Hess - ex-Googler
2017/01/10 21:16:12
I took this thread to mean that you had removed th
vakh (use Gerrit instead)
2017/01/10 21:38:08
Changed to std::set but, frankly, I think it doesn
Scott Hess - ex-Googler
2017/01/10 21:43:05
Oh, I agree it doesn't matter. But assume the wor
|
| // Start a timer to abort the check if it takes too long. |
| // TODO(nparker): Set this only when we defer, based on remaining time, |
| // so we don't cancel earlier than necessary. |
| @@ -415,6 +422,7 @@ void SafeBrowsingResourceThrottle::OnCheckUrlTimeout() { |
| OnCheckBrowseUrlResult(url_being_checked_, safe_browsing::SB_THREAT_TYPE_SAFE, |
| safe_browsing::ThreatMetadata()); |
| + timed_out_urls_.push_back(url_being_checked_); |
|
Nathan Parker
2017/01/04 18:48:18
nit: Would it work to just increment a timed_out_
vakh (use Gerrit instead)
2017/01/06 00:31:14
Not sure how that would work since we need to comp
|
| } |
| void SafeBrowsingResourceThrottle::ResumeRequest() { |