OLD | NEW |
1 // Copyright (c) 2012 The Chromium Authors. All rights reserved. | 1 // Copyright (c) 2012 The Chromium Authors. All rights reserved. |
2 // Use of this source code is governed by a BSD-style license that can be | 2 // Use of this source code is governed by a BSD-style license that can be |
3 // found in the LICENSE file. | 3 // found in the LICENSE file. |
4 | 4 |
5 #include "chrome/browser/safe_browsing/protocol_manager.h" | 5 #include "chrome/browser/safe_browsing/protocol_manager.h" |
6 | 6 |
7 #ifndef NDEBUG | 7 #ifndef NDEBUG |
8 #include "base/base64.h" | 8 #include "base/base64.h" |
9 #endif | 9 #endif |
10 #include "base/environment.h" | 10 #include "base/environment.h" |
(...skipping 93 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... |
104 bool is_download, ResultType result_type) { | 104 bool is_download, ResultType result_type) { |
105 if (is_download) { | 105 if (is_download) { |
106 UMA_HISTOGRAM_ENUMERATION("SB2.GetHashResultDownload", result_type, | 106 UMA_HISTOGRAM_ENUMERATION("SB2.GetHashResultDownload", result_type, |
107 GET_HASH_RESULT_MAX); | 107 GET_HASH_RESULT_MAX); |
108 } else { | 108 } else { |
109 UMA_HISTOGRAM_ENUMERATION("SB2.GetHashResult", result_type, | 109 UMA_HISTOGRAM_ENUMERATION("SB2.GetHashResult", result_type, |
110 GET_HASH_RESULT_MAX); | 110 GET_HASH_RESULT_MAX); |
111 } | 111 } |
112 } | 112 } |
113 | 113 |
| 114 bool SafeBrowsingProtocolManager::IsUpdateScheduled() const { |
| 115 return update_timer_.IsRunning(); |
| 116 } |
| 117 |
114 SafeBrowsingProtocolManager::~SafeBrowsingProtocolManager() { | 118 SafeBrowsingProtocolManager::~SafeBrowsingProtocolManager() { |
115 // Delete in-progress SafeBrowsing requests. | 119 // Delete in-progress SafeBrowsing requests. |
116 STLDeleteContainerPairFirstPointers(hash_requests_.begin(), | 120 STLDeleteContainerPairFirstPointers(hash_requests_.begin(), |
117 hash_requests_.end()); | 121 hash_requests_.end()); |
118 hash_requests_.clear(); | 122 hash_requests_.clear(); |
119 } | 123 } |
120 | 124 |
121 // We can only have one update or chunk request outstanding, but there may be | 125 // We can only have one update or chunk request outstanding, but there may be |
122 // multiple GetHash requests pending since we don't want to serialize them and | 126 // multiple GetHash requests pending since we don't want to serialize them and |
123 // slow down the user. | 127 // slow down the user. |
(...skipping 39 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... |
163 // what back off / how many times to try), and if that affects the | 167 // what back off / how many times to try), and if that affects the |
164 // update back off. For now, a failed parse of the chunk means we | 168 // update back off. For now, a failed parse of the chunk means we |
165 // drop it. This isn't so bad because the next UPDATE_REQUEST we | 169 // drop it. This isn't so bad because the next UPDATE_REQUEST we |
166 // do will report all the chunks we have. If that chunk is still | 170 // do will report all the chunks we have. If that chunk is still |
167 // required, the SafeBrowsing servers will tell us to get it again. | 171 // required, the SafeBrowsing servers will tell us to get it again. |
168 void SafeBrowsingProtocolManager::OnURLFetchComplete( | 172 void SafeBrowsingProtocolManager::OnURLFetchComplete( |
169 const net::URLFetcher* source) { | 173 const net::URLFetcher* source) { |
170 DCHECK(CalledOnValidThread()); | 174 DCHECK(CalledOnValidThread()); |
171 scoped_ptr<const net::URLFetcher> fetcher; | 175 scoped_ptr<const net::URLFetcher> fetcher; |
172 bool parsed_ok = true; | 176 bool parsed_ok = true; |
173 bool must_back_off = false; // Reduce SafeBrowsing service query frequency. | |
174 | 177 |
175 HashRequests::iterator it = hash_requests_.find(source); | 178 HashRequests::iterator it = hash_requests_.find(source); |
176 if (it != hash_requests_.end()) { | 179 if (it != hash_requests_.end()) { |
177 // GetHash response. | 180 // GetHash response. |
178 fetcher.reset(it->first); | 181 fetcher.reset(it->first); |
179 const FullHashDetails& details = it->second; | 182 const FullHashDetails& details = it->second; |
180 std::vector<SBFullHashResult> full_hashes; | 183 std::vector<SBFullHashResult> full_hashes; |
181 bool can_cache = false; | 184 bool can_cache = false; |
182 if (source->GetStatus().is_success() && | 185 if (source->GetStatus().is_success() && |
183 (source->GetResponseCode() == 200 || | 186 (source->GetResponseCode() == 200 || |
(...skipping 40 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... |
224 fetcher.reset(request_.release()); | 227 fetcher.reset(request_.release()); |
225 | 228 |
226 if (request_type_ == UPDATE_REQUEST) { | 229 if (request_type_ == UPDATE_REQUEST) { |
227 if (!fetcher.get()) { | 230 if (!fetcher.get()) { |
228 // We've timed out waiting for an update response, so we've cancelled | 231 // We've timed out waiting for an update response, so we've cancelled |
229 // the update request and scheduled a new one. Ignore this response. | 232 // the update request and scheduled a new one. Ignore this response. |
230 return; | 233 return; |
231 } | 234 } |
232 | 235 |
233 // Cancel the update response timeout now that we have the response. | 236 // Cancel the update response timeout now that we have the response. |
234 update_timer_.Stop(); | 237 timeout_timer_.Stop(); |
235 } | 238 } |
236 | 239 |
237 if (source->GetStatus().is_success() && source->GetResponseCode() == 200) { | 240 if (source->GetStatus().is_success() && source->GetResponseCode() == 200) { |
238 // We have data from the SafeBrowsing service. | 241 // We have data from the SafeBrowsing service. |
239 std::string data; | 242 std::string data; |
240 source->GetResponseAsString(&data); | 243 source->GetResponseAsString(&data); |
241 parsed_ok = HandleServiceResponse( | 244 parsed_ok = HandleServiceResponse( |
242 source->GetURL(), data.data(), static_cast<int>(data.length())); | 245 source->GetURL(), data.data(), static_cast<int>(data.length())); |
243 if (!parsed_ok) { | 246 if (!parsed_ok) { |
244 VLOG(1) << "SafeBrowsing request for: " << source->GetURL() | 247 VLOG(1) << "SafeBrowsing request for: " << source->GetURL() |
245 << " failed parse."; | 248 << " failed parse."; |
246 must_back_off = true; | |
247 chunk_request_urls_.clear(); | 249 chunk_request_urls_.clear(); |
248 UpdateFinished(false); | 250 UpdateFinished(false); |
249 } | 251 } |
250 | 252 |
251 switch (request_type_) { | 253 switch (request_type_) { |
252 case CHUNK_REQUEST: | 254 case CHUNK_REQUEST: |
253 if (parsed_ok) | 255 if (parsed_ok) { |
254 chunk_request_urls_.pop_front(); | 256 chunk_request_urls_.pop_front(); |
| 257 if (chunk_request_urls_.empty() && !chunk_pending_to_write_) |
| 258 UpdateFinished(true); |
| 259 } |
255 break; | 260 break; |
256 case UPDATE_REQUEST: | 261 case UPDATE_REQUEST: |
257 if (chunk_request_urls_.empty() && parsed_ok) { | 262 if (chunk_request_urls_.empty() && parsed_ok) { |
258 // We are up to date since the servers gave us nothing new, so we | 263 // We are up to date since the servers gave us nothing new, so we |
259 // are done with this update cycle. | 264 // are done with this update cycle. |
260 UpdateFinished(true); | 265 UpdateFinished(true); |
261 } | 266 } |
262 break; | 267 break; |
263 default: | 268 default: |
264 NOTREACHED(); | 269 NOTREACHED(); |
265 break; | 270 break; |
266 } | 271 } |
267 } else { | 272 } else { |
268 // The SafeBrowsing service error, or very bad response code: back off. | 273 // The SafeBrowsing service error, or very bad response code: back off. |
269 must_back_off = true; | |
270 if (request_type_ == CHUNK_REQUEST) | 274 if (request_type_ == CHUNK_REQUEST) |
271 chunk_request_urls_.clear(); | 275 chunk_request_urls_.clear(); |
272 UpdateFinished(false); | 276 UpdateFinished(false); |
273 if (source->GetStatus().status() == net::URLRequestStatus::FAILED) { | 277 if (source->GetStatus().status() == net::URLRequestStatus::FAILED) { |
274 VLOG(1) << "SafeBrowsing request for: " << source->GetURL() | 278 VLOG(1) << "SafeBrowsing request for: " << source->GetURL() |
275 << " failed with error: " << source->GetStatus().error(); | 279 << " failed with error: " << source->GetStatus().error(); |
276 } else { | 280 } else { |
277 VLOG(1) << "SafeBrowsing request for: " << source->GetURL() | 281 VLOG(1) << "SafeBrowsing request for: " << source->GetURL() |
278 << " failed with error: " << source->GetResponseCode(); | 282 << " failed with error: " << source->GetResponseCode(); |
279 } | 283 } |
280 } | 284 } |
281 } | 285 } |
282 | 286 |
283 // Schedule a new update request if we've finished retrieving all the chunks | |
284 // from the previous update. We treat the update request and the chunk URLs it | |
285 // contains as an atomic unit as far as back off is concerned. | |
286 if (chunk_request_urls_.empty() && | |
287 (request_type_ == CHUNK_REQUEST || request_type_ == UPDATE_REQUEST)) | |
288 ScheduleNextUpdate(must_back_off); | |
289 | |
290 // Get the next chunk if available. | 287 // Get the next chunk if available. |
291 IssueChunkRequest(); | 288 IssueChunkRequest(); |
292 } | 289 } |
293 | 290 |
294 bool SafeBrowsingProtocolManager::HandleServiceResponse(const GURL& url, | 291 bool SafeBrowsingProtocolManager::HandleServiceResponse(const GURL& url, |
295 const char* data, | 292 const char* data, |
296 int length) { | 293 int length) { |
297 DCHECK(CalledOnValidThread()); | 294 DCHECK(CalledOnValidThread()); |
298 SafeBrowsingProtocolParser parser; | 295 SafeBrowsingProtocolParser parser; |
299 | 296 |
(...skipping 65 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... |
365 VLOG(1) << "ParseChunk error for chunk: " << chunk_url.url | 362 VLOG(1) << "ParseChunk error for chunk: " << chunk_url.url |
366 << ", Base64Encode(data): " << encoded_chunk | 363 << ", Base64Encode(data): " << encoded_chunk |
367 << ", length: " << length; | 364 << ", length: " << length; |
368 #endif | 365 #endif |
369 return false; | 366 return false; |
370 } | 367 } |
371 | 368 |
372 // Chunks to add to storage. Pass ownership of |chunks|. | 369 // Chunks to add to storage. Pass ownership of |chunks|. |
373 if (!chunks->empty()) { | 370 if (!chunks->empty()) { |
374 chunk_pending_to_write_ = true; | 371 chunk_pending_to_write_ = true; |
375 delegate_->AddChunks(chunk_url.list_name, chunks.release()); | 372 delegate_->AddChunks( |
| 373 chunk_url.list_name, chunks.release(), |
| 374 base::Bind(&SafeBrowsingProtocolManager::OnAddChunksComplete, |
| 375 base::Unretained(this))); |
376 } | 376 } |
377 | 377 |
378 break; | 378 break; |
379 } | 379 } |
380 | 380 |
381 default: | 381 default: |
382 return false; | 382 return false; |
383 } | 383 } |
384 | 384 |
385 return true; | 385 return true; |
(...skipping 100 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... |
486 request_->SetRequestContext(request_context_getter_); | 486 request_->SetRequestContext(request_context_getter_); |
487 chunk_request_start_ = base::Time::Now(); | 487 chunk_request_start_ = base::Time::Now(); |
488 request_->Start(); | 488 request_->Start(); |
489 } | 489 } |
490 | 490 |
491 void SafeBrowsingProtocolManager::OnGetChunksComplete( | 491 void SafeBrowsingProtocolManager::OnGetChunksComplete( |
492 const std::vector<SBListChunkRanges>& lists, bool database_error) { | 492 const std::vector<SBListChunkRanges>& lists, bool database_error) { |
493 DCHECK(CalledOnValidThread()); | 493 DCHECK(CalledOnValidThread()); |
494 DCHECK_EQ(request_type_, UPDATE_REQUEST); | 494 DCHECK_EQ(request_type_, UPDATE_REQUEST); |
495 if (database_error) { | 495 if (database_error) { |
496 UpdateFinished(false); | 496 // The update was not successful, but don't back off. |
497 ScheduleNextUpdate(false); | 497 UpdateFinished(false, false); |
498 return; | 498 return; |
499 } | 499 } |
500 | 500 |
501 // Format our stored chunks: | 501 // Format our stored chunks: |
502 std::string list_data; | 502 std::string list_data; |
503 bool found_malware = false; | 503 bool found_malware = false; |
504 bool found_phishing = false; | 504 bool found_phishing = false; |
505 for (size_t i = 0; i < lists.size(); ++i) { | 505 for (size_t i = 0; i < lists.size(); ++i) { |
506 list_data.append(FormatList(lists[i])); | 506 list_data.append(FormatList(lists[i])); |
507 if (lists[i].name == safe_browsing_util::kPhishingList) | 507 if (lists[i].name == safe_browsing_util::kPhishingList) |
(...skipping 15 matching lines...) Expand all Loading... |
523 | 523 |
524 GURL update_url = UpdateUrl(); | 524 GURL update_url = UpdateUrl(); |
525 request_.reset(net::URLFetcher::Create( | 525 request_.reset(net::URLFetcher::Create( |
526 url_fetcher_id_++, update_url, net::URLFetcher::POST, this)); | 526 url_fetcher_id_++, update_url, net::URLFetcher::POST, this)); |
527 request_->SetLoadFlags(net::LOAD_DISABLE_CACHE); | 527 request_->SetLoadFlags(net::LOAD_DISABLE_CACHE); |
528 request_->SetRequestContext(request_context_getter_); | 528 request_->SetRequestContext(request_context_getter_); |
529 request_->SetUploadData("text/plain", list_data); | 529 request_->SetUploadData("text/plain", list_data); |
530 request_->Start(); | 530 request_->Start(); |
531 | 531 |
532 // Begin the update request timeout. | 532 // Begin the update request timeout. |
533 update_timer_.Start(FROM_HERE, TimeDelta::FromSeconds(kSbMaxUpdateWaitSec), | 533 timeout_timer_.Start(FROM_HERE, TimeDelta::FromSeconds(kSbMaxUpdateWaitSec), |
534 this, | 534 this, |
535 &SafeBrowsingProtocolManager::UpdateResponseTimeout); | 535 &SafeBrowsingProtocolManager::UpdateResponseTimeout); |
536 } | 536 } |
537 | 537 |
538 // If we haven't heard back from the server with an update response, this method | 538 // If we haven't heard back from the server with an update response, this method |
539 // will run. Close the current update session and schedule another update. | 539 // will run. Close the current update session and schedule another update. |
540 void SafeBrowsingProtocolManager::UpdateResponseTimeout() { | 540 void SafeBrowsingProtocolManager::UpdateResponseTimeout() { |
541 DCHECK(CalledOnValidThread()); | 541 DCHECK(CalledOnValidThread()); |
542 DCHECK_EQ(request_type_, UPDATE_REQUEST); | 542 DCHECK_EQ(request_type_, UPDATE_REQUEST); |
543 request_.reset(); | 543 request_.reset(); |
544 UpdateFinished(false); | 544 UpdateFinished(false); |
545 ScheduleNextUpdate(true); | |
546 } | 545 } |
547 | 546 |
548 void SafeBrowsingProtocolManager::OnChunkInserted() { | 547 void SafeBrowsingProtocolManager::OnAddChunksComplete() { |
549 DCHECK(CalledOnValidThread()); | 548 DCHECK(CalledOnValidThread()); |
550 chunk_pending_to_write_ = false; | 549 chunk_pending_to_write_ = false; |
551 | 550 |
552 if (chunk_request_urls_.empty()) { | 551 if (chunk_request_urls_.empty()) { |
553 UMA_HISTOGRAM_LONG_TIMES("SB2.Update", Time::Now() - last_update_); | 552 UMA_HISTOGRAM_LONG_TIMES("SB2.Update", Time::Now() - last_update_); |
554 UpdateFinished(true); | 553 UpdateFinished(true); |
555 } else { | 554 } else { |
556 IssueChunkRequest(); | 555 IssueChunkRequest(); |
557 } | 556 } |
558 } | 557 } |
(...skipping 18 matching lines...) Expand all Loading... |
577 } | 576 } |
578 | 577 |
579 void SafeBrowsingProtocolManager::HandleGetHashError(const Time& now) { | 578 void SafeBrowsingProtocolManager::HandleGetHashError(const Time& now) { |
580 DCHECK(CalledOnValidThread()); | 579 DCHECK(CalledOnValidThread()); |
581 base::TimeDelta next = GetNextBackOffInterval( | 580 base::TimeDelta next = GetNextBackOffInterval( |
582 &gethash_error_count_, &gethash_back_off_mult_); | 581 &gethash_error_count_, &gethash_back_off_mult_); |
583 next_gethash_time_ = now + next; | 582 next_gethash_time_ = now + next; |
584 } | 583 } |
585 | 584 |
586 void SafeBrowsingProtocolManager::UpdateFinished(bool success) { | 585 void SafeBrowsingProtocolManager::UpdateFinished(bool success) { |
| 586 UpdateFinished(success, !success); |
| 587 } |
| 588 |
| 589 void SafeBrowsingProtocolManager::UpdateFinished(bool success, bool back_off) { |
587 DCHECK(CalledOnValidThread()); | 590 DCHECK(CalledOnValidThread()); |
588 UMA_HISTOGRAM_COUNTS("SB2.UpdateSize", update_size_); | 591 UMA_HISTOGRAM_COUNTS("SB2.UpdateSize", update_size_); |
589 update_size_ = 0; | 592 update_size_ = 0; |
590 delegate_->UpdateFinished(success); | 593 delegate_->UpdateFinished(success); |
| 594 ScheduleNextUpdate(back_off); |
591 } | 595 } |
592 | 596 |
593 GURL SafeBrowsingProtocolManager::UpdateUrl() const { | 597 GURL SafeBrowsingProtocolManager::UpdateUrl() const { |
594 std::string url = SafeBrowsingProtocolManagerHelper::ComposeUrl( | 598 std::string url = SafeBrowsingProtocolManagerHelper::ComposeUrl( |
595 url_prefix_, "downloads", client_name_, version_, additional_query_); | 599 url_prefix_, "downloads", client_name_, version_, additional_query_); |
596 return GURL(url); | 600 return GURL(url); |
597 } | 601 } |
598 | 602 |
599 GURL SafeBrowsingProtocolManager::GetHashUrl() const { | 603 GURL SafeBrowsingProtocolManager::GetHashUrl() const { |
600 std::string url = SafeBrowsingProtocolManagerHelper::ComposeUrl( | 604 std::string url = SafeBrowsingProtocolManagerHelper::ComposeUrl( |
(...skipping 35 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... |
636 FullHashCallback callback, bool is_download) | 640 FullHashCallback callback, bool is_download) |
637 : callback(callback), | 641 : callback(callback), |
638 is_download(is_download) { | 642 is_download(is_download) { |
639 } | 643 } |
640 | 644 |
641 SafeBrowsingProtocolManager::FullHashDetails::~FullHashDetails() { | 645 SafeBrowsingProtocolManager::FullHashDetails::~FullHashDetails() { |
642 } | 646 } |
643 | 647 |
644 SafeBrowsingProtocolManagerDelegate::~SafeBrowsingProtocolManagerDelegate() { | 648 SafeBrowsingProtocolManagerDelegate::~SafeBrowsingProtocolManagerDelegate() { |
645 } | 649 } |
OLD | NEW |