Chromium Code Reviews

| OLD | NEW |
|---|---|
| 1 // Copyright (c) 2012 The Chromium Authors. All rights reserved. | 1 // Copyright (c) 2012 The Chromium Authors. All rights reserved. |
| 2 // Use of this source code is governed by a BSD-style license that can be | 2 // Use of this source code is governed by a BSD-style license that can be |
| 3 // found in the LICENSE file. | 3 // found in the LICENSE file. |
| 4 | 4 |
| 5 #include "chrome/browser/safe_browsing/protocol_manager.h" | 5 #include "chrome/browser/safe_browsing/protocol_manager.h" |
| 6 | 6 |
| 7 #include "base/base64.h" | 7 #include "base/base64.h" |
| 8 #include "base/environment.h" | 8 #include "base/environment.h" |
| 9 #include "base/logging.h" | 9 #include "base/logging.h" |
| 10 #include "base/metrics/histogram_macros.h" | 10 #include "base/metrics/histogram_macros.h" |
| (...skipping 242 matching lines...) | |
| 253 } | 253 } |
| 254 | 254 |
| 255 // Serialize and Base64 encode. | 255 // Serialize and Base64 encode. |
| 256 std::string req_data, req_base64; | 256 std::string req_data, req_base64; |
| 257 req.SerializeToString(&req_data); | 257 req.SerializeToString(&req_data); |
| 258 base::Base64Encode(req_data, &req_base64); | 258 base::Base64Encode(req_data, &req_base64); |
| 259 | 259 |
| 260 return req_base64; | 260 return req_base64; |
| 261 } | 261 } |
| 262 | 262 |
| 263 bool SafeBrowsingProtocolManager::ParseV4HashResponse( | |
| 264 const std::string& data_base64, | |
| 265 std::vector<SBFullHashResult>* full_hashes, | |
| 266 base::TimeDelta* cache_lifetime) { | |
| 267 std::string data; | |
| 268 base::Base64Decode(data_base64, &data); | |
| 269 FindFullHashesResponse response; | |
| 270 | |
| 271 if (!response.ParseFromString(data)) | |
| 272 return false; | |
| 273 | |
| 274 if (response.has_negative_cache_duration()) { | |
| 275 // Seconds resolution is good enough so we ignore the nanos field. | |
| 276 *cache_lifetime = base::TimeDelta::FromSeconds( | |
| 277 response.negative_cache_duration().seconds()); | |
| 278 } | |
| 279 | |
| 280 // Loop over the threat matches and fill in full_hashes. | |
| 281 for (const ThreatMatch& match : response.matches()) { | |
| 282 // Make sure the platform and threat entry type match. | |
| 283 if (!(match.has_threat_entry_type() && | |
| 284 match.threat_entry_type() == URL_EXPRESSION && | |
| 285 match.has_platform_type() && | |
| 286 match.platform_type() == CHROME_PLATFORM && | |
awoz (2015/12/30 18:23:13): If you're making this generic, you won't want to f
kcarattini (2016/01/04 00:52:01): Done.
| 287 match.has_threat())) { | |
| 288 continue; | |
| 289 } | |
| 290 | |
| 291 // Fill in the full hash. | |
| 292 SBFullHashResult result; | |
| 293 result.hash = StringToSBFullHash(match.threat().hash()); | |
| 294 | |
| 295 if (match.has_cache_duration()) { | |
| 296 // Seconds resolution is good enough so we ignore the nanos field. | |
| 297 result.cache_duration = base::TimeDelta::FromSeconds( | |
| 298 match.cache_duration().seconds()); | |
| 299 } | |
| 300 | |
| 301 // Different threat types will handle the metadata differently. | |
| 302 if (match.has_threat_type() && match.threat_type() == API_ABUSE && | |
| 303 match.has_threat_entry_metadata()) { | |
| 304 // For API Abuse, store a csv of the returned permissions. | |
| 305 for (const ThreatEntryMetadata::MetadataEntry& m : | |
| 306 match.threat_entry_metadata().entries()) { | |
| 307 if (m.key() == "permission") { | |
| 308 result.metadata += m.value() + ","; | |
awoz (2015/12/30 18:23:13): Do you care about having a trailing comma?
kcarattini (2016/01/04 00:52:01): No, I imagine I'll parse this to a set and check f
| 309 } | |
| 310 } | |
| 311 } | |
| 312 | |
| 313 full_hashes->push_back(result); | |
| 314 } | |
| 315 return true; | |
| 316 } | |
| 317 | |
| 263 void SafeBrowsingProtocolManager::GetV4FullHashes( | 318 void SafeBrowsingProtocolManager::GetV4FullHashes( |
| 264 const std::vector<SBPrefix>& prefixes, | 319 const std::vector<SBPrefix>& prefixes, |
| 265 ThreatType threat_type, | 320 ThreatType threat_type, |
| 266 FullHashCallback callback) { | 321 FullHashCallback callback) { |
| 267 DCHECK(CalledOnValidThread()); | 322 DCHECK(CalledOnValidThread()); |
| 268 // TODO(kcarattini): Implement backoff behavior. | 323 // TODO(kcarattini): Implement backoff behavior. |
| 269 | 324 |
| 270 std::string req_base64 = GetV4HashRequest(prefixes, threat_type); | 325 std::string req_base64 = GetV4HashRequest(prefixes, threat_type); |
| 271 GURL gethash_url = GetV4HashUrl(req_base64); | 326 GURL gethash_url = GetV4HashUrl(req_base64); |
| 272 | 327 |
| 273 net::URLFetcher* fetcher = | 328 net::URLFetcher* fetcher = |
| 274 net::URLFetcher::Create(url_fetcher_id_++, gethash_url, | 329 net::URLFetcher::Create(url_fetcher_id_++, gethash_url, |
| 275 net::URLFetcher::GET, this) | 330 net::URLFetcher::GET, this) |
| 276 .release(); | 331 .release(); |
| 277 // TODO(kcarattini): Implement a new response processor. | |
| 278 v4_hash_requests_[fetcher] = FullHashDetails(callback, | 332 v4_hash_requests_[fetcher] = FullHashDetails(callback, |
| 279 false /* is_download */); | 333 false /* is_download */); |
| 280 | 334 |
| 281 fetcher->SetLoadFlags(net::LOAD_DISABLE_CACHE); | 335 fetcher->SetLoadFlags(net::LOAD_DISABLE_CACHE); |
| 282 fetcher->SetRequestContext(request_context_getter_.get()); | 336 fetcher->SetRequestContext(request_context_getter_.get()); |
| 283 fetcher->Start(); | 337 fetcher->Start(); |
| 284 } | 338 } |
| 285 | 339 |
| 286 void SafeBrowsingProtocolManager::GetFullHashesWithApis( | 340 void SafeBrowsingProtocolManager::GetFullHashesWithApis( |
| 287 const std::vector<SBPrefix>& prefixes, | 341 const std::vector<SBPrefix>& prefixes, |
| (...skipping 18 matching lines...) | |
| 306 // update back off. For now, a failed parse of the chunk means we | 360 // update back off. For now, a failed parse of the chunk means we |
| 307 // drop it. This isn't so bad because the next UPDATE_REQUEST we | 361 // drop it. This isn't so bad because the next UPDATE_REQUEST we |
| 308 // do will report all the chunks we have. If that chunk is still | 362 // do will report all the chunks we have. If that chunk is still |
| 309 // required, the SafeBrowsing servers will tell us to get it again. | 363 // required, the SafeBrowsing servers will tell us to get it again. |
| 310 void SafeBrowsingProtocolManager::OnURLFetchComplete( | 364 void SafeBrowsingProtocolManager::OnURLFetchComplete( |
| 311 const net::URLFetcher* source) { | 365 const net::URLFetcher* source) { |
| 312 DCHECK(CalledOnValidThread()); | 366 DCHECK(CalledOnValidThread()); |
| 313 scoped_ptr<const net::URLFetcher> fetcher; | 367 scoped_ptr<const net::URLFetcher> fetcher; |
| 314 | 368 |
| 315 HashRequests::iterator it = hash_requests_.find(source); | 369 HashRequests::iterator it = hash_requests_.find(source); |
| 370 HashRequests::iterator v4_it = v4_hash_requests_.find(source); | |
| 316 int response_code = source->GetResponseCode(); | 371 int response_code = source->GetResponseCode(); |
| 317 net::URLRequestStatus status = source->GetStatus(); | 372 net::URLRequestStatus status = source->GetStatus(); |
| 318 RecordHttpResponseOrErrorCode(kUmaHashResponseMetricName, status, | 373 RecordHttpResponseOrErrorCode(kUmaHashResponseMetricName, status, |
| 319 response_code); | 374 response_code); |
| 320 if (it != hash_requests_.end()) { | 375 if (it != hash_requests_.end()) { |
| 321 // GetHash response. | 376 // GetHash response. |
| 322 fetcher.reset(it->first); | 377 fetcher.reset(it->first); |
| 323 const FullHashDetails& details = it->second; | 378 const FullHashDetails& details = it->second; |
| 324 std::vector<SBFullHashResult> full_hashes; | 379 std::vector<SBFullHashResult> full_hashes; |
| 325 base::TimeDelta cache_lifetime; | 380 base::TimeDelta cache_lifetime; |
| (...skipping 29 matching lines...) | |
| 355 << " failed with error: " << response_code; | 410 << " failed with error: " << response_code; |
| 356 } | 411 } |
| 357 } | 412 } |
| 358 | 413 |
| 359 // Invoke the callback with full_hashes, even if there was a parse error or | 414 // Invoke the callback with full_hashes, even if there was a parse error or |
| 360 // an error response code (in which case full_hashes will be empty). The | 415 // an error response code (in which case full_hashes will be empty). The |
| 361 // caller can't be blocked indefinitely. | 416 // caller can't be blocked indefinitely. |
| 362 details.callback.Run(full_hashes, cache_lifetime); | 417 details.callback.Run(full_hashes, cache_lifetime); |
| 363 | 418 |
| 364 hash_requests_.erase(it); | 419 hash_requests_.erase(it); |
| 420 } else if (v4_it != v4_hash_requests_.end()) { | |
| 421 // V4 FindFullHashes response. | |
| 422 fetcher.reset(v4_it->first); | |
| 423 const FullHashDetails& details = v4_it->second; | |
| 424 std::vector<SBFullHashResult> full_hashes; | |
| 425 base::TimeDelta cache_lifetime; | |
| 426 if (status.is_success() && response_code == net::HTTP_OK) { | |
| 427 // TODO(kcarattini): Add UMA reporting. | |
| 428 // TODO(kcarattini): Implement backoff and minimum waiting duration | |
| 429 // compliance. | |
| 430 std::string data_base64; | |
awoz (2015/12/30 18:23:13): The protocol returns a serialized protobuf in bina
kcarattini (2016/01/04 00:52:01): Done.
| 431 source->GetResponseAsString(&data_base64); | |
| 432 if (!ParseV4HashResponse(data_base64, &full_hashes, &cache_lifetime)) { | |
| 433 full_hashes.clear(); | |
| 434 // TODO(kcarattini): Add UMA reporting. | |
| 435 } | |
| 436 } else { | |
| 437 // TODO(kcarattini): Handle error by setting backoff interval. | |
| 438 // TODO(kcarattini): Add UMA reporting. | |
| 439 if (status.status() == net::URLRequestStatus::FAILED) { | |
| 440 DVLOG(1) << "SafeBrowsing GetEncodedFullHashes request for: " << | |
| 441 source->GetURL() << " failed with error: " << status.error(); | |
| 442 } else { | |
| 443 DVLOG(1) << "SafeBrowsing GetEncodedFullHashes request for: " << | |
| 444 source->GetURL() << " failed with error: " << response_code; | |
| 445 } | |
| 446 } | |
| 447 | |
| 448 // Invoke the callback with full_hashes, even if there was a parse error or | |
| 449 // an error response code (in which case full_hashes will be empty). The | |
| 450 // caller can't be blocked indefinitely. | |
| 451 details.callback.Run(full_hashes, cache_lifetime); | |
| 452 | |
| 453 v4_hash_requests_.erase(it); | |
| 365 } else { | 454 } else { |
| 366 // Update or chunk response. | 455 // Update or chunk response. |
| 367 fetcher.reset(request_.release()); | 456 fetcher.reset(request_.release()); |
| 368 | 457 |
| 369 if (request_type_ == UPDATE_REQUEST || | 458 if (request_type_ == UPDATE_REQUEST || |
| 370 request_type_ == BACKUP_UPDATE_REQUEST) { | 459 request_type_ == BACKUP_UPDATE_REQUEST) { |
| 371 if (!fetcher.get()) { | 460 if (!fetcher.get()) { |
| 372 // We've timed out waiting for an update response, so we've cancelled | 461 // We've timed out waiting for an update response, so we've cancelled |
| 373 // the update request and scheduled a new one. Ignore this response. | 462 // the update request and scheduled a new one. Ignore this response. |
| 374 return; | 463 return; |
| (...skipping 486 matching lines...) | |
| 861 SafeBrowsingProtocolManager::FullHashDetails::FullHashDetails( | 950 SafeBrowsingProtocolManager::FullHashDetails::FullHashDetails( |
| 862 FullHashCallback callback, | 951 FullHashCallback callback, |
| 863 bool is_download) | 952 bool is_download) |
| 864 : callback(callback), is_download(is_download) {} | 953 : callback(callback), is_download(is_download) {} |
| 865 | 954 |
| 866 SafeBrowsingProtocolManager::FullHashDetails::~FullHashDetails() {} | 955 SafeBrowsingProtocolManager::FullHashDetails::~FullHashDetails() {} |
| 867 | 956 |
| 868 SafeBrowsingProtocolManagerDelegate::~SafeBrowsingProtocolManagerDelegate() {} | 957 SafeBrowsingProtocolManagerDelegate::~SafeBrowsingProtocolManagerDelegate() {} |
| 869 | 958 |
| 870 } // namespace safe_browsing | 959 } // namespace safe_browsing |
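
In the trailing-comma thread above, kcarattini mentions parsing the comma-separated permission metadata into a set rather than stripping the trailing comma. A minimal sketch of what such a follow-up helper could look like, assuming `base::SplitString` from `base/strings/string_split.h`; the helper name is hypothetical and not part of this patch:

```cpp
// Hypothetical helper, not part of this patch: splits the "perm1,perm2,"
// string accumulated in SBFullHashResult::metadata into a set, dropping the
// empty token left by the trailing comma.
#include <set>
#include <string>

#include "base/strings/string_split.h"

std::set<std::string> PermissionsFromMetadata(const std::string& metadata) {
  std::set<std::string> permissions;
  for (const std::string& permission : base::SplitString(
           metadata, ",", base::TRIM_WHITESPACE, base::SPLIT_WANT_NONEMPTY)) {
    permissions.insert(permission);
  }
  return permissions;
}
```

For example, `PermissionsFromMetadata("GEOLOCATION,NOTIFICATIONS,")` would yield the set {"GEOLOCATION", "NOTIFICATIONS"}, so the trailing comma never matters.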
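For reference, a test-style sketch of how the new ParseV4HashResponse path might be exercised. The fixture name, the `pm_` member, and the accessibility of ParseV4HashResponse from a test are assumptions, as are the protobuf mutators (`add_matches()`, `set_threat_entry_type()`, and so on), which simply follow the standard generated-code conventions for the message and field names visible in the diff above:

```cpp
// Hypothetical test sketch, not part of this patch.
TEST_F(SafeBrowsingProtocolManagerTest, ParsesV4FindFullHashesResponse) {
  FindFullHashesResponse response;
  response.mutable_negative_cache_duration()->set_seconds(300);

  ThreatMatch* match = response.add_matches();
  match->set_threat_entry_type(URL_EXPRESSION);
  match->set_platform_type(CHROME_PLATFORM);
  match->set_threat_type(API_ABUSE);
  match->mutable_threat()->set_hash(std::string(32, 'A'));  // Fake 32-byte full hash.
  match->mutable_cache_duration()->set_seconds(600);
  ThreatEntryMetadata::MetadataEntry* entry =
      match->mutable_threat_entry_metadata()->add_entries();
  entry->set_key("permission");
  entry->set_value("GEOLOCATION");

  // Serialize and Base64 encode, mirroring the base64-encoded body the
  // patch currently assumes the server returns.
  std::string data, data_base64;
  response.SerializeToString(&data);
  base::Base64Encode(data, &data_base64);

  std::vector<SBFullHashResult> full_hashes;
  base::TimeDelta cache_lifetime;
  EXPECT_TRUE(pm_->ParseV4HashResponse(data_base64, &full_hashes,
                                       &cache_lifetime));
  ASSERT_EQ(1u, full_hashes.size());
  EXPECT_EQ("GEOLOCATION,", full_hashes[0].metadata);  // Note the trailing comma.
  EXPECT_EQ(base::TimeDelta::FromSeconds(300), cache_lifetime);
}
```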