OLD | NEW |
(Empty) | |
| 1 // Copyright 2016 The Chromium Authors. All rights reserved. |
| 2 // Use of this source code is governed by a BSD-style license that can be |
| 3 // found in the LICENSE file. |
| 4 |
#include "net/reporting/reporting_service.h"

#include <algorithm>

#include "base/bind.h"
#include "base/json/json_reader.h"
#include "base/json/json_writer.h"
#include "base/memory/ptr_util.h"
#include "base/strings/string_number_conversions.h"
#include "base/strings/string_util.h"
#include "base/time/default_tick_clock.h"
#include "net/http/http_request_info.h"
#include "net/http/http_response_headers.h"
#include "net/http/http_response_info.h"
#include "net/reporting/reporting_metrics.h"
#include "net/url_request/url_fetcher.h"
#include "net/url_request/url_request_context_getter.h"
| 20 |
| 21 namespace { |
| 22 const char kDefaultGroupName[] = "default"; |
| 23 |
| 24 // Per |
| 25 // https://greenbytes.de/tech/webdav/draft-reschke-http-jfv-02.html#rfc.section.
4 |
| 26 // assuming |normalized_header| is the result of completing step 1. |
| 27 std::unique_ptr<base::Value> ParseJFV( |
| 28 const std::string& normalized_header_value) { |
| 29 std::string value = "[" + normalized_header_value + "]"; |
| 30 return base::JSONReader::Read(value); |
| 31 } |
| 32 |
| 33 bool IsOriginSecure(const GURL& url) { |
| 34 return url.SchemeIsCryptographic(); |
| 35 } |
| 36 } // namespace |
| 37 |
| 38 namespace net { |
| 39 |
// static
// Returns the default tuning for endpoint retention, delivery backoff, and
// the report queue.
ReportingService::Policy ReportingService::Policy::GetDefault() {
  Policy policy;

  // Endpoints: kept up to a week, at most 100, dropped after 5 failures.
  // Backoff on failed deliveries: 5s initial delay, doubling, 10% jitter,
  // capped at one hour; backoff entries themselves live up to a week.
  policy.endpoint_lifetime = base::TimeDelta::FromDays(7);
  policy.endpoint_backoff.num_errors_to_ignore = 0;
  policy.endpoint_backoff.initial_delay_ms = 5 * 1000;
  policy.endpoint_backoff.multiply_factor = 2.0;
  policy.endpoint_backoff.jitter_factor = 0.1;
  policy.endpoint_backoff.maximum_backoff_ms = 60 * 60 * 1000;
  policy.endpoint_backoff.entry_lifetime_ms = 7 * 24 * 60 * 60 * 1000;
  policy.endpoint_backoff.always_use_initial_delay = false;
  policy.max_endpoint_failures = 5;
  policy.max_endpoint_count = 100;

  // Reports: kept up to two days, at most 100, dropped after 5 attempts.
  policy.report_lifetime = base::TimeDelta::FromDays(2);
  policy.max_report_failures = 5;
  policy.max_report_count = 100;

  policy.persist_reports_across_network_changes = false;

  return policy;
}
| 63 |
| 64 ReportingService::ReportingService(const Policy& policy) |
| 65 : policy_(policy), clock_(new base::DefaultTickClock()) {} |
| 66 |
ReportingService::~ReportingService() {
  // Record SHUTDOWN as the fate of every report and endpoint still held, so
  // the histograms account for everything that was ever queued/configured.
  for (auto& report : reports_)
    HistogramReportInternal(REPORT_FATE_SHUTDOWN, *report);
  size_t endpoint_count = endpoints_.size();
  for (size_t i = 0; i < endpoint_count; ++i)
    HistogramEndpoint(ENDPOINT_FATE_SHUTDOWN);
}
| 74 |
// Installs the uploader used by SendReports(); until one is set,
// SendReports() is a no-op.
void ReportingService::set_uploader(
    std::unique_ptr<ReportingUploader> uploader) {
  uploader_ = std::move(uploader);
}
| 79 |
| 80 void ReportingService::QueueReport(std::unique_ptr<base::Value> body, |
| 81 const GURL& url, |
| 82 const GURL& origin, |
| 83 const std::string& group, |
| 84 const std::string& type) { |
| 85 auto report = base::MakeUnique<ReportingReport>(); |
| 86 report->body = std::move(body); |
| 87 report->url = url.GetAsReferrer(); |
| 88 report->origin = origin; |
| 89 report->group = group; |
| 90 report->type = type; |
| 91 report->timestamp = clock_->NowTicks(); |
| 92 report->attempts = 0; |
| 93 report->pending = false; |
| 94 reports_.push_back(std::move(report)); |
| 95 } |
| 96 |
| 97 void ReportingService::ProcessHeader(const GURL& origin, |
| 98 const std::string& header_value) { |
| 99 if (!IsOriginSecure(origin)) { |
| 100 HistogramHeader(HEADER_FATE_REJECTED_INSECURE_ORIGIN); |
| 101 return; |
| 102 } |
| 103 |
| 104 std::vector<std::string> errors; |
| 105 std::vector<EndpointTuple> tuples; |
| 106 if (!EndpointTuple::FromHeader(header_value, &tuples, &errors)) { |
| 107 HistogramHeader(HEADER_FATE_REJECTED_INVALID_JSON); |
| 108 return; |
| 109 } |
| 110 |
| 111 if (errors.empty()) |
| 112 HistogramHeader(HEADER_FATE_ACCEPTED); |
| 113 else |
| 114 HistogramHeader(HEADER_FATE_ACCEPTED_WITH_INVALID_ENDPOINT); |
| 115 |
| 116 // TODO: Plumb these out to the DevTools console somehow. |
| 117 for (const std::string& error : errors) { |
| 118 LOG(WARNING) << "Origin " << origin.spec() << " sent " |
| 119 << "Report-To header with error: " << error << ": " |
| 120 << header_value; |
| 121 } |
| 122 |
| 123 for (auto& tuple : tuples) |
| 124 ProcessEndpointTuple(origin, tuple); |
| 125 |
| 126 CollectGarbage(); |
| 127 } |
| 128 |
| 129 void ReportingService::SendReports() { |
| 130 if (!uploader_) |
| 131 return; |
| 132 |
| 133 std::map<Endpoint*, std::vector<ReportingReport*>> endpoint_reports; |
| 134 for (auto& report : reports_) { |
| 135 // If the report is already contained in another pending upload, don't |
| 136 // upload it twice. |
| 137 if (report->pending) |
| 138 continue; |
| 139 Endpoint* endpoint = FindEndpointForReport(*report); |
| 140 // If there's no available endpoint for the report, leave it for later. |
| 141 if (!endpoint) |
| 142 continue; |
| 143 // If the chosen endpoint is pending, don't start another upload; let this |
| 144 // report go in the next upload instead. |
| 145 if (endpoint->pending) |
| 146 continue; |
| 147 |
| 148 report->pending = true; |
| 149 endpoint_reports[endpoint].push_back(report.get()); |
| 150 } |
| 151 |
| 152 for (auto& pair : endpoint_reports) { |
| 153 Endpoint* endpoint = pair.first; |
| 154 const std::vector<ReportingReport*>& reports = pair.second; |
| 155 |
| 156 std::string json = SerializeReports(reports); |
| 157 |
| 158 uploader_->AttemptDelivery( |
| 159 endpoint->url, json, |
| 160 base::Bind(&ReportingService::OnDeliveryAttemptComplete, |
| 161 base::Unretained(this), |
| 162 base::MakeUnique<Delivery>(endpoint->url, reports))); |
| 163 |
| 164 endpoint->last_used = clock_->NowTicks(); |
| 165 |
| 166 for (auto& report : reports) |
| 167 ++report->attempts; |
| 168 |
| 169 HistogramDeliveryContent(reports.size(), json.length()); |
| 170 } |
| 171 } |
| 172 |
| 173 void ReportingService::set_clock_for_testing( |
| 174 std::unique_ptr<base::TickClock> clock) { |
| 175 clock_ = std::move(clock); |
| 176 } |
| 177 |
| 178 bool ReportingService::HasEndpointForTesting(const GURL& endpoint_url) { |
| 179 return GetEndpointByURL(endpoint_url); |
| 180 } |
| 181 |
| 182 bool ReportingService::HasClientForTesting(const GURL& endpoint_url, |
| 183 const GURL& origin) { |
| 184 Endpoint* endpoint = GetEndpointByURL(endpoint_url); |
| 185 if (!endpoint) |
| 186 return false; |
| 187 return endpoint->clients.count(origin) > 0; |
| 188 } |
| 189 |
| 190 int ReportingService::GetEndpointFailuresForTesting(const GURL& endpoint_url) { |
| 191 Endpoint* endpoint = GetEndpointByURL(endpoint_url); |
| 192 if (!endpoint) |
| 193 return -1; |
| 194 return endpoint->backoff.failure_count(); |
| 195 } |
| 196 |
| 197 void ReportingService::CollectGarbageForTesting() { |
| 198 CollectGarbage(); |
| 199 } |
| 200 |
// A Client is one origin's registration on an endpoint: the group it reports
// under, whether its subdomains are covered, and how long the registration
// lives (|ttl| measured from |creation|).
ReportingService::Client::Client(const GURL& origin,
                                 bool subdomains,
                                 const std::string& group,
                                 base::TimeDelta ttl,
                                 base::TimeTicks creation)
    : origin(origin),
      subdomains(subdomains),
      group(group),
      ttl(ttl),
      creation(creation) {}
| 211 |
// NOTE(review): |backoff| is constructed with a pointer to |backoff_policy|,
// so the policy must outlive this Endpoint — callers pass
// policy_.endpoint_backoff, which lives on the owning ReportingService.
ReportingService::Endpoint::Endpoint(const GURL& url,
                                     const BackoffEntry::Policy& backoff_policy,
                                     base::TickClock* clock)
    : url(url),
      backoff(&backoff_policy, clock),
      last_used(clock->NowTicks()),
      pending(false) {}
ReportingService::Endpoint::~Endpoint() {}
| 220 |
| 221 bool ReportingService::Endpoint::is_expired(base::TimeTicks now) const { |
| 222 for (auto& pair : clients) |
| 223 if (!pair.second.is_expired(now)) |
| 224 return false; |
| 225 return true; |
| 226 } |
| 227 |
| 228 // static |
| 229 bool ReportingService::EndpointTuple::FromDictionary( |
| 230 const base::DictionaryValue& dictionary, |
| 231 EndpointTuple* tuple_out, |
| 232 std::string* error_out) { |
| 233 if (!dictionary.HasKey("url")) { |
| 234 *error_out = "url missing"; |
| 235 return false; |
| 236 } |
| 237 std::string url_string; |
| 238 if (!dictionary.GetString("url", &url_string)) { |
| 239 *error_out = "url not a string"; |
| 240 return false; |
| 241 } |
| 242 tuple_out->url = GURL(url_string); |
| 243 |
| 244 tuple_out->group = kDefaultGroupName; |
| 245 if (dictionary.HasKey("group")) { |
| 246 if (!dictionary.GetString("group", &tuple_out->group)) { |
| 247 *error_out = "group present but not a string"; |
| 248 return false; |
| 249 } |
| 250 } |
| 251 |
| 252 tuple_out->subdomains = false; |
| 253 if (dictionary.HasKey("includeSubdomains")) { |
| 254 if (!dictionary.GetBoolean("includeSubdomains", &tuple_out->subdomains)) { |
| 255 *error_out = "includeSubdomains present but not boolean"; |
| 256 return false; |
| 257 } |
| 258 } |
| 259 |
| 260 if (!dictionary.HasKey("max-age")) { |
| 261 *error_out = "max-age missing"; |
| 262 return false; |
| 263 } |
| 264 int ttl_sec; |
| 265 if (!dictionary.GetInteger("max-age", &ttl_sec)) { |
| 266 *error_out = "max-age not an integer"; |
| 267 return false; |
| 268 } |
| 269 tuple_out->ttl = base::TimeDelta::FromSeconds(ttl_sec); |
| 270 |
| 271 return true; |
| 272 } |
| 273 |
| 274 // static |
| 275 bool ReportingService::EndpointTuple::FromHeader( |
| 276 const std::string& header, |
| 277 std::vector<ReportingService::EndpointTuple>* tuples_out, |
| 278 std::vector<std::string>* errors_out) { |
| 279 tuples_out->clear(); |
| 280 errors_out->clear(); |
| 281 |
| 282 std::unique_ptr<base::Value> value(ParseJFV(header)); |
| 283 if (!value) { |
| 284 errors_out->push_back("failed to parse JSON field value."); |
| 285 return false; |
| 286 } |
| 287 |
| 288 base::ListValue* list; |
| 289 bool was_list = value->GetAsList(&list); |
| 290 DCHECK(was_list); |
| 291 |
| 292 base::DictionaryValue* item; |
| 293 for (size_t i = 0; i < list->GetSize(); i++) { |
| 294 std::string error_prefix = "endpoint " + base::SizeTToString(i + 1) + |
| 295 " of " + base::SizeTToString(list->GetSize()) + |
| 296 ": "; |
| 297 if (!list->GetDictionary(i, &item)) { |
| 298 errors_out->push_back(error_prefix + "is not a dictionary"); |
| 299 continue; |
| 300 } |
| 301 EndpointTuple tuple; |
| 302 std::string error; |
| 303 if (!EndpointTuple::FromDictionary(*item, &tuple, &error)) { |
| 304 errors_out->push_back(error_prefix + error); |
| 305 continue; |
| 306 } |
| 307 if (!IsOriginSecure(tuple.url)) { |
| 308 errors_out->push_back(error_prefix + "url " + tuple.url.spec() + |
| 309 " is insecure"); |
| 310 continue; |
| 311 } |
| 312 if (tuple.ttl < base::TimeDelta()) { |
| 313 errors_out->push_back(error_prefix + "ttl is negative"); |
| 314 continue; |
| 315 } |
| 316 tuples_out->push_back(tuple); |
| 317 } |
| 318 return true; |
| 319 } |
| 320 |
| 321 std::string ReportingService::EndpointTuple::ToString() const { |
| 322 return "(url=" + url.spec() + ", subdomains=" + |
| 323 (subdomains ? "true" : "false") + ", ttl=" + |
| 324 base::Int64ToString(ttl.InSeconds()) + "s" + ", group=" + group + ")"; |
| 325 } |
| 326 |
// A Delivery records which reports went into one upload to |endpoint_url|,
// so the completion callback can resolve them. The raw report pointers stay
// valid while the upload is in flight because pending reports are never
// erased by CollectGarbage().
ReportingService::Delivery::Delivery(
    const GURL& endpoint_url,
    const std::vector<ReportingReport*>& reports)
    : endpoint_url(endpoint_url), reports(reports) {}

ReportingService::Delivery::~Delivery() {}
| 333 |
// Applies one parsed Report-To entry for |origin|: a positive TTL installs
// (or refreshes) a client on the endpoint; a zero TTL removes it.
void ReportingService::ProcessEndpointTuple(const GURL& origin,
                                            const EndpointTuple& tuple) {
  Endpoint* endpoint = GetEndpointByURL(tuple.url);

  bool endpoint_exists = endpoint;
  bool client_exists = endpoint && endpoint->clients.count(origin) > 0;

  // Record the fate before mutating any state below.
  HistogramHeaderEndpointInternal(endpoint_exists, client_exists, tuple.ttl);

  // Remove any existing client first; when the TTL is positive it is
  // re-inserted below with the new parameters.
  if (client_exists)
    endpoint->clients.erase(origin);

  // ttl == 0 means "clear" (negative TTLs were filtered out in FromHeader).
  // An endpoint left clientless here is reaped by the CollectGarbage() call
  // in ProcessHeader().
  if (tuple.ttl <= base::TimeDelta())
    return;

  if (!endpoint_exists) {
    endpoint = new Endpoint(tuple.url, policy_.endpoint_backoff, clock_.get());
    endpoints_.insert(std::make_pair(tuple.url, base::WrapUnique(endpoint)));
  }

  if (!client_exists)
    HistogramClient(tuple.ttl);

  Client client(origin, tuple.subdomains, tuple.group, tuple.ttl,
                clock_->NowTicks());
  endpoint->clients.insert(std::make_pair(origin, client));
}
| 361 |
// Completion callback for one upload started in SendReports().
void ReportingService::OnDeliveryAttemptComplete(
    const std::unique_ptr<Delivery>& delivery,
    ReportingUploader::Outcome outcome) {
  // Note: HistogramDeliveryOutcome is called from within the uploader since it
  // has access to the net error code and HTTP response.

  // Whatever the outcome, these reports are no longer in flight.
  for (auto report : delivery->reports) {
    DCHECK(report->pending);
    report->pending = false;
  }

  // Re-look-up the endpoint rather than caching a pointer: it may no longer
  // exist by the time the upload finishes.
  Endpoint* endpoint = GetEndpointByURL(delivery->endpoint_url);
  if (endpoint) {
    endpoint->backoff.InformOfRequest(outcome == ReportingUploader::SUCCESS);
    endpoint->pending = false;
  }

  switch (outcome) {
    case ReportingUploader::SUCCESS:
      // Delivered reports are done; remove them from the queue.
      for (auto report : delivery->reports) {
        HistogramReportInternal(REPORT_FATE_DELIVERED, *report);
        DequeueReport(report);
      }
      break;
    case ReportingUploader::FAILURE:
      // Reports have been marked not-pending and can be retried later.
      // BackoffEntry has been informed of failure.
      break;
    case ReportingUploader::REMOVE_ENDPOINT:
      // Note: This is not specified, but seems the obvious intention.
      if (endpoint) {
        HistogramEndpoint(ENDPOINT_FATE_REQUESTED_REMOVAL);
        endpoints_.erase(delivery->endpoint_url);
      }
      break;
  }

  CollectGarbage();
}
| 401 |
| 402 void ReportingService::CollectGarbage() { |
| 403 base::TimeTicks now = clock_->NowTicks(); |
| 404 |
| 405 { |
| 406 std::vector<ReportVector::iterator> to_erase; |
| 407 for (auto it = reports_.begin(); it != reports_.end(); ++it) { |
| 408 ReportingReport* report = it->get(); |
| 409 if (report->pending) |
| 410 continue; |
| 411 if (policy_.max_report_failures > 0 && |
| 412 report->attempts >= policy_.max_report_failures) { |
| 413 HistogramReportInternal(REPORT_FATE_FAILED, *report); |
| 414 to_erase.push_back(it); |
| 415 } else if (!policy_.report_lifetime.is_zero() && |
| 416 now - report->timestamp > policy_.report_lifetime) { |
| 417 HistogramReportInternal(REPORT_FATE_EXPIRED, *report); |
| 418 to_erase.push_back(it); |
| 419 } |
| 420 } |
| 421 |
| 422 for (auto it = reports_.begin(); |
| 423 it != reports_.end() && |
| 424 reports_.size() - to_erase.size() > policy_.max_report_count; |
| 425 ++it) { |
| 426 if (it->get()->pending) |
| 427 continue; |
| 428 |
| 429 HistogramReportInternal(REPORT_FATE_EVICTED, *it->get()); |
| 430 to_erase.push_back(it); |
| 431 } |
| 432 |
| 433 for (auto it : to_erase) |
| 434 reports_.erase(it); |
| 435 } |
| 436 |
| 437 { |
| 438 std::vector<EndpointMap::iterator> to_erase; |
| 439 for (auto it = endpoints_.begin(); it != endpoints_.end(); ++it) { |
| 440 Endpoint* endpoint = it->second.get(); |
| 441 if (endpoint->pending) |
| 442 continue; |
| 443 if (endpoint->is_expired(now)) { |
| 444 HistogramEndpoint(ENDPOINT_FATE_EXPIRED); |
| 445 to_erase.push_back(it); |
| 446 } else if (!policy_.endpoint_lifetime.is_zero() && |
| 447 now - endpoint->last_used > policy_.endpoint_lifetime) { |
| 448 HistogramEndpoint(ENDPOINT_FATE_UNUSED); |
| 449 to_erase.push_back(it); |
| 450 // Don't remove failed endpoints until the BackoffEntry okays it, to |
| 451 // avoid |
| 452 // hammering failing endpoints by removing and re-adding them |
| 453 // constantly. |
| 454 } else if (policy_.max_endpoint_failures >= 0 && |
| 455 endpoint->backoff.CanDiscard() && |
| 456 endpoint->backoff.failure_count() > |
| 457 policy_.max_endpoint_failures) { |
| 458 HistogramEndpoint(ENDPOINT_FATE_FAILED); |
| 459 to_erase.push_back(it); |
| 460 } |
| 461 } |
| 462 |
| 463 while (endpoints_.size() - to_erase.size() > policy_.max_endpoint_count) { |
| 464 auto oldest_it = endpoints_.end(); |
| 465 |
| 466 for (auto it = endpoints_.begin(); it != endpoints_.end(); ++it) { |
| 467 if (it->second->pending) |
| 468 continue; |
| 469 if (oldest_it == endpoints_.end() || |
| 470 it->second->last_used < oldest_it->second->last_used) { |
| 471 oldest_it = it; |
| 472 } |
| 473 } |
| 474 |
| 475 if (oldest_it == endpoints_.end()) |
| 476 break; |
| 477 |
| 478 HistogramEndpoint(ENDPOINT_FATE_EVICTED); |
| 479 to_erase.push_back(oldest_it); |
| 480 |
| 481 // Gross kludge: Keep us from picking this endpoint again. |
| 482 oldest_it->second->pending = true; |
| 483 } |
| 484 |
| 485 for (auto it : to_erase) |
| 486 endpoints_.erase(it); |
| 487 } |
| 488 } |
| 489 |
| 490 ReportingService::Endpoint* ReportingService::FindEndpointForReport( |
| 491 const ReportingReport& report) { |
| 492 // TODO: This is O(count of all endpoints, regardless of client origins). |
| 493 // TODO: The spec doesn't prioritize *.bar.foo.com over *.foo.com when |
| 494 // choosing which endpoint to upload a report for baz.bar.foo.com to. |
| 495 // That seems wrong, but we need clarification on the spec end. |
| 496 for (auto& pair : endpoints_) { |
| 497 Endpoint* endpoint = pair.second.get(); |
| 498 if (endpoint->is_expired(clock_->NowTicks()) || |
| 499 endpoint->backoff.ShouldRejectRequest() || |
| 500 !DoesEndpointMatchReport(*endpoint, report)) { |
| 501 continue; |
| 502 } |
| 503 return endpoint; |
| 504 } |
| 505 return nullptr; |
| 506 } |
| 507 |
| 508 bool ReportingService::DoesEndpointMatchReport(const Endpoint& endpoint, |
| 509 const ReportingReport& report) { |
| 510 for (auto& pair : endpoint.clients) { |
| 511 const Client& client = pair.second; |
| 512 if (client.is_expired(clock_->NowTicks())) |
| 513 continue; |
| 514 if (!base::EqualsCaseInsensitiveASCII(client.group, report.group)) |
| 515 continue; |
| 516 if (client.origin == report.origin) |
| 517 return true; |
| 518 if (client.subdomains && report.origin.DomainIs(client.origin.host_piece())) |
| 519 return true; |
| 520 } |
| 521 return false; |
| 522 } |
| 523 |
| 524 std::string ReportingService::SerializeReports( |
| 525 const std::vector<ReportingReport*>& reports) { |
| 526 base::ListValue collection; |
| 527 for (auto& report : reports) { |
| 528 std::unique_ptr<base::DictionaryValue> data(new base::DictionaryValue()); |
| 529 data->SetInteger("age", |
| 530 (clock_->NowTicks() - report->timestamp).InMilliseconds()); |
| 531 data->SetString("type", report->type); |
| 532 data->SetString("url", report->url.spec()); |
| 533 data->Set("report", report->body->DeepCopy()); |
| 534 collection.Append(std::move(data)); |
| 535 } |
| 536 |
| 537 std::string json = ""; |
| 538 bool written = base::JSONWriter::Write(collection, &json); |
| 539 DCHECK(written); |
| 540 return json; |
| 541 } |
| 542 |
| 543 ReportingService::Endpoint* ReportingService::GetEndpointByURL( |
| 544 const GURL& url) { |
| 545 auto it = endpoints_.find(url); |
| 546 if (it == endpoints_.end()) |
| 547 return nullptr; |
| 548 return it->second.get(); |
| 549 } |
| 550 |
| 551 void ReportingService::DequeueReport(ReportingReport* report) { |
| 552 // TODO: This is O(N). |
| 553 for (auto it = reports_.begin(); it != reports_.end(); ++it) { |
| 554 if (it->get() == report) { |
| 555 reports_.erase(it); |
| 556 return; |
| 557 } |
| 558 } |
| 559 } |
| 560 |
| 561 void ReportingService::HistogramHeaderEndpointInternal( |
| 562 bool endpoint_exists, |
| 563 bool client_exists, |
| 564 base::TimeDelta ttl) const { |
| 565 HeaderEndpointFate fate; |
| 566 if (ttl > base::TimeDelta()) { |
| 567 if (client_exists) |
| 568 fate = HEADER_ENDPOINT_FATE_SET_CLIENT_UPDATED; |
| 569 else if (endpoint_exists) |
| 570 fate = HEADER_ENDPOINT_FATE_SET_CLIENT_CREATED; |
| 571 else |
| 572 fate = HEADER_ENDPOINT_FATE_SET_ENDPOINT_CREATED; |
| 573 } else { |
| 574 if (client_exists) |
| 575 fate = HEADER_ENDPOINT_FATE_CLEAR_CLIENT_REMOVED; |
| 576 else if (endpoint_exists) |
| 577 fate = HEADER_ENDPOINT_FATE_CLEAR_NO_CLIENT; |
| 578 else |
| 579 fate = HEADER_ENDPOINT_FATE_CLEAR_NO_ENDPOINT; |
| 580 } |
| 581 HistogramHeaderEndpoint(fate, ttl); |
| 582 } |
| 583 |
// Records |fate| for |report| along with its current age and attempt count.
void ReportingService::HistogramReportInternal(
    ReportFate fate,
    const ReportingReport& report) const {
  HistogramReport(fate, clock_->NowTicks() - report.timestamp, report.attempts);
}
| 589 |
| 590 } // namespace net |
OLD | NEW |