Chromium Code Reviews| Index: net/cert_net/cert_net_fetcher_impl.cc |
| diff --git a/net/cert_net/cert_net_fetcher_impl.cc b/net/cert_net/cert_net_fetcher_impl.cc |
| new file mode 100644 |
| index 0000000000000000000000000000000000000000..9190d7fae2fb40aa913f6f7914b49c88dc5941e7 |
| --- /dev/null |
| +++ b/net/cert_net/cert_net_fetcher_impl.cc |
| @@ -0,0 +1,524 @@ |
| +// Copyright 2015 The Chromium Authors. All rights reserved. |
| +// Use of this source code is governed by a BSD-style license that can be |
| +// found in the LICENSE file. |
| + |
| +#include "net/cert_net/cert_net_fetcher_impl.h" |
| + |
| +#include "base/logging.h" |
| +#include "base/numerics/safe_math.h" |
| +#include "base/stl_util.h" |
| +#include "base/timer/timer.h" |
| +#include "net/base/load_flags.h" |
| +#include "net/url_request/redirect_info.h" |
| +#include "net/url_request/url_request_context.h" |
| + |
| +// TODO(eroman): Add support for POST parameters. |
| +// TODO(eroman): Add controls for bypassing the cache. |
| +// TODO(eroman): Add a maximum number of in-flight jobs/requests. |
| + |
| +namespace net { |
| + |
namespace {

// NOTE(review): reviewer suggested using the net error codes (e.g.
// ERR_TIMED_OUT) directly rather than aliasing them -- confirm with owners.

// The error returned when the response body exceeded the size limit.
const Error kNetErrorResponseTooLarge = ERR_FILE_TOO_BIG;

// The error returned when the URL could not be fetched because it was not an
// allowed scheme (http).
const Error kNetErrorNotHttpUrl = ERR_DISALLOWED_URL_SCHEME;

// The error returned when the URL fetch did not complete in time.
const Error kNetErrorTimedOut = ERR_TIMED_OUT;

// The error returned when the response was HTTP however it did not have a
// status of 200/OK.
// TODO(eroman): Use a more specific error code.
const Error kNetErrorNot200HttpResponse = ERR_FAILED;

// The size of the buffer used for reading the response body of the URLRequest.
const int kReadBufferSizeInBytes = 4096;

// The maximum size in bytes for the response body when fetching a CRL.
const int kMaxResponseSizeInBytesForCrl = 5 * 1024 * 1024;

// The maximum size in bytes for the response body when fetching an AIA URL
// (caIssuers/OCSP).
const int kMaxResponseSizeInBytesForAia = 64 * 1024;

// The default timeout in seconds for fetch requests.
const int kTimeoutSeconds = 15;
| + |
| +// Policy for which URLs are allowed to be fetched. This is called both for the |
| +// initial URL and for each redirect. Returns OK on success or a net error |
| +// code on failure. |
| +Error CanFetchUrl(const GURL& url) { |
| + if (!url.SchemeIs("http")) |
| + return kNetErrorNotHttpUrl; |
| + return OK; |
| +} |
| + |
| +base::TimeDelta GetTimeout(int timeout_milliseconds) { |
| + if (timeout_milliseconds == CertNetFetcher::DEFAULT) |
| + return base::TimeDelta::FromSeconds(kTimeoutSeconds); |
| + return base::TimeDelta::FromMilliseconds(timeout_milliseconds); |
| +} |
| + |
| +size_t GetMaxResponseBytes(int max_response_bytes, |
| + size_t default_max_response_bytes) { |
| + if (max_response_bytes == CertNetFetcher::DEFAULT) |
| + return default_max_response_bytes; |
| + |
| + // Ensure that the specified limit is not negative, and cannot result in an |
| + // overflow while reading. |
| + base::CheckedNumeric<size_t> check(max_response_bytes); |
| + check += kReadBufferSizeInBytes; |
| + DCHECK(check.IsValid()); |
| + |
| + return max_response_bytes; |
| +} |
| + |
// The HTTP method used for the URLRequest. POST parameters are not yet
// supported (see the TODO at the top of this file).
enum HttpMethod {
  HTTP_METHOD_GET,
  HTTP_METHOD_POST,
};

}  // namespace
| + |
// CertNetFetcherImpl::Request tracks an outstanding call to Fetch(). The
// Request pointer itself doubles as the opaque RequestId handed back to the
// caller (see Job::AddRequest() and CertNetFetcherImpl::CancelRequest()).
struct CertNetFetcherImpl::Request {
  Request(FetchCallback callback, Job* job) : callback(callback), job(job) {}

  // The callback to invoke when the request has completed.
  FetchCallback callback;

  // A non-owned pointer to the job that is executing the request (and in effect
  // owns |this|).
  Job* job;

 private:
  DISALLOW_COPY_AND_ASSIGN(Request);
};
| + |
// The input parameters that identify a fetch. Two Fetch() calls whose
// RequestParams compare equal share the same underlying Job (see FindJob()).
struct CertNetFetcherImpl::RequestParams {
  RequestParams();

  // Strict weak ordering over all fields, used to keep the JobSet sorted.
  bool operator<(const RequestParams& other) const;

  GURL url;
  HttpMethod http_method;
  size_t max_response_bytes;

  // If set to a value <= 0 then means "no timeout".
  base::TimeDelta timeout;

  // IMPORTANT: When adding fields to this structure, update operator<().

 private:
  DISALLOW_COPY_AND_ASSIGN(RequestParams);
};
| + |
// Defaults to a GET with a zero size limit; callers are expected to fill in
// the remaining fields (url, max_response_bytes, timeout) before use.
CertNetFetcherImpl::RequestParams::RequestParams()
    : http_method(HTTP_METHOD_GET), max_response_bytes(0) {
}
| + |
| +bool CertNetFetcherImpl::RequestParams::operator<( |
| + const RequestParams& other) const { |
| + if (url != other.url) |
| + return url < other.url; |
| + if (http_method != other.http_method) |
| + return http_method < other.http_method; |
| + if (max_response_bytes != other.max_response_bytes) |
| + return max_response_bytes < other.max_response_bytes; |
| + return timeout < other.timeout; |
|
davidben
2015/03/27 23:34:32
Aside: Comparing timeouts here is kinda weird. Tho
eroman
2015/04/04 21:57:15
Yeah agreed. There are some different strategies f
|
| +} |
| + |
// CertNetFetcherImpl::Job tracks an outstanding URLRequest as well as all of
// the pending requests for it.
class CertNetFetcherImpl::Job : public URLRequest::Delegate {
 public:
  Job(scoped_ptr<RequestParams> request_params, CertNetFetcherImpl* parent);
  ~Job() override;

  // Cancels the job and all requests attached to it. No callbacks will be
  // invoked following cancellation.
  void Cancel();

  const RequestParams& request_params() const { return *request_params_; }

  // Attaches a request to the job. When the job completes it will invoke
  // |callback|. The returned RequestId stays valid until the request is
  // cancelled or its callback has run.
  RequestId AddRequest(const FetchCallback& callback);

  // Removes |request| from the job and deletes it. If this was the last
  // attached request, the job cancels itself.
  void CancelRequest(Request* request);

  // Creates and starts a URLRequest for the job. After the request has
  // completed, OnJobCompleted() will be invoked and all the registered
  // requests notified of completion.
  void StartURLRequest(URLRequestContext* context);

 private:
  // The pointers in RequestList are owned by the Job.
  using RequestList = std::vector<Request*>;

  // Implementation of URLRequest::Delegate
  void OnReceivedRedirect(URLRequest* request,
                          const RedirectInfo& redirect_info,
                          bool* defer_redirect) override;
  void OnResponseStarted(URLRequest* request) override;
  void OnReadCompleted(URLRequest* request, int bytes_read) override;

  // Clears the URLRequest and timer. Helper for doing work common to
  // cancellation and job completion.
  void Stop();

  // Reads as much data as available from |request|.
  void ReadBody(URLRequest* request);

  // Helper to copy the partial bytes read from the read IOBuffer to an
  // aggregated buffer. Returns false once reading must stop (EOF, error, or
  // the size limit was exceeded).
  bool ConsumeBytesRead(URLRequest* request, int num_bytes);

  // Called once the job has exceeded its deadline.
  void OnTimeout();

  // Called when the URLRequest has completed (either success or failure).
  void OnUrlRequestCompleted(URLRequest* request);

  // Called when the Job has completed. The job may finish in response to a
  // timeout, an invalid URL, or the URLRequest completing. By the time this
  // method is called, the response variables have been assigned
  // (result_net_error_ et al).
  void OnJobCompleted();

  // The requests attached to this job.
  RequestList requests_;

  // The input parameters for starting a URLRequest.
  scoped_ptr<RequestParams> request_params_;

  // The URLRequest response information.
  std::vector<uint8_t> response_body_;
  Error result_net_error_;

  scoped_ptr<URLRequest> url_request_;
  scoped_refptr<IOBuffer> read_buffer_;

  // Used to timeout the job when the URLRequest takes too long. This timer is
  // also used for notifying a failure to start the URLRequest.
  base::OneShotTimer<Job> timer_;

  // Non-owned pointer to the CertNetFetcherImpl that created this job, or
  // nullptr once the job has been cancelled.
  CertNetFetcherImpl* parent_;

  DISALLOW_COPY_AND_ASSIGN(Job);
};
| + |
// Takes ownership of |request_params|. The result starts as ERR_IO_PENDING
// until the fetch completes (or fails to start).
CertNetFetcherImpl::Job::Job(scoped_ptr<RequestParams> request_params,
                             CertNetFetcherImpl* parent)
    : request_params_(request_params.Pass()),
      result_net_error_(ERR_IO_PENDING),
      parent_(parent) {
}
| + |
// Destruction implies cancellation: attached requests are deleted without
// their callbacks being run.
CertNetFetcherImpl::Job::~Job() {
  Cancel();
}
| + |
void CertNetFetcherImpl::Job::Cancel() {
  // Clear |parent_| first so nothing below touches the (possibly being
  // destroyed) CertNetFetcherImpl.
  parent_ = nullptr;
  // Deleting the requests drops their callbacks without invoking them.
  STLDeleteElements(&requests_);
  // Stop the timer and abort any in-flight URLRequest.
  Stop();
}
| + |
CertNetFetcher::RequestId CertNetFetcherImpl::Job::AddRequest(
    const FetchCallback& callback) {
  requests_.push_back(new Request(callback, this));
  // The Request pointer itself serves as the opaque RequestId.
  return requests_.back();
}
| + |
void CertNetFetcherImpl::Job::CancelRequest(Request* request) {
  // If |request| turns out to be the last one attached, |this| must be
  // deleted -- |delete_this| defers that destruction to the very end of this
  // method so no member is touched afterwards.
  scoped_ptr<Job> delete_this;

  RequestList::iterator it =
      std::find(requests_.begin(), requests_.end(), request);
  CHECK(it != requests_.end());
  requests_.erase(it);
  delete request;

  // If there are no longer any requests attached to the job then
  // cancel and delete it.
  if (requests_.empty() && parent_)
    delete_this = parent_->RemoveJob(this);
}
| + |
void CertNetFetcherImpl::Job::StartURLRequest(URLRequestContext* context) {
  Error error = CanFetchUrl(request_params_->url);
  if (error != OK) {
    result_net_error_ = error;
    // The CertNetFetcher's API contract is that requests always complete
    // asynchronously. Use the timer class so the task is easily cancelled.
    timer_.Start(FROM_HERE, base::TimeDelta(), this, &Job::OnJobCompleted);
    return;
  }

  // Start the URLRequest.
  read_buffer_ = new IOBuffer(kReadBufferSizeInBytes);
  url_request_ =
      context->CreateRequest(request_params_->url, DEFAULT_PRIORITY, this);
  if (request_params_->http_method == HTTP_METHOD_POST)
    url_request_->set_method("POST");
  // Do not send or save cookies for these fetches.
  url_request_->SetLoadFlags(LOAD_DO_NOT_SAVE_COOKIES |
                             LOAD_DO_NOT_SEND_COOKIES);
  url_request_->Start();

  // Start a timer to limit how long the job runs for. A timeout <= 0 means
  // "no timeout".
  if (request_params_->timeout > base::TimeDelta())
    timer_.Start(FROM_HERE, request_params_->timeout, this, &Job::OnTimeout);
}
| + |
void CertNetFetcherImpl::Job::OnReceivedRedirect(
    URLRequest* request,
    const RedirectInfo& redirect_info,
    bool* defer_redirect) {
  DCHECK_EQ(url_request_.get(), request);

  // Ensure that the new URL matches the policy.
  Error error = CanFetchUrl(redirect_info.new_url);
  if (error != OK) {
    request->CancelWithError(error);
    // NOTE(review): OnUrlRequestCompleted() ends up deleting |request| while
    // it is still on the stack (acknowledged in the review) -- confirm this
    // is safe for URLRequest.
    OnUrlRequestCompleted(request);
    return;
  }
}
| + |
void CertNetFetcherImpl::Job::OnResponseStarted(URLRequest* request) {
  DCHECK_EQ(url_request_.get(), request);

  if (!request->status().is_success()) {
    OnUrlRequestCompleted(request);
    return;
  }

  // Require a status of exactly 200/OK; any other response code (including
  // other 2xx codes) fails the fetch.
  if (request->GetResponseCode() != 200) {
    request->CancelWithError(kNetErrorNot200HttpResponse);
    OnUrlRequestCompleted(request);
    return;
  }

  ReadBody(request);
}
| + |
| +void CertNetFetcherImpl::Job::OnReadCompleted(URLRequest* request, |
| + int bytes_read) { |
| + DCHECK_EQ(url_request_.get(), request); |
| + |
| + // Keep reading the response body. |
| + if (ConsumeBytesRead(request, bytes_read)) |
| + ReadBody(request); |
| +} |
| + |
| +void CertNetFetcherImpl::Job::Stop() { |
| + timer_.Stop(); |
| + url_request_.reset(); |
| +} |
| + |
| +void CertNetFetcherImpl::Job::ReadBody(URLRequest* request) { |
| + // Read as many bytes as are available synchronously. |
| + int num_bytes; |
| + while ( |
| + request->Read(read_buffer_.get(), kReadBufferSizeInBytes, &num_bytes)) { |
| + if (!ConsumeBytesRead(request, num_bytes)) |
| + return; |
| + } |
| + |
| + // Check whether the read failed synchronously. |
| + if (!request->status().is_io_pending()) |
| + OnUrlRequestCompleted(request); |
| + return; |
| +} |
| + |
// Helper to copy the partial bytes read from the read IOBuffer to an
// aggregated buffer. Returns true if reading should continue, false if the
// request has finished (EOF, error, or the size limit was exceeded).
bool CertNetFetcherImpl::Job::ConsumeBytesRead(URLRequest* request,
                                               int num_bytes) {
  if (num_bytes <= 0) {
    // Error while reading, or EOF.
    OnUrlRequestCompleted(request);
    return false;
  }

  // Enforce maximum size bound.
  if (num_bytes + response_body_.size() > request_params_->max_response_bytes) {
    request->CancelWithError(kNetErrorResponseTooLarge);
    OnUrlRequestCompleted(request);
    return false;
  }

  // Append the data to |response_body_|.
  // NOTE(review): the reserve() is likely redundant given insert() with
  // random-access iterators, but was kept deliberately (see review thread).
  response_body_.reserve(response_body_.size() + num_bytes);
  response_body_.insert(response_body_.end(), read_buffer_->data(),
                        read_buffer_->data() + num_bytes);
  return true;
}
| + |
void CertNetFetcherImpl::Job::OnTimeout() {
  result_net_error_ = kNetErrorTimedOut;
  // Abort the URLRequest; completion is reported directly below rather than
  // waiting for the cancellation to be observed.
  url_request_->CancelWithError(result_net_error_);
  OnJobCompleted();
}
| + |
| +void CertNetFetcherImpl::Job::OnUrlRequestCompleted(URLRequest* request) { |
| + DCHECK_EQ(request, url_request_.get()); |
| + |
| + if (request->status().is_success()) |
| + result_net_error_ = OK; |
| + else |
| + result_net_error_ = static_cast<Error>(request->status().error()); |
| + |
| + OnJobCompleted(); |
| +} |
| + |
void CertNetFetcherImpl::Job::OnJobCompleted() {
  // Stop the timer and clear the URLRequest.
  Stop();

  // Invoking the callbacks is subtle as state may be mutated while iterating
  // through the callbacks:
  //
  // * The parent CertNetFetcherImpl may be deleted
  // * Requests in this job may be cancelled

  // |delete_this| takes ownership from the fetcher and keeps the job alive
  // until this method returns.
  scoped_ptr<Job> delete_this = parent_->RemoveJob(this);
  parent_->SetCurrentlyCompletingJob(this);

  // Hand each request its result. Requests are detached one at a time so a
  // callback that cancels other requests or deletes the fetcher stays safe.
  while (!requests_.empty()) {
    scoped_ptr<Request> request(requests_.front());
    requests_.erase(requests_.begin());
    request->callback.Run(result_net_error_, response_body_);
  }

  // |parent_| is cleared (via Cancel()) if a callback destroyed the fetcher.
  if (parent_)
    parent_->SetCurrentlyCompletingJob(nullptr);
}
| + |
// |context| is not owned; it is used to create the URLRequests and is
// presumed to outlive the fetcher -- TODO confirm against the header.
CertNetFetcherImpl::CertNetFetcherImpl(URLRequestContext* context)
    : currently_completing_job_(nullptr), context_(context) {
}
| + |
CertNetFetcherImpl::~CertNetFetcherImpl() {
  STLDeleteElements(&jobs_);
  // A job that is mid-way through running its callbacks was already removed
  // from |jobs_| (its ownership lives on Job::OnJobCompleted()'s stack), so
  // it must be cancelled separately.
  if (currently_completing_job_)
    currently_completing_job_->Cancel();
}
| + |
void CertNetFetcherImpl::CancelRequest(RequestId request_id) {
  DCHECK(thread_checker_.CalledOnValidThread());

  // RequestId is the Request pointer in disguise (see Job::AddRequest()).
  Request* request = reinterpret_cast<Request*>(request_id);
  request->job->CancelRequest(request);
}
| + |
| +CertNetFetcher::RequestId CertNetFetcherImpl::FetchCaIssuers( |
| + const GURL& url, |
| + int timeout_milliseconds, |
| + int max_response_bytes, |
| + const FetchCallback& callback) { |
| + scoped_ptr<RequestParams> request_params(new RequestParams); |
| + |
| + request_params->url = url; |
| + request_params->http_method = HTTP_METHOD_GET; |
| + request_params->timeout = GetTimeout(timeout_milliseconds); |
| + request_params->max_response_bytes = |
| + GetMaxResponseBytes(max_response_bytes, kMaxResponseSizeInBytesForAia); |
| + |
| + return Fetch(request_params.Pass(), callback); |
| +} |
| + |
| +CertNetFetcher::RequestId CertNetFetcherImpl::FetchCrl( |
| + const GURL& url, |
| + int timeout_milliseconds, |
| + int max_response_bytes, |
| + const FetchCallback& callback) { |
| + scoped_ptr<RequestParams> request_params(new RequestParams); |
| + |
| + request_params->url = url; |
| + request_params->http_method = HTTP_METHOD_GET; |
| + request_params->timeout = GetTimeout(timeout_milliseconds); |
| + request_params->max_response_bytes = |
| + GetMaxResponseBytes(max_response_bytes, kMaxResponseSizeInBytesForCrl); |
| + |
| + return Fetch(request_params.Pass(), callback); |
| +} |
| + |
| +CertNetFetcher::RequestId CertNetFetcherImpl::FetchOcsp( |
| + const GURL& url, |
| + int timeout_milliseconds, |
| + int max_response_bytes, |
| + const FetchCallback& callback) { |
| + scoped_ptr<RequestParams> request_params(new RequestParams); |
| + |
| + request_params->url = url; |
| + request_params->http_method = HTTP_METHOD_GET; |
| + request_params->timeout = GetTimeout(timeout_milliseconds); |
| + request_params->max_response_bytes = |
| + GetMaxResponseBytes(max_response_bytes, kMaxResponseSizeInBytesForAia); |
| + |
| + return Fetch(request_params.Pass(), callback); |
| +} |
| + |
// Orders jobs by their RequestParams so the JobSet stays sorted by
// parameters (see FindJob()).
bool CertNetFetcherImpl::JobComparator::operator()(const Job* job1,
                                                   const Job* job2) const {
  return job1->request_params() < job2->request_params();
}
| + |
CertNetFetcher::RequestId CertNetFetcherImpl::Fetch(
    scoped_ptr<RequestParams> request_params,
    const FetchCallback& callback) {
  DCHECK(thread_checker_.CalledOnValidThread());

  // If there is an in-progress job that matches the request parameters use it.
  // Otherwise start a new job.
  Job* job = FindJob(*request_params);

  if (!job) {
    // The new job is owned by |jobs_| until it completes or is cancelled.
    job = new Job(request_params.Pass(), this);
    jobs_.insert(job);
    job->StartURLRequest(context_);
  }

  return job->AddRequest(callback);
}
| + |
// Comparator used by FindJob()'s std::lower_bound: compares a Job on the
// left against bare RequestParams on the right.
struct CertNetFetcherImpl::JobToRequestParamsComparator {
  bool operator()(const Job* job,
                  const CertNetFetcherImpl::RequestParams& value) const {
    return job->request_params() < value;
  }
};
| + |
CertNetFetcherImpl::Job* CertNetFetcherImpl::FindJob(
    const RequestParams& params) {
  DCHECK(thread_checker_.CalledOnValidThread());

  // The JobSet is kept in sorted order so items can be found using binary
  // search.
  JobSet::iterator it = std::lower_bound(jobs_.begin(), jobs_.end(), params,
                                         JobToRequestParamsComparator());
  // lower_bound() yields the first job that is not less than |params|; it is
  // a match only if it is also not greater.
  if (it != jobs_.end() && !(params < (*it)->request_params()))
    return *it;
  return nullptr;
}
| + |
| +scoped_ptr<CertNetFetcherImpl::Job> CertNetFetcherImpl::RemoveJob(Job* job) { |
| + DCHECK(thread_checker_.CalledOnValidThread()); |
| + bool erased_job = jobs_.erase(job) == 1; |
| + DCHECK(erased_job); |
| + return scoped_ptr<Job>(job); |
|
davidben
2015/03/31 20:52:23
Optional: make_scoped_ptr might be a little less r
eroman
2015/04/04 21:57:15
Done.
|
| +} |
| + |
| +void CertNetFetcherImpl::SetCurrentlyCompletingJob(Job* job) { |
| + currently_completing_job_ = job; |
| +} |
| + |
| +} // namespace net |