OLD | NEW |
---|---|
1 // Copyright 2015 The Chromium Authors. All rights reserved. | |
2 // Use of this source code is governed by a BSD-style license that can be | |
3 // found in the LICENSE file. | |
4 | |
5 #include "net/cert/cert_net_fetcher.h" | |
6 | |
7 #include "base/logging.h" | |
8 #include "base/numerics/safe_math.h" | |
9 #include "base/stl_util.h" | |
10 #include "base/timer/timer.h" | |
11 #include "net/base/load_flags.h" | |
12 #include "net/url_request/redirect_info.h" | |
13 #include "net/url_request/url_request_context.h" | |
14 | |
15 // TODO(eroman): Add support for POST parameters. | |
16 // TODO(eroman): Add controls for bypassing the cache. | |
17 // TODO(eroman): Add a maximum number of in-flight jobs/requests. | |
18 | |
19 namespace net { | |
20 | |
21 namespace { | |
22 | |
23 // The error returned when the response body exceeded the size limit. | |
24 const int kNetErrorResponseTooLarge = ERR_FILE_TOO_BIG; | |
25 | |
26 // The error returned when the URL could not be fetched because it was not an | |
27 // allowed scheme (http). | |
28 const int kNetErrorNotHttpUrl = ERR_DISALLOWED_URL_SCHEME; | |
29 | |
30 // The error returned when the URL fetch did not complete in time. | |
31 const int kNetErrorTimedOut = ERR_TIMED_OUT; | |
32 | |
33 // The error returned when the response was HTTP however it did not have a | |
34 // status of 200/OK. | |
35 // TODO(eroman): Use a more specific error code. | |
36 const int kNetErrorNot200HttpResponse = ERR_FAILED; | |
Ryan Sleevi
2015/02/23 20:25:22
DESIGN: Given that net::Error is an enum, I would
eroman
2015/02/23 23:36:57
Done.
| |
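(The suggestion above is cut off; it presumably asks for these constants to be typed as
net::Error rather than int, which the "Done." reply indicates was adopted. A minimal
sketch of that change, under that assumption:)

    // Typed as net::Error so only net error codes can be assigned.
    const net::Error kNetErrorResponseTooLarge = ERR_FILE_TOO_BIG;
    const net::Error kNetErrorNotHttpUrl = ERR_DISALLOWED_URL_SCHEME;
    const net::Error kNetErrorTimedOut = ERR_TIMED_OUT;
    const net::Error kNetErrorNot200HttpResponse = ERR_FAILED;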
37 | |
38 // The size of the buffer used for reading the response body of the URLRequest. | |
39 const int kReadBufferSizeInBytes = 4096; | |
40 | |
41 // The maximum size in bytes for the response body when fetching a CRL. | |
42 const int kMaxResponseSizeInBytesForCrl = 5 * 1024 * 1024; | |
43 | |
44 // The maximum size in bytes for the response body when fetching an AIA URL | |
45 // (caIssuers/OCSP). | |
46 const int kMaxResponseSizeInBytesForAia = 64 * 1024; | |
47 | |
48 // The default timeout in seconds for fetch requests. | |
49 const int kTimeoutSeconds = 15; | |
50 | |
51 // Policy for which URLs are allowed to be fetched. This is called both for the | |
52 // initial URL and for each redirect. Returns OK on success or a net error | |
53 // code on failure. | |
54 int CanFetchUrl(const GURL& url) { | |
55 if (!url.SchemeIs("http")) | |
56 return kNetErrorNotHttpUrl; | |
57 return OK; | |
58 } | |
59 | |
60 base::TimeDelta GetTimeout(int timeout_milliseconds) { | |
61 if (timeout_milliseconds == CertNetFetcher::DEFAULT) | |
62 return base::TimeDelta::FromSeconds(kTimeoutSeconds); | |
63 return base::TimeDelta::FromMilliseconds(timeout_milliseconds); | |
64 } | |
65 | |
66 size_t GetMaxResponseBytes(int max_response_bytes, | |
67 size_t default_max_response_bytes) { | |
68 if (max_response_bytes == CertNetFetcher::DEFAULT) | |
69 return default_max_response_bytes; | |
70 | |
71 // Ensure that the specified limit is not negative, and cannot result in an | |
72 // overflow while reading. | |
73 base::CheckedNumeric<size_t> result(max_response_bytes); | |
74 result += kReadBufferSizeInBytes; | |
75 result -= kReadBufferSizeInBytes; | |
Ryan Sleevi
2015/02/23 20:25:22
This is non-obvious to me how you ensure it's not
eroman
2015/02/23 23:36:57
The first line:
base::CheckedNumeric<size_t> res
| |
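(The reply is cut off after its first line. A sketch of the behaviour it points at, based
on base::CheckedNumeric semantics rather than the truncated text: the constructor performs
a range-checked int -> size_t conversion, so a negative limit can never reach ValueOrDie()
as a bogus size_t, and the addition traps overflow near SIZE_MAX.)

    base::CheckedNumeric<size_t> result(-1);  // Negative input: state is now invalid.
    result += kReadBufferSizeInBytes;         // Would also trap overflow for huge inputs.
    result -= kReadBufferSizeInBytes;
    CHECK(!result.IsValid());                 // ValueOrDie() would CHECK-fail here.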
76 return result.ValueOrDie(); | |
77 } | |
78 | |
79 enum HttpMethod { | |
80 HTTP_METHOD_GET, | |
81 HTTP_METHOD_POST, | |
82 }; | |
83 | |
84 } // namespace | |
85 | |
86 // CertNetFetcher::Request tracks an outstanding call to Fetch(). | |
87 struct CertNetFetcher::Request { | |
88 Request(FetchCallback callback, Job* job) : callback(callback), job(job) {} | |
89 | |
90 // The callback to invoke when the request has completed. | |
91 FetchCallback callback; | |
92 | |
93 // A non-owned pointer to the job that is executing the request (and in effect | |
94 // owns |this|). | |
95 Job* job; | |
96 | |
97 DISALLOW_COPY_AND_ASSIGN(Request); | |
Ryan Sleevi
2015/02/23 20:25:22
STYLE: Line 96 should specify private: , as per ht
eroman
2015/02/23 23:36:57
Done.
| |
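(A minimal sketch of the "Done." follow-up, assuming the usual convention the truncated
style-guide link refers to; the same applies to the RequestParams struct below:)

    struct CertNetFetcher::Request {
      Request(FetchCallback callback, Job* job) : callback(callback), job(job) {}

      FetchCallback callback;
      Job* job;

     private:
      DISALLOW_COPY_AND_ASSIGN(Request);
    };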
98 }; | |
99 | |
100 struct CertNetFetcher::RequestParams { | |
101 RequestParams(); | |
102 | |
103 bool operator<(const RequestParams& other) const; | |
104 | |
105 GURL url; | |
106 HttpMethod http_method; | |
107 size_t max_response_bytes; | |
108 | |
109 // If set to a value <= 0 then means "no timeout". | |
110 base::TimeDelta timeout; | |
111 | |
112 // IMPORTANT: When adding fields to this structure, update operator<(). | |
113 | |
114 DISALLOW_COPY_AND_ASSIGN(RequestParams); | |
Ryan Sleevi
2015/02/23 20:25:22
ditto private
eroman
2015/02/23 23:36:58
Done.
| |
115 }; | |
116 | |
117 CertNetFetcher::RequestParams::RequestParams() | |
118 : http_method(HTTP_METHOD_GET), max_response_bytes(0) { | |
119 } | |
120 | |
121 bool CertNetFetcher::RequestParams::operator<( | |
122 const RequestParams& other) const { | |
123 if (url != other.url) | |
124 return url < other.url; | |
125 if (http_method != other.http_method) | |
126 return http_method < other.http_method; | |
127 if (max_response_bytes != other.max_response_bytes) | |
128 return max_response_bytes < other.max_response_bytes; | |
129 if (timeout != other.timeout) | |
130 return timeout < other.timeout; | |
131 return false; | |
132 } | |
133 | |
134 // CertNetFetcher::Job tracks an outstanding URLRequest as well as all of the | |
135 // pending requests for it. | |
136 class CertNetFetcher::Job : public URLRequest::Delegate { | |
137 public: | |
138 Job(scoped_ptr<RequestParams> request_params, CertNetFetcher* parent); | |
139 ~Job() override; | |
140 | |
141 // Cancels the job and all requests attached to it. No callbacks will be | |
142 // invoked following cancellation. | |
143 void Cancel(); | |
144 | |
145 const RequestParams& request_params() const { return *request_params_; } | |
146 | |
147 // Attaches a request to the job. When the job completes it will invoke | |
148 // |callback|. | |
149 RequestId AddRequest(const FetchCallback& callback); | |
150 | |
151 // Removes |request| from the job and deletes it. | |
Ryan Sleevi
2015/02/23 20:25:22
The "deletes it" is unclear, given that RequestId
eroman
2015/02/23 23:36:57
My thinking is that the Request object is an inter
| |
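(The reply is cut off; it appears to say that Request is an internal object the caller
never touches directly. Under that reading, a hypothetical caller only ever holds the
opaque RequestId, and cancelling through the fetcher is what deletes the Request. The
names below are illustrative, and the callback signature is assumed from the
callback.Run(result_net_error_, response_body_) call in OnJobCompleted().)

    // Hypothetical caller sketch; |fetcher| and |crl_url| are assumed to exist.
    void OnFetchDone(int net_error, const std::vector<uint8_t>& body) {}

    void FetchThenCancel(CertNetFetcher* fetcher, const GURL& crl_url) {
      CertNetFetcher::RequestId id = fetcher->FetchCrl(
          crl_url, CertNetFetcher::DEFAULT, CertNetFetcher::DEFAULT,
          base::Bind(&OnFetchDone));
      fetcher->CancelRequest(id);  // Deletes the internal Request; OnFetchDone never runs.
    }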
152 void CancelRequest(RequestId request); | |
153 | |
154 // Creates and starts a URLRequest for the job. After the request has | |
155 // completed, OnJobCompleted() will be invoked and all the registered requests | |
156 // notified of completion. | |
157 void StartURLRequest(URLRequestContext* context); | |
158 | |
159 private: | |
160 // The pointers in RequestList are owned by the Job. | |
161 typedef std::vector<Request*> RequestList; | |
162 | |
163 // Implementation of URLRequest::Delegate | |
164 void OnReceivedRedirect(URLRequest* request, | |
165 const RedirectInfo& redirect_info, | |
166 bool* defer_redirect) override; | |
167 void OnResponseStarted(URLRequest* request) override; | |
168 void OnReadCompleted(URLRequest* request, int bytes_read) override; | |
169 | |
170 // Clears the URLRequest and timer. Helper for doing work common to | |
171 // cancellation and job completion. | |
172 void Stop(); | |
173 | |
174 // Reads as much data as available from the |request|. | |
175 void ReadBody(URLRequest* request); | |
176 | |
177 // Helper to copy the partial bytes read from the read IOBuffer to an | |
178 // aggregated buffer. | |
179 bool ConsumeBytesRead(URLRequest* request, int num_bytes); | |
180 | |
181 // Called once the job has exceeded its deadline. | |
182 void OnTimeout(); | |
183 | |
184 // Called when the URLRequest has completed (either success or failure). | |
185 void OnUrlRequestCompleted(URLRequest* request); | |
186 | |
187 // Called when the Job has completed. The job may finish in response to a | |
188 // timeout, an invalid URL, or the URLRequest completing. By the time this | |
189 // method is called, the response variables have been assigned | |
190 // (result_net_error_code_ et al). | |
191 void OnJobCompleted(); | |
192 | |
193 // The requests attached to this job. | |
194 RequestList requests_; | |
195 | |
196 // The input parameters for starting a URLRequest. | |
197 scoped_ptr<RequestParams> request_params_; | |
198 | |
199 // The URLRequest response information. | |
200 std::vector<uint8_t> response_body_; | |
201 int result_net_error_; | |
202 | |
203 scoped_ptr<URLRequest> url_request_; | |
204 scoped_refptr<IOBuffer> read_buffer_; | |
205 | |
206 // Used to timeout the job when the URLRequest takes too long. This timer is | |
207 // also used for notifying a failure to start the URLRequest. | |
208 base::OneShotTimer<Job> timer_; | |
209 | |
210 // Non-owned pointer to the CertNetFetcher that created this job. | |
211 CertNetFetcher* parent_; | |
212 | |
213 DISALLOW_COPY_AND_ASSIGN(Job); | |
214 }; | |
215 | |
216 CertNetFetcher::Job::Job(scoped_ptr<RequestParams> request_params, | |
217 CertNetFetcher* parent) | |
218 : request_params_(request_params.Pass()), | |
219 result_net_error_(ERR_IO_PENDING), | |
220 parent_(parent) { | |
221 } | |
222 | |
223 CertNetFetcher::Job::~Job() { | |
224 Cancel(); | |
225 } | |
226 | |
227 void CertNetFetcher::Job::Cancel() { | |
228 parent_ = NULL; | |
229 STLDeleteElements(&requests_); | |
230 Stop(); | |
231 } | |
232 | |
233 CertNetFetcher::RequestId CertNetFetcher::Job::AddRequest( | |
234 const FetchCallback& callback) { | |
235 requests_.push_back(new Request(callback, this)); | |
236 return requests_.back(); | |
237 } | |
238 | |
239 void CertNetFetcher::Job::CancelRequest(RequestId request) { | |
240 scoped_ptr<Job> delete_this; | |
241 | |
242 RequestList::iterator it = | |
243 std::find(requests_.begin(), requests_.end(), request); | |
244 DCHECK(it != requests_.end()); | |
245 requests_.erase(it); | |
246 delete request; | |
247 | |
248 // If there are no longer any requests attached to the job then | |
249 // cancel and delete it. | |
250 if (requests_.empty() && parent_) | |
251 delete_this = parent_->RemoveJob(this); | |
252 } | |
253 | |
254 void CertNetFetcher::Job::StartURLRequest(URLRequestContext* context) { | |
255 int error = CanFetchUrl(request_params_->url); | |
256 if (error != OK) { | |
257 result_net_error_ = error; | |
258 // The CertNetFetcher's API contract is that requests always complete | |
259 // asynchronously. Use the timer class so the task is easily cancelled. | |
260 timer_.Start(FROM_HERE, base::TimeDelta(), this, &Job::OnJobCompleted); | |
261 return; | |
262 } | |
263 | |
264 // Start the URLRequest. | |
265 read_buffer_ = new IOBuffer(kReadBufferSizeInBytes); | |
266 url_request_ = context->CreateRequest(request_params_->url, DEFAULT_PRIORITY, | |
267 this, NULL); | |
268 if (request_params_->http_method == HTTP_METHOD_POST) | |
269 url_request_->set_method("POST"); | |
270 url_request_->SetLoadFlags(LOAD_DO_NOT_SAVE_COOKIES | | |
271 LOAD_DO_NOT_SEND_COOKIES); | |
272 url_request_->Start(); | |
273 | |
274 // Start a timer to limit how long the job runs for. | |
275 if (request_params_->timeout > base::TimeDelta()) | |
276 timer_.Start(FROM_HERE, request_params_->timeout, this, &Job::OnTimeout); | |
277 } | |
278 | |
279 void CertNetFetcher::Job::OnReceivedRedirect(URLRequest* request, | |
280 const RedirectInfo& redirect_info, | |
281 bool* defer_redirect) { | |
282 DCHECK_EQ(url_request_.get(), request); | |
283 | |
284 // Ensure that the new URL matches the policy. | |
285 int error = CanFetchUrl(redirect_info.new_url); | |
Ryan Sleevi
2015/02/23 20:25:22
My remarks on HSTS from the header notwithstanding
eroman
2015/02/23 23:36:57
I would venture to say that this is a bad configur
| |
286 if (error != OK) { | |
287 request->CancelWithError(error); | |
288 OnUrlRequestCompleted(request); | |
289 return; | |
290 } | |
291 } | |
292 | |
293 void CertNetFetcher::Job::OnResponseStarted(URLRequest* request) { | |
294 DCHECK_EQ(url_request_.get(), request); | |
295 | |
296 if (!request->status().is_success()) { | |
297 OnUrlRequestCompleted(request); | |
298 return; | |
299 } | |
300 | |
301 // In practice all URLs fetched are HTTP, but check anyway as defensive | |
302 // measure in case the policy is ever changed. | |
303 if (request->GetResponseCode() != 200) { | |
304 request->CancelWithError(kNetErrorNot200HttpResponse); | |
305 OnUrlRequestCompleted(request); | |
306 return; | |
307 } | |
308 | |
309 ReadBody(request); | |
310 } | |
311 | |
312 void CertNetFetcher::Job::OnReadCompleted(URLRequest* request, int bytes_read) { | |
313 DCHECK_EQ(url_request_.get(), request); | |
314 | |
315 // Keep reading the response body. | |
316 if (ConsumeBytesRead(request, bytes_read)) | |
317 ReadBody(request); | |
318 } | |
319 | |
320 void CertNetFetcher::Job::Stop() { | |
321 timer_.Stop(); | |
322 url_request_.reset(); | |
323 } | |
324 | |
325 void CertNetFetcher::Job::ReadBody(URLRequest* request) { | |
326 // Read as many bytes as are available synchronously. | |
327 int num_bytes; | |
328 while ( | |
329 request->Read(read_buffer_.get(), kReadBufferSizeInBytes, &num_bytes)) { | |
330 if (!ConsumeBytesRead(request, num_bytes)) | |
331 return; | |
332 } | |
333 | |
334 // Check whether the read failed synchronously. | |
335 if (!request->status().is_io_pending()) | |
336 OnUrlRequestCompleted(request); | |
337 return; | |
338 } | |
339 | |
340 bool CertNetFetcher::Job::ConsumeBytesRead(URLRequest* request, int num_bytes) { | |
341 if (num_bytes <= 0) { | |
342 // Error while reading, or EOF. | |
343 OnUrlRequestCompleted(request); | |
344 return false; | |
345 } | |
346 | |
347 // Enforce maximum size bound. | |
348 if (num_bytes + response_body_.size() > request_params_->max_response_bytes) { | |
349 request->CancelWithError(kNetErrorResponseTooLarge); | |
350 OnUrlRequestCompleted(request); | |
351 return false; | |
352 } | |
353 | |
354 // Append the data to |response_body_|. | |
355 response_body_.insert(response_body_.end(), read_buffer_->data(), | |
Ryan Sleevi
2015/02/23 20:25:22
Recommendation: .reserve() first.
[Why? Long stor
eroman
2015/02/23 23:36:58
Done.
| |
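(The rationale in the recommendation is cut off. A minimal sketch of the "Done." change,
assuming it simply reserves before appending:)

    // Ensure capacity before appending the newly read bytes.
    response_body_.reserve(response_body_.size() + num_bytes);
    response_body_.insert(response_body_.end(), read_buffer_->data(),
                          read_buffer_->data() + num_bytes);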
356 read_buffer_->data() + num_bytes); | |
357 return true; | |
358 } | |
359 | |
360 void CertNetFetcher::Job::OnTimeout() { | |
361 result_net_error_ = kNetErrorTimedOut; | |
362 url_request_->CancelWithError(result_net_error_); | |
363 OnJobCompleted(); | |
364 } | |
365 | |
366 void CertNetFetcher::Job::OnUrlRequestCompleted(URLRequest* request) { | |
367 DCHECK_EQ(request, url_request_.get()); | |
368 | |
369 if (request->status().is_success()) | |
370 result_net_error_ = OK; | |
371 else | |
372 result_net_error_ = request->status().error(); | |
373 | |
374 OnJobCompleted(); | |
375 } | |
376 | |
377 void CertNetFetcher::Job::OnJobCompleted() { | |
378 // Stop the timer and clear the URLRequest. | |
379 Stop(); | |
380 | |
381 // Invoking the callbacks is subtle as state may be mutated while iterating | |
382 // through the callbacks: | |
383 // | |
384 // * The parent CertNetFetcher may be deleted | |
385 // * Requests in this job may be cancelled | |
386 | |
387 scoped_ptr<Job> delete_this = parent_->RemoveJob(this); | |
388 parent_->SetCurrentlyCompletingJob(this); | |
389 | |
390 while (!requests_.empty()) { | |
391 scoped_ptr<Request> request(requests_.front()); | |
392 requests_.erase(requests_.begin()); | |
393 request->callback.Run(result_net_error_, response_body_); | |
394 } | |
395 | |
396 if (parent_) | |
397 parent_->SetCurrentlyCompletingJob(NULL); | |
398 } | |
399 | |
400 CertNetFetcher::CertNetFetcher(URLRequestContext* context) | |
401 : currently_completing_job_(NULL), context_(context) { | |
402 } | |
403 | |
404 CertNetFetcher::~CertNetFetcher() { | |
405 STLDeleteElements(&jobs_); | |
406 if (currently_completing_job_) | |
407 currently_completing_job_->Cancel(); | |
Ryan Sleevi
2015/02/23 20:25:22
I find this a little surprising, or at least, uncl
eroman
2015/02/23 23:36:57
Correct. With the additional detail that once OnJo
| |
408 } | |
409 | |
410 void CertNetFetcher::CancelRequest(RequestId request) { | |
411 DCHECK(thread_checker_.CalledOnValidThread()); | |
412 | |
413 request->job->CancelRequest(request); | |
414 } | |
415 | |
416 CertNetFetcher::RequestId CertNetFetcher::FetchCaIssuers( | |
417 const GURL& url, | |
418 int timeout_milliseconds, | |
419 int max_response_bytes, | |
420 const FetchCallback& callback) { | |
421 scoped_ptr<RequestParams> request_params(new RequestParams); | |
422 | |
423 request_params->url = url; | |
424 request_params->http_method = HTTP_METHOD_GET; | |
425 request_params->timeout = GetTimeout(timeout_milliseconds); | |
426 request_params->max_response_bytes = | |
427 GetMaxResponseBytes(max_response_bytes, kMaxResponseSizeInBytesForAia); | |
428 | |
429 return Fetch(request_params.Pass(), callback); | |
430 } | |
431 | |
432 CertNetFetcher::RequestId CertNetFetcher::FetchCrl( | |
433 const GURL& url, | |
434 int timeout_milliseconds, | |
435 int max_response_bytes, | |
436 const FetchCallback& callback) { | |
437 scoped_ptr<RequestParams> request_params(new RequestParams); | |
438 | |
439 request_params->url = url; | |
440 request_params->http_method = HTTP_METHOD_GET; | |
441 request_params->timeout = GetTimeout(timeout_milliseconds); | |
442 request_params->max_response_bytes = | |
443 GetMaxResponseBytes(max_response_bytes, kMaxResponseSizeInBytesForCrl); | |
444 | |
445 return Fetch(request_params.Pass(), callback); | |
446 } | |
447 | |
448 CertNetFetcher::RequestId CertNetFetcher::FetchOcsp( | |
449 const GURL& url, | |
450 int timeout_milliseconds, | |
451 int max_response_bytes, | |
452 const FetchCallback& callback) { | |
453 scoped_ptr<RequestParams> request_params(new RequestParams); | |
454 | |
455 request_params->url = url; | |
456 request_params->http_method = HTTP_METHOD_GET; | |
457 request_params->timeout = GetTimeout(timeout_milliseconds); | |
458 request_params->max_response_bytes = | |
459 GetMaxResponseBytes(max_response_bytes, kMaxResponseSizeInBytesForAia); | |
460 | |
461 return Fetch(request_params.Pass(), callback); | |
462 } | |
463 | |
464 bool CertNetFetcher::JobComparator::operator()(const Job* job1, | |
465 const Job* job2) const { | |
466 return job1->request_params() < job2->request_params(); | |
467 } | |
468 | |
469 CertNetFetcher::RequestId CertNetFetcher::Fetch( | |
470 scoped_ptr<RequestParams> request_params, | |
471 const FetchCallback& callback) { | |
472 DCHECK(thread_checker_.CalledOnValidThread()); | |
473 | |
474 // If there is an in-progress job that matches the request parameters use it. | |
475 // Otherwise start a new job. | |
476 Job* job = FindJob(*request_params.get()); | |
Ryan Sleevi
2015/02/23 20:25:22
No need for .get() here
eroman
2015/02/23 23:36:58
Done.
| |
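(A one-line sketch of the "Done." change: the scoped_ptr can be dereferenced directly.)

    Job* job = FindJob(*request_params);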
477 | |
478 if (!job) { | |
479 job = new Job(request_params.Pass(), this); | |
480 jobs_.insert(job); | |
481 job->StartURLRequest(context_); | |
482 } | |
483 | |
484 return job->AddRequest(callback); | |
485 } | |
486 | |
487 struct CertNetFetcher::JobToRequestParamsComparator { | |
488 bool operator()(const Job* job, | |
489 const CertNetFetcher::RequestParams& value) const { | |
490 return job->request_params() < value; | |
491 } | |
492 }; | |
493 | |
494 CertNetFetcher::Job* CertNetFetcher::FindJob(const RequestParams& params) { | |
495 DCHECK(thread_checker_.CalledOnValidThread()); | |
496 | |
497 // The JobSet is kept in sorted order so items can be found using binary | |
498 // search. | |
499 JobSet::iterator it = std::lower_bound(jobs_.begin(), jobs_.end(), params, | |
500 JobToRequestParamsComparator()); | |
501 if (it != jobs_.end() && !(params < (*it)->request_params())) | |
502 return *it; | |
503 return NULL; | |
504 } | |
505 | |
506 scoped_ptr<CertNetFetcher::Job> CertNetFetcher::RemoveJob(Job* job) { | |
507 DCHECK(thread_checker_.CalledOnValidThread()); | |
508 bool erased_job = jobs_.erase(job) == 1; | |
509 DCHECK(erased_job); | |
510 return scoped_ptr<Job>(job); | |
511 } | |
512 | |
513 void CertNetFetcher::SetCurrentlyCompletingJob(Job* job) { | |
514 currently_completing_job_ = job; | |
515 } | |
516 | |
517 } // namespace net | |