// Copyright (c) 2011 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#include "chrome/browser/renderer_host/async_resource_handler.h"

#include <algorithm>
#include <vector>

#include "base/hash_tables.h"
#include "base/logging.h"
#include "base/shared_memory.h"
#include "chrome/browser/debugger/devtools_netlog_observer.h"
#include "chrome/browser/net/chrome_url_request_context.h"
#include "chrome/browser/net/load_timing_observer.h"
#include "chrome/browser/renderer_host/global_request_id.h"
#include "chrome/browser/renderer_host/resource_dispatcher_host.h"
#include "chrome/browser/renderer_host/resource_dispatcher_host_request_info.h"
#include "chrome/browser/renderer_host/resource_message_filter.h"
#include "chrome/common/render_messages.h"
#include "chrome/common/resource_response.h"
#include "net/base/io_buffer.h"
#include "net/base/load_flags.h"
#include "net/base/net_log.h"
#include "webkit/glue/resource_loader_bridge.h"

using base::Time;
using base::TimeTicks;

namespace {

// When reading, we don't know if we are going to get EOF (0 bytes read), so
// we typically have a buffer that we allocated but did not use. We keep
// this buffer around for the next read as a small optimization.
SharedIOBuffer* g_spare_read_buffer = NULL;

// The initial size of the shared memory buffer (32 kilobytes).
const int kInitialReadBufSize = 32768;

// The maximum size of the shared memory buffer (512 kilobytes).
const int kMaxReadBufSize = 524288;

}  // namespace

// Our version of IOBuffer that uses shared memory.
class SharedIOBuffer : public net::IOBuffer {
 public:
  explicit SharedIOBuffer(int buffer_size)
      : net::IOBuffer(),
        ok_(false),
        buffer_size_(buffer_size) {}

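  // Allocates and maps an anonymous shared memory segment of |buffer_size_|
  // bytes. Callers must check the return value before using the buffer.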
  bool Init() {
    if (shared_memory_.CreateAndMapAnonymous(buffer_size_)) {
      data_ = reinterpret_cast<char*>(shared_memory_.memory());
      DCHECK(data_);
      ok_ = true;
    }
    return ok_;
  }

  base::SharedMemory* shared_memory() { return &shared_memory_; }
  bool ok() { return ok_; }
  int buffer_size() { return buffer_size_; }

 private:
  ~SharedIOBuffer() {
    DCHECK(g_spare_read_buffer != this);
    data_ = NULL;
  }

  base::SharedMemory shared_memory_;
  bool ok_;
  int buffer_size_;
};

AsyncResourceHandler::AsyncResourceHandler(
    ResourceMessageFilter* filter,
    int routing_id,
    const GURL& url,
    ResourceDispatcherHost* resource_dispatcher_host)
    : filter_(filter),
      routing_id_(routing_id),
      rdh_(resource_dispatcher_host),
      next_buffer_size_(kInitialReadBufSize) {
}

AsyncResourceHandler::~AsyncResourceHandler() {
}

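// Forwards upload progress for |request_id| to the renderer.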
bool AsyncResourceHandler::OnUploadProgress(int request_id,
                                            uint64 position,
                                            uint64 size) {
  return filter_->Send(new ViewMsg_Resource_UploadProgress(routing_id_,
                                                           request_id,
                                                           position, size));
}

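// Notifies the renderer of the redirect and defers the request, after
// attaching timing and DevTools data to the response.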
bool AsyncResourceHandler::OnRequestRedirected(int request_id,
                                               const GURL& new_url,
                                               ResourceResponse* response,
                                               bool* defer) {
  *defer = true;
  net::URLRequest* request = rdh_->GetURLRequest(
      GlobalRequestID(filter_->child_id(), request_id));
  LoadTimingObserver::PopulateTimingInfo(request, response);
  DevToolsNetLogObserver::PopulateResponseInfo(request, response);
  return filter_->Send(new ViewMsg_Resource_ReceivedRedirect(
      routing_id_, request_id, new_url, response->response_head));
}

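// Sends the response headers (and any cached metadata) to the renderer once
// timing and DevTools data have been filled in.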
bool AsyncResourceHandler::OnResponseStarted(int request_id,
                                             ResourceResponse* response) {
  // For changes to the main frame, inform the renderer of the new URL's
  // per-host settings before the request actually commits. This way the
  // renderer will be able to set these precisely at the time the
  // request commits, avoiding the possibility of e.g. zooming the old content
  // or of having to lay out the new content twice.
  net::URLRequest* request = rdh_->GetURLRequest(
      GlobalRequestID(filter_->child_id(), request_id));

  LoadTimingObserver::PopulateTimingInfo(request, response);
  DevToolsNetLogObserver::PopulateResponseInfo(request, response);

  ResourceDispatcherHostRequestInfo* info = rdh_->InfoForRequest(request);
  if (info->resource_type() == ResourceType::MAIN_FRAME) {
    GURL request_url(request->url());
    ChromeURLRequestContext* context =
        static_cast<ChromeURLRequestContext*>(request->context());
    if (context) {
      filter_->Send(new ViewMsg_SetContentSettingsForLoadingURL(
          info->route_id(), request_url,
          context->host_content_settings_map()->GetContentSettings(
              request_url)));
      filter_->Send(new ViewMsg_SetZoomLevelForLoadingURL(info->route_id(),
          request_url, context->host_zoom_map()->GetZoomLevel(request_url)));
    }
  }

  filter_->Send(new ViewMsg_Resource_ReceivedResponse(
      routing_id_, request_id, response->response_head));

  if (request->response_info().metadata) {
    std::vector<char> copy(request->response_info().metadata->data(),
                           request->response_info().metadata->data() +
                           request->response_info().metadata->size());
    filter_->Send(new ViewMsg_Resource_ReceivedCachedMetadata(
        routing_id_, request_id, copy));
  }

  return true;
}

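// Nothing needs to happen before the request starts; the request is never
// deferred here.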
bool AsyncResourceHandler::OnWillStart(int request_id,
                                       const GURL& url,
                                       bool* defer) {
  return true;
}

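// Hands the network layer a shared-memory-backed buffer to read into. The
// global spare buffer is reused when available; otherwise a new
// SharedIOBuffer of |next_buffer_size_| bytes is allocated.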
bool AsyncResourceHandler::OnWillRead(int request_id, net::IOBuffer** buf,
                                      int* buf_size, int min_size) {
  DCHECK_EQ(-1, min_size);

  if (g_spare_read_buffer) {
    DCHECK(!read_buffer_);
    read_buffer_.swap(&g_spare_read_buffer);
    DCHECK(read_buffer_->data());

    *buf = read_buffer_.get();
    *buf_size = read_buffer_->buffer_size();
  } else {
    read_buffer_ = new SharedIOBuffer(next_buffer_size_);
    if (!read_buffer_->Init()) {
      DLOG(ERROR) << "Couldn't allocate shared io buffer";
      read_buffer_ = NULL;
      return false;
    }
    DCHECK(read_buffer_->data());
    *buf = read_buffer_.get();
    *buf_size = next_buffer_size_;
  }

  return true;
}

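// Transfers the filled shared-memory buffer to the renderer process and
// notifies it of the new data. Grows the next buffer size when this read
// filled the current buffer completely.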
bool AsyncResourceHandler::OnReadCompleted(int request_id, int* bytes_read) {
  if (!*bytes_read)
    return true;
  DCHECK(read_buffer_.get());

  if (read_buffer_->buffer_size() == *bytes_read) {
    // The network layer has saturated our buffer. Next time, give it a bigger
    // buffer to fill, to minimize the number of round trips we make with the
    // renderer process.
    next_buffer_size_ = std::min(next_buffer_size_ * 2, kMaxReadBufSize);
  }

  if (!rdh_->WillSendData(filter_->child_id(), request_id)) {
    // We should not send this data now; we have too many pending requests.
    return true;
  }

  base::SharedMemoryHandle handle;
  if (!read_buffer_->shared_memory()->GiveToProcess(
      filter_->peer_handle(), &handle)) {
    // We wrongly incremented the pending data count, so fake an ACK message
    // to correct it. We can't move the GiveToProcess call above WillSendData
    // because it destroys read_buffer_, and we don't want that to happen
    // while the request is paused.
    rdh_->DataReceivedACK(filter_->child_id(), request_id);
    // We just unmapped the memory.
    read_buffer_ = NULL;
    return false;
  }
  // We just unmapped the memory.
  read_buffer_ = NULL;

  filter_->Send(new ViewMsg_Resource_DataReceived(
      routing_id_, request_id, handle, *bytes_read));

  return true;
}

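// Forwards the count of bytes downloaded so far to the renderer.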
void AsyncResourceHandler::OnDataDownloaded(
    int request_id, int bytes_downloaded) {
  filter_->Send(new ViewMsg_Resource_DataDownloaded(
      routing_id_, request_id, bytes_downloaded));
}

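// Tells the renderer that the request finished and, on success, stashes the
// read buffer in |g_spare_read_buffer| so a later request can reuse it.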
bool AsyncResourceHandler::OnResponseCompleted(
    int request_id,
    const net::URLRequestStatus& status,
    const std::string& security_info) {
  Time completion_time = Time::Now();
  filter_->Send(new ViewMsg_Resource_RequestComplete(routing_id_,
                                                     request_id,
                                                     status,
                                                     security_info,
                                                     completion_time));

  // If we still have a read buffer, then see about caching it for later...
  // Note that we have to make sure the buffer is not still being used, so we
  // have to perform an explicit check on the status code.
  if (g_spare_read_buffer ||
      net::URLRequestStatus::SUCCESS != status.status()) {
    read_buffer_ = NULL;
  } else if (read_buffer_.get()) {
    DCHECK(read_buffer_->data());
    read_buffer_.swap(&g_spare_read_buffer);
  }
  return true;
}

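// Nothing to do when the request is closed.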
void AsyncResourceHandler::OnRequestClosed() {
}

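// Releases the spare read buffer stashed by OnResponseCompleted, if any.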
// static
void AsyncResourceHandler::GlobalCleanup() {
  if (g_spare_read_buffer) {
    // Avoid the DCHECK in SharedIOBuffer::~SharedIOBuffer().
    SharedIOBuffer* tmp = g_spare_read_buffer;
    g_spare_read_buffer = NULL;
    tmp->Release();
  }
}