Chromium Code Reviews

Side by Side Diff: chrome/browser/resource_dispatcher_host.cc

Issue 10895: Add Terminate() to the Process object, have RenderProcessHost use this to avo... (Closed) Base URL: svn://chrome-svn/chrome/trunk/src/
Patch Set: Created 12 years, 1 month ago
// Copyright (c) 2006-2008 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

// See http://wiki.corp.google.com/twiki/bin/view/Main/ChromeMultiProcessResourceLoading

#include <vector>

#include "chrome/browser/resource_dispatcher_host.h"

(...skipping 103 matching lines...)

    return true;
  }

  bool OnWillRead(int request_id, char** buf, int* buf_size, int min_size) {
    DCHECK(min_size == -1);
    static const int kReadBufSize = 32768;
    if (spare_read_buffer_) {
      read_buffer_.reset(spare_read_buffer_);
      spare_read_buffer_ = NULL;
    } else {
-     read_buffer_.reset(new SharedMemory);
+     read_buffer_.reset(new base::SharedMemory);
      if (!read_buffer_->Create(std::wstring(), false, false, kReadBufSize))
        return false;
      if (!read_buffer_->Map(kReadBufSize))
        return false;
    }
    *buf = static_cast<char*>(read_buffer_->memory());
    *buf_size = kReadBufSize;
    return true;
  }

  bool OnReadCompleted(int request_id, int* bytes_read) {
    if (!*bytes_read)
      return true;
    DCHECK(read_buffer_.get());

    if (!rdh_->WillSendData(render_process_host_id_, request_id)) {
      // We should not send this data now, we have too many pending requests.
      return true;
    }

-   SharedMemoryHandle handle;
+   base::SharedMemoryHandle handle;
    if (!read_buffer_->GiveToProcess(render_process_, &handle)) {
      // We wrongfully incremented the pending data count. Fake an ACK message
      // to fix this. We can't move this call above the WillSendData because
      // it's killing our read_buffer_, and we don't want that when we pause
      // the request.
      rdh_->OnDataReceivedACK(render_process_host_id_, request_id);
      return false;
    }

    receiver_->Send(new ViewMsg_Resource_DataReceived(
(...skipping 12 matching lines...)
    } else if (read_buffer_.get() && read_buffer_->memory()) {
      spare_read_buffer_ = read_buffer_.release();
    }
    return true;
  }

 private:
  // When reading, we don't know if we are going to get EOF (0 bytes read), so
  // we typically have a buffer that we allocated but did not use. We keep
  // this buffer around for the next read as a small optimization.
- static SharedMemory* spare_read_buffer_;
+ static base::SharedMemory* spare_read_buffer_;

- scoped_ptr<SharedMemory> read_buffer_;
+ scoped_ptr<base::SharedMemory> read_buffer_;
  ResourceDispatcherHost::Receiver* receiver_;
  int render_process_host_id_;
  int routing_id_;
  HANDLE render_process_;
  ResourceDispatcherHost* rdh_;
};
-SharedMemory* ResourceDispatcherHost::AsyncEventHandler::spare_read_buffer_;
+
+base::SharedMemory*
+    ResourceDispatcherHost::AsyncEventHandler::spare_read_buffer_;

// ----------------------------------------------------------------------------
// ResourceDispatcherHost::SyncEventHandler

// Used to complete a synchronous resource request in response to resource load
// events from the resource dispatcher host.
class ResourceDispatcherHost::SyncEventHandler
    : public ResourceDispatcherHost::EventHandler {
 public:
  SyncEventHandler(ResourceDispatcherHost::Receiver* receiver,
(...skipping 2300 matching lines...)

  bool enough_new_progress = (amt_since_last > (size / kHalfPercentIncrements));
  bool too_much_time_passed = time_since_last > kOneSecond;

  if (is_finished || enough_new_progress || too_much_time_passed) {
    info->event_handler->OnUploadProgress(info->request_id, position, size);
    info->waiting_for_upload_progress_ack = true;
    info->last_upload_ticks = TimeTicks::Now();
    info->last_upload_position = position;
  }
}
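Note on the spare_read_buffer_ comment in the diff above: because a read may come back with 0 bytes (EOF), the handler often ends up with a buffer it allocated but never used, and that buffer is parked in a static slot so the next read can reuse it instead of creating and mapping fresh shared memory. The following is a minimal, self-contained sketch of that reuse pattern only; it uses plain heap buffers and hypothetical names rather than base::SharedMemory, and it is not the actual control flow of AsyncEventHandler.

// Illustrative sketch of the spare-buffer reuse pattern described above.
// Buffer, kReadBufSize, and g_spare_buffer are hypothetical; the real code
// keeps a static base::SharedMemory* (spare_read_buffer_) instead.
#include <cstddef>
#include <memory>
#include <vector>

namespace {

constexpr size_t kReadBufSize = 32768;
using Buffer = std::vector<char>;

// One spare buffer shared by all handlers, mirroring spare_read_buffer_.
std::unique_ptr<Buffer> g_spare_buffer;

class ReadHandler {
 public:
  // Hands out a buffer for the next read, reusing the spare one if available.
  char* OnWillRead() {
    if (g_spare_buffer) {
      read_buffer_ = std::move(g_spare_buffer);
    } else {
      read_buffer_ = std::make_unique<Buffer>(kReadBufSize);
    }
    return read_buffer_->data();
  }

  // If the read returned no data, the buffer was never used; park it so the
  // next OnWillRead() can pick it up without another allocation.
  void OnReadCompleted(int bytes_read) {
    if (bytes_read == 0 && read_buffer_)
      g_spare_buffer = std::move(read_buffer_);
  }

 private:
  std::unique_ptr<Buffer> read_buffer_;
};

}  // namespace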
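OnReadCompleted in the diff also relies on a small piece of bookkeeping: WillSendData() counts the outgoing data message as pending before the buffer is handed to the renderer, so if GiveToProcess() then fails, the count has to be rebalanced by faking the ACK the renderer would normally send. A rough sketch of that accounting follows; kMaxPendingDataMessages and the per-request map are assumptions, not the real ResourceDispatcherHost internals.

// Hypothetical sketch of pending-data accounting; the method names are
// modeled on the diff, but the implementation and the limit are assumed.
#include <map>

class PendingDataAccounting {
 public:
  // Refuses the send if this request already has too many unacknowledged data
  // messages; otherwise counts one more as pending and allows the send.
  bool WillSendData(int request_id) {
    static const int kMaxPendingDataMessages = 20;  // assumed limit
    int& pending = pending_data_[request_id];
    if (pending >= kMaxPendingDataMessages)
      return false;
    ++pending;
    return true;
  }

  // Normally driven by the renderer's ACK. The sender calls it directly
  // ("fakes" the ACK) when it incremented the count in WillSendData() but
  // then failed to hand the buffer to the renderer.
  void OnDataReceivedACK(int request_id) {
    int& pending = pending_data_[request_id];
    if (pending > 0)
      --pending;
  }

 private:
  std::map<int, int> pending_data_;  // request_id -> unACKed data messages
};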

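The last hunk of the diff shows how upload progress reports are throttled: a report goes out only when the upload has finished, when enough new data has been sent since the last report, or when enough time has passed. Below is a standalone sketch of that rule; the 0.5% step is inferred from the name kHalfPercentIncrements and the 1-second interval from kOneSecond, while the class, callback, and clock handling are illustrative only.

// Standalone sketch of the upload-progress throttle; not the Chromium
// implementation. The reporter callback stands in for OnUploadProgress().
#include <chrono>
#include <cstdint>
#include <functional>

class UploadProgressThrottle {
 public:
  using Clock = std::chrono::steady_clock;
  using Reporter = std::function<void(uint64_t position, uint64_t size)>;

  explicit UploadProgressThrottle(Reporter reporter)
      : reporter_(std::move(reporter)) {}

  // Called whenever new upload progress is known; forwards it only when one
  // of the three conditions holds, mirroring the is_finished /
  // enough_new_progress / too_much_time_passed checks in the diff.
  void MaybeReport(uint64_t position, uint64_t size) {
    static constexpr int kHalfPercentIncrements = 200;  // assumed value
    static constexpr auto kMinInterval = std::chrono::seconds(1);

    const bool is_finished = (position == size);
    const bool enough_new_progress =
        (position - last_position_) > (size / kHalfPercentIncrements);
    const bool too_much_time_passed =
        (Clock::now() - last_report_) > kMinInterval;

    if (is_finished || enough_new_progress || too_much_time_passed) {
      reporter_(position, size);
      last_report_ = Clock::now();
      last_position_ = position;
    }
  }

 private:
  Reporter reporter_;
  Clock::time_point last_report_;
  uint64_t last_position_ = 0;
};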