Chromium Code Reviews

Unified Diff: ppapi/host/ppapi_host.cc

Issue 454433002: PPAPI: Introduce concept of ResourceHosts "pinning" each other (Closed)
Base URL: svn://svn.chromium.org/chrome/trunk/src
Patch Set: Created 6 years, 4 months ago
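
What the patch adds, in brief: one ResourceHost can now "pin" another to keep it alive, via PpapiHost::PinHost(), and must release it with PpapiHost::UnpinHost() before the PpapiHost goes away. Below is a minimal sketch of the intended call pattern, assuming ResourceHost's host() accessor returns the owning PpapiHost; MyOwnerHost and dependency_host_ are hypothetical names, not part of this patch:

// Hypothetical host that depends on another host. Illustrative only.
#include "ppapi/host/ppapi_host.h"
#include "ppapi/host/resource_host.h"

class MyOwnerHost : public ppapi::host::ResourceHost {
 public:
  MyOwnerHost(ppapi::host::PpapiHost* ppapi_host,
              PP_Instance instance,
              PP_Resource resource,
              ppapi::host::ResourceHost* dependency)
      : ResourceHost(ppapi_host, instance, resource),
        dependency_host_(dependency) {
    // Keep |dependency| alive for as long as this host exists.
    host()->PinHost(this, dependency_host_);
  }

  virtual ~MyOwnerHost() {
    // Every PinHost() must be balanced by an UnpinHost() before PpapiHost is
    // destroyed; the new DCHECK in ~PpapiHost() fires if a host forgets.
    host()->UnpinHost(this, dependency_host_);
  }

 private:
  ppapi::host::ResourceHost* dependency_host_;  // Lifetime managed via pin.
};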
 // Copyright (c) 2012 The Chromium Authors. All rights reserved.
 // Use of this source code is governed by a BSD-style license that can be
 // found in the LICENSE file.

 #include "ppapi/host/ppapi_host.h"

 #include "base/logging.h"
 #include "ppapi/c/pp_errors.h"
 #include "ppapi/host/host_factory.h"
 #include "ppapi/host/host_message_context.h"
 (...skipping 26 matching lines...)

 PpapiHost::~PpapiHost() {
   // Delete these explicitly before destruction since then the host is still
   // technically alive in case one of the filters accesses us from the
   // destructor.
   instance_message_filters_.clear();

   // The resources may also want to use us in their destructors.
   resources_.clear();
   pending_resource_hosts_.clear();
+  // At this point, all hosts should have Unpinned anything they depend on in
+  // their destructors, so this map should be empty.
+  DCHECK(owner_to_owned_map_.empty()) << "A ResourceHost forgot to Unpin";
 }

 bool PpapiHost::Send(IPC::Message* msg) {
   return sender_->Send(msg);
 }

 bool PpapiHost::OnMessageReceived(const IPC::Message& msg) {
   bool handled = true;
   IPC_BEGIN_MESSAGE_MAP(PpapiHost, msg)
     IPC_MESSAGE_HANDLER(PpapiHostMsg_ResourceCall,
 (...skipping 100 matching lines...)

 void PpapiHost::AddHostFactoryFilter(scoped_ptr<HostFactory> filter) {
   host_factory_filters_.push_back(filter.release());
 }

 void PpapiHost::AddInstanceMessageFilter(
     scoped_ptr<InstanceMessageFilter> filter) {
   instance_message_filters_.push_back(filter.release());
 }

+ResourceHost* PpapiHost::GetResourceHost(PP_Resource resource) const {
+  ResourceMap::const_iterator found = resources_.find(resource);
+  return found == resources_.end() ? NULL : found->second.get();
+}
+
+void PpapiHost::PinHost(ResourceHost* owner_host, ResourceHost* owned_host) {
+  // Only allow a ResourceHost to be "owned" or to be an "owner", but not
+  // both. This keeps the dependency graph at most one level deep and also
+  // prevents cycles.
+  DCHECK(owner_to_owned_map_.count(owned_host) == 0)
+      << "A ResourceHost is not allowed to own and also be owned.";
+  linked_ptr<ResourceHost> owned_linked_ptr = RawToLinkedPtr(owned_host);
+  DCHECK(owned_linked_ptr.get());
+  // Pins are counted: pinning the same pair again increments the count, and
+  // each pin must be balanced by a matching UnpinHost() call.
+  owner_to_owned_map_[owner_host][owned_linked_ptr]++;
+}
+
+void PpapiHost::UnpinHost(ResourceHost* owner_host, ResourceHost* owned_host) {
+  OwnerToOwnedHostMap::iterator iter = owner_to_owned_map_.find(owner_host);
+  DCHECK(iter != owner_to_owned_map_.end());
+  if (iter != owner_to_owned_map_.end()) {
+    linked_ptr<ResourceHost> owned_linked_ptr = RawToLinkedPtr(owned_host);
+    PinCountMap::iterator count_iter = iter->second.find(owned_linked_ptr);
+    DCHECK(count_iter != iter->second.end());
+    if (count_iter == iter->second.end())
+      return;
+    if (--count_iter->second == 0)
+      iter->second.erase(count_iter);
+    if (iter->second.empty())
+      owner_to_owned_map_.erase(iter);
+  }
+}
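
Judging from the iterator types in UnpinHost() above, owner_to_owned_map_ maps an owner ResourceHost* to a PinCountMap from linked_ptr<ResourceHost> to a pin count; the typedefs themselves presumably live in ppapi_host.h. A free-standing sketch of the same counting logic, with std::shared_ptr standing in for linked_ptr and all names simplified (illustrative, not the patch's API):

#include <cassert>
#include <map>
#include <memory>

struct Host {};
typedef std::map<std::shared_ptr<Host>, int> PinCountMap;
typedef std::map<Host*, PinCountMap> OwnerToOwnedMap;

void Pin(OwnerToOwnedMap* map, Host* owner, std::shared_ptr<Host> owned) {
  (*map)[owner][owned]++;  // The first pin inserts the entry with count 1.
}

void Unpin(OwnerToOwnedMap* map, Host* owner, std::shared_ptr<Host> owned) {
  OwnerToOwnedMap::iterator iter = map->find(owner);
  assert(iter != map->end());
  PinCountMap::iterator count_iter = iter->second.find(owned);
  assert(count_iter != iter->second.end());
  if (--count_iter->second == 0)
    iter->second.erase(count_iter);  // Last pin released: drop the reference.
  if (iter->second.empty())
    map->erase(iter);  // The owner no longer pins anything.
}

int main() {
  OwnerToOwnedMap map;
  Host owner;
  std::shared_ptr<Host> owned(new Host);
  Pin(&map, &owner, owned);
  Pin(&map, &owner, owned);    // Pin count is now 2.
  Unpin(&map, &owner, owned);  // Still pinned: count drops to 1.
  Unpin(&map, &owner, owned);  // Fully released.
  assert(map.empty());
  return 0;
}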

 void PpapiHost::OnHostMsgResourceCall(
     const proxy::ResourceMessageCallParams& params,
     const IPC::Message& nested_msg) {
   TRACE_EVENT2("ppapi proxy", "PpapiHost::OnHostMsgResourceCall",
                "Class", IPC_MESSAGE_ID_CLASS(nested_msg.type()),
                "Line", IPC_MESSAGE_ID_LINE(nested_msg.type()));
   HostMessageContext context(params);
   HandleResourceCall(params, nested_msg, &context);
 }

 (...skipping 92 matching lines...)
   }
   // Invoking the ResourceHost destructor might result in looking up the
   // PP_Resource in resources_. std::map is not well specified as to whether
   // the element will be there or not. Therefore, we delay destruction of
   // the ResourceHost until after we've made sure the map no longer contains
   // |resource|.
   linked_ptr<ResourceHost> delete_at_end_of_scope(found->second);
   resources_.erase(found);
 }

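The chunk above relies on a small idiom worth calling out: the linked_ptr is copied into a local before the map entry is erased, so the ResourceHost destructor runs only once the map no longer contains |resource|. A generic, self-contained sketch of the same idiom, with std::shared_ptr standing in for linked_ptr and all names illustrative:

#include <map>
#include <memory>

struct ResourceLike {
  // The destructor may look this resource up in the owning map, so the map
  // must already be consistent by the time it runs.
  ~ResourceLike() {}
};

std::map<int, std::shared_ptr<ResourceLike> > g_resources;

void DestroyResource(int id) {
  std::map<int, std::shared_ptr<ResourceLike> >::iterator found =
      g_resources.find(id);
  if (found == g_resources.end())
    return;
  // Keep the object alive past erase() so its destructor never observes the
  // map in a half-erased state.
  std::shared_ptr<ResourceLike> delete_at_end_of_scope = found->second;
  g_resources.erase(found);
}  // |delete_at_end_of_scope| is released here, after the erase completed.

int main() {
  g_resources[1].reset(new ResourceLike);
  DestroyResource(1);
  return 0;
}
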
-ResourceHost* PpapiHost::GetResourceHost(PP_Resource resource) const {
-  ResourceMap::const_iterator found = resources_.find(resource);
-  return found == resources_.end() ? NULL : found->second.get();
-}
+linked_ptr<ResourceHost> PpapiHost::RawToLinkedPtr(ResourceHost* host) {
+  if (host->pp_resource()) {
+    // If it has a valid PP_Resource, it's not "pending" and therefore should
+    // be in resources_.
+    DCHECK(resources_.count(host->pp_resource()));
+    return resources_[host->pp_resource()];
+  } else {
+    // It's a pending host. This map should be small, so a linear search is
+    // OK.
+    PendingHostResourceMap::iterator iter = pending_resource_hosts_.begin();
+    for (; iter != pending_resource_hosts_.end(); ++iter) {
+      if (host == iter->second.get())
+        return iter->second;
+    }
+  }
+  NOTREACHED();
+  return linked_ptr<ResourceHost>();
+}

 }  // namespace host
 }  // namespace ppapi
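
A note on why RawToLinkedPtr() searches resources_ and pending_resource_hosts_ rather than simply wrapping the raw pointer: constructing a fresh linked_ptr from a raw pointer would start a second, independent ownership ring and eventually double-delete the host, so ownership can only be shared by copying an existing linked_ptr. The same hazard, illustrated with std::shared_ptr (analogy only; linked_ptr behaves the same way with its ring of owners):

#include <memory>

int main() {
  std::shared_ptr<int> original(new int(42));
  // std::shared_ptr<int> wrong(original.get());  // Second owner: double free.
  std::shared_ptr<int> right = original;  // Shares ownership with |original|.
  return 0;
}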