| OLD | NEW |
| (Empty) |
| 1 // Copyright (c) 2012 The Chromium Authors. All rights reserved. | |
| 2 // Use of this source code is governed by a BSD-style license that can be | |
| 3 // found in the LICENSE file. | |
| 4 | |
| 5 #include <stddef.h> | |
| 6 #include <stdint.h> | |
| 7 | |
| 8 #include "base/callback.h" | |
| 9 #include "base/logging.h" | |
| 10 #include "base/memory/scoped_ptr.h" | |
| 11 #include "base/stl_util.h" | |
| 12 #include "sandbox/win/src/crosscall_params.h" | |
| 13 #include "sandbox/win/src/crosscall_server.h" | |
| 14 #include "sandbox/win/src/sandbox.h" | |
| 15 #include "sandbox/win/src/sandbox_types.h" | |
| 16 #include "sandbox/win/src/sharedmem_ipc_client.h" | |
| 17 #include "sandbox/win/src/sharedmem_ipc_server.h" | |
| 18 | |
namespace {
// Process-wide mutex used to signal to the sandboxed target that the broker
// (server) is still alive. This handle must not be closed: it is deliberately
// leaked so that, if the server process dies, Windows reports the mutex as
// abandoned to any target waiting on it. Written once via an interlocked
// compare-exchange in the SharedMemIPCServer constructor.
volatile HANDLE g_alive_mutex = NULL;
}  // namespace
| 23 | |
| 24 namespace sandbox { | |
| 25 | |
| 26 SharedMemIPCServer::ServerControl::ServerControl() { | |
| 27 } | |
| 28 | |
| 29 SharedMemIPCServer::ServerControl::~ServerControl() { | |
| 30 } | |
| 31 | |
// Constructs the server side of the shared-memory IPC channel.
// |target_process| / |target_process_id| identify the sandboxed client;
// the raw |target_process| handle is NOT owned by this object (it is owned
// by the caller). |thread_provider| supplies the thread-pool waits and
// |dispatcher| routes incoming IPC calls; both are borrowed pointers.
SharedMemIPCServer::SharedMemIPCServer(HANDLE target_process,
                                       DWORD target_process_id,
                                       ThreadProvider* thread_provider,
                                       Dispatcher* dispatcher)
    : client_control_(NULL),
      thread_provider_(thread_provider),
      target_process_(target_process),
      target_process_id_(target_process_id),
      call_dispatcher_(dispatcher) {
  // We create an initially owned mutex. If the server dies unexpectedly,
  // the thread that owns it will fail to release the lock and windows will
  // report to the target (when it tries to acquire it) that the wait was
  // abandoned. Note: We purposely leak the local handle because we want it to
  // be closed by Windows itself so it is properly marked as abandoned if the
  // server dies.
  if (!g_alive_mutex) {
    HANDLE mutex = ::CreateMutexW(NULL, TRUE, NULL);
    // Multiple SharedMemIPCServers can be constructed concurrently; only the
    // first compare-exchange wins and publishes its mutex. The check above is
    // just a fast path -- the interlocked operation is the real gate.
    if (::InterlockedCompareExchangePointer(&g_alive_mutex, mutex, NULL)) {
      // We lost the race to create the mutex.
      ::CloseHandle(mutex);
    }
  }
}
| 55 | |
SharedMemIPCServer::~SharedMemIPCServer() {
  // Free the wait handles associated with the thread pool.
  if (!thread_provider_->UnRegisterWaits(this)) {
    // Better to leak than to crash: if the waits could not be unregistered,
    // a pool thread may still reference server_contexts_ or client_control_,
    // so deleting them here would be a use-after-free.
    return;
  }
  // Deletes every heap-allocated ServerControl created in Init().
  STLDeleteElements(&server_contexts_);

  // client_control_ points into the mapped view of the shared section; it is
  // only set once Init() has run.
  if (client_control_)
    ::UnmapViewOfFile(client_control_);
}
| 67 | |
| 68 bool SharedMemIPCServer::Init(void* shared_mem, | |
| 69 uint32_t shared_size, | |
| 70 uint32_t channel_size) { | |
| 71 // The shared memory needs to be at least as big as a channel. | |
| 72 if (shared_size < channel_size) { | |
| 73 return false; | |
| 74 } | |
| 75 // The channel size should be aligned. | |
| 76 if (0 != (channel_size % 32)) { | |
| 77 return false; | |
| 78 } | |
| 79 | |
| 80 // Calculate how many channels we can fit in the shared memory. | |
| 81 shared_size -= offsetof(IPCControl, channels); | |
| 82 size_t channel_count = shared_size / (sizeof(ChannelControl) + channel_size); | |
| 83 | |
| 84 // If we cannot fit even one channel we bail out. | |
| 85 if (0 == channel_count) { | |
| 86 return false; | |
| 87 } | |
| 88 // Calculate the start of the first channel. | |
| 89 size_t base_start = (sizeof(ChannelControl)* channel_count) + | |
| 90 offsetof(IPCControl, channels); | |
| 91 | |
| 92 client_control_ = reinterpret_cast<IPCControl*>(shared_mem); | |
| 93 client_control_->channels_count = 0; | |
| 94 | |
| 95 // This is the initialization that we do per-channel. Basically: | |
| 96 // 1) make two events (ping & pong) | |
| 97 // 2) create handles to the events for the client and the server. | |
| 98 // 3) initialize the channel (client_context) with the state. | |
| 99 // 4) initialize the server side of the channel (service_context). | |
| 100 // 5) call the thread provider RegisterWait to register the ping events. | |
| 101 for (size_t ix = 0; ix != channel_count; ++ix) { | |
| 102 ChannelControl* client_context = &client_control_->channels[ix]; | |
| 103 ServerControl* service_context = new ServerControl; | |
| 104 server_contexts_.push_back(service_context); | |
| 105 | |
| 106 if (!MakeEvents(&service_context->ping_event, | |
| 107 &service_context->pong_event, | |
| 108 &client_context->ping_event, | |
| 109 &client_context->pong_event)) { | |
| 110 return false; | |
| 111 } | |
| 112 | |
| 113 client_context->channel_base = base_start; | |
| 114 client_context->state = kFreeChannel; | |
| 115 | |
| 116 // Note that some of these values are available as members of this object | |
| 117 // but we put them again into the service_context because we will be called | |
| 118 // on a static method (ThreadPingEventReady). In particular, target_process_ | |
| 119 // is a raw handle that is not owned by this object (it's owned by the | |
| 120 // owner of this object), and we are storing it in multiple places. | |
| 121 service_context->shared_base = reinterpret_cast<char*>(shared_mem); | |
| 122 service_context->channel_size = channel_size; | |
| 123 service_context->channel = client_context; | |
| 124 service_context->channel_buffer = service_context->shared_base + | |
| 125 client_context->channel_base; | |
| 126 service_context->dispatcher = call_dispatcher_; | |
| 127 service_context->target_info.process = target_process_; | |
| 128 service_context->target_info.process_id = target_process_id_; | |
| 129 // Advance to the next channel. | |
| 130 base_start += channel_size; | |
| 131 // Register the ping event with the threadpool. | |
| 132 thread_provider_->RegisterWait(this, service_context->ping_event.Get(), | |
| 133 ThreadPingEventReady, service_context); | |
| 134 } | |
| 135 if (!::DuplicateHandle(::GetCurrentProcess(), g_alive_mutex, | |
| 136 target_process_, &client_control_->server_alive, | |
| 137 SYNCHRONIZE | EVENT_MODIFY_STATE, FALSE, 0)) { | |
| 138 return false; | |
| 139 } | |
| 140 // This last setting indicates to the client all is setup. | |
| 141 client_control_->channels_count = channel_count; | |
| 142 return true; | |
| 143 } | |
| 144 | |
| 145 // Releases memory allocated for IPC arguments, if needed. | |
| 146 void ReleaseArgs(const IPCParams* ipc_params, void* args[kMaxIpcParams]) { | |
| 147 for (size_t i = 0; i < kMaxIpcParams; i++) { | |
| 148 switch (ipc_params->args[i]) { | |
| 149 case WCHAR_TYPE: { | |
| 150 delete reinterpret_cast<base::string16*>(args[i]); | |
| 151 args[i] = NULL; | |
| 152 break; | |
| 153 } | |
| 154 case INOUTPTR_TYPE: { | |
| 155 delete reinterpret_cast<CountedBuffer*>(args[i]); | |
| 156 args[i] = NULL; | |
| 157 break; | |
| 158 } | |
| 159 default: break; | |
| 160 } | |
| 161 } | |
| 162 } | |
| 163 | |
// Fills up the list of arguments (args and ipc_params) for an IPC call.
// Returns false on any malformed parameter; on failure every allocation made
// so far is released via ReleaseArgs() before returning, so the caller never
// has to clean up a partially-filled args array.
bool GetArgs(CrossCallParamsEx* params, IPCParams* ipc_params,
             void* args[kMaxIpcParams]) {
  if (kMaxIpcParams < params->GetParamsCount())
    return false;

  for (uint32_t i = 0; i < params->GetParamsCount(); i++) {
    uint32_t size;
    ArgType type;
    args[i] = params->GetRawParameter(i, &size, &type);
    if (args[i]) {
      ipc_params->args[i] = type;
      switch (type) {
        case WCHAR_TYPE: {
          // Copy the string out of the (client-writable) channel into a
          // heap-owned string16; ReleaseArgs() frees it later.
          scoped_ptr<base::string16> data(new base::string16);
          if (!params->GetParameterStr(i, data.get())) {
            // Null this slot first so ReleaseArgs() does not try to delete
            // the raw in-channel pointer as a string16.
            args[i] = 0;
            ReleaseArgs(ipc_params, args);
            return false;
          }
          args[i] = data.release();
          break;
        }
        case UINT32_TYPE: {
          uint32_t data;
          if (!params->GetParameter32(i, &data)) {
            ReleaseArgs(ipc_params, args);
            return false;
          }
          // Stored by value: the integer is smuggled inside the pointer slot.
          IPCInt ipc_int(data);
          args[i] = ipc_int.AsVoidPtr();
          break;
        }
        case VOIDPTR_TYPE : {
          void* data;
          if (!params->GetParameterVoidPtr(i, &data)) {
            ReleaseArgs(ipc_params, args);
            return false;
          }
          args[i] = data;
          break;
        }
        case INOUTPTR_TYPE: {
          if (!args[i]) {
            ReleaseArgs(ipc_params, args);
            return false;
          }
          // Wrap the raw in-channel pointer plus its size; the wrapper (not
          // the buffer it points at) is owned and freed by ReleaseArgs().
          CountedBuffer* buffer = new CountedBuffer(args[i] , size);
          args[i] = buffer;
          break;
        }
        default: break;
      }
    }
  }
  return true;
}
| 221 | |
// Validates, copies and dispatches one IPC call found in |ipc_buffer|.
// Fills |call_result| with the outcome in every case: SBOX_ERROR_INVALID_IPC
// for malformed messages, SBOX_ERROR_FAILED_IPC when a handler exists but
// rejects the call, or the handler-provided result on success. Returns true
// only when a handler serviced the call successfully.
bool SharedMemIPCServer::InvokeCallback(const ServerControl* service_context,
                                        void* ipc_buffer,
                                        CrossCallReturn* call_result) {
  // Set the default error code;
  SetCallError(SBOX_ERROR_INVALID_IPC, call_result);
  uint32_t output_size = 0;
  // Parse, verify and copy the message. The handler operates on a copy
  // of the message so the client cannot play dirty tricks by changing the
  // data in the channel while the IPC is being processed.
  scoped_ptr<CrossCallParamsEx> params(
      CrossCallParamsEx::CreateFromBuffer(ipc_buffer,
                                          service_context->channel_size,
                                          &output_size));
  if (!params.get())
    return false;

  uint32_t tag = params->GetTag();
  static_assert(0 == INVALID_TYPE, "incorrect type enum");
  // The = {0} relies on INVALID_TYPE being 0 so that unused arg slots are
  // marked invalid (checked by the static_assert above).
  IPCParams ipc_params = {0};
  ipc_params.ipc_tag = tag;

  void* args[kMaxIpcParams];
  if (!GetArgs(params.get(), &ipc_params, args))
    return false;

  IPCInfo ipc_info = {0};
  ipc_info.ipc_tag = tag;
  ipc_info.client_info = &service_context->target_info;
  Dispatcher* dispatcher = service_context->dispatcher;
  DCHECK(dispatcher);
  bool error = true;
  Dispatcher* handler = NULL;

  // OnMessageReady returns the dispatcher willing to service this tag (or
  // NULL) and a type-erased member-function pointer; the switch below casts
  // it back to the arity-specific callback type before invoking it.
  Dispatcher::CallbackGeneric callback_generic;
  handler = dispatcher->OnMessageReady(&ipc_params, &callback_generic);
  if (handler) {
    switch (params->GetParamsCount()) {
      case 0: {
        // Ask the IPC dispatcher if she can service this IPC.
        Dispatcher::Callback0 callback =
            reinterpret_cast<Dispatcher::Callback0>(callback_generic);
        if (!(handler->*callback)(&ipc_info))
          break;
        error = false;
        break;
      }
      case 1: {
        Dispatcher::Callback1 callback =
            reinterpret_cast<Dispatcher::Callback1>(callback_generic);
        if (!(handler->*callback)(&ipc_info, args[0]))
          break;
        error = false;
        break;
      }
      case 2: {
        Dispatcher::Callback2 callback =
            reinterpret_cast<Dispatcher::Callback2>(callback_generic);
        if (!(handler->*callback)(&ipc_info, args[0], args[1]))
          break;
        error = false;
        break;
      }
      case 3: {
        Dispatcher::Callback3 callback =
            reinterpret_cast<Dispatcher::Callback3>(callback_generic);
        if (!(handler->*callback)(&ipc_info, args[0], args[1], args[2]))
          break;
        error = false;
        break;
      }
      case 4: {
        Dispatcher::Callback4 callback =
            reinterpret_cast<Dispatcher::Callback4>(callback_generic);
        if (!(handler->*callback)(&ipc_info, args[0], args[1], args[2],
                                  args[3]))
          break;
        error = false;
        break;
      }
      case 5: {
        Dispatcher::Callback5 callback =
            reinterpret_cast<Dispatcher::Callback5>(callback_generic);
        if (!(handler->*callback)(&ipc_info, args[0], args[1], args[2], args[3],
                                  args[4]))
          break;
        error = false;
        break;
      }
      case 6: {
        Dispatcher::Callback6 callback =
            reinterpret_cast<Dispatcher::Callback6>(callback_generic);
        if (!(handler->*callback)(&ipc_info, args[0], args[1], args[2], args[3],
                                  args[4], args[5]))
          break;
        error = false;
        break;
      }
      case 7: {
        Dispatcher::Callback7 callback =
            reinterpret_cast<Dispatcher::Callback7>(callback_generic);
        if (!(handler->*callback)(&ipc_info, args[0], args[1], args[2], args[3],
                                  args[4], args[5], args[6]))
          break;
        error = false;
        break;
      }
      case 8: {
        Dispatcher::Callback8 callback =
            reinterpret_cast<Dispatcher::Callback8>(callback_generic);
        if (!(handler->*callback)(&ipc_info, args[0], args[1], args[2], args[3],
                                  args[4], args[5], args[6], args[7]))
          break;
        error = false;
        break;
      }
      case 9: {
        Dispatcher::Callback9 callback =
            reinterpret_cast<Dispatcher::Callback9>(callback_generic);
        if (!(handler->*callback)(&ipc_info, args[0], args[1], args[2], args[3],
                                  args[4], args[5], args[6], args[7], args[8]))
          break;
        error = false;
        break;
      }
      default: {
        // GetArgs() already rejected counts above kMaxIpcParams, so this is
        // unreachable unless the two fall out of sync.
        NOTREACHED();
        break;
      }
    }
  }

  if (error) {
    if (handler)
      SetCallError(SBOX_ERROR_FAILED_IPC, call_result);
  } else {
    memcpy(call_result, &ipc_info.return_info, sizeof(*call_result));
    SetCallSuccess(call_result);
    if (params->IsInOut()) {
      // Maybe the params got changed by the broker. We need to update the
      // memory section.
      memcpy(ipc_buffer, params.get(), output_size);
    }
  }

  // Frees the heap allocations GetArgs() made for string/buffer arguments.
  ReleaseArgs(&ipc_params, args);

  return !error;
}
| 370 | |
// This function gets called by a thread from the thread pool when a
// ping event fires. The context is the same as passed in the RegisterWait()
// call above. It services the IPC in the channel, writes the answer back and
// signals the pong event to wake the waiting client.
void __stdcall SharedMemIPCServer::ThreadPingEventReady(void* context,
                                                        unsigned char) {
  if (NULL == context) {
    DCHECK(false);
    return;
  }
  ServerControl* service_context = reinterpret_cast<ServerControl*>(context);
  // Since the event fired, the channel *must* be busy. Change to kAckChannel
  // while we service it.
  LONG last_state =
      ::InterlockedCompareExchange(&service_context->channel->state,
                                   kAckChannel, kBusyChannel);
  if (kBusyChannel != last_state) {
    // The client (which shares this state) is misbehaving or the protocol is
    // out of sync; drop the request rather than touch the channel.
    DCHECK(false);
    return;
  }

  // Prepare the result structure. At this point we will return some result
  // even if the IPC is invalid, malformed or has no handler.
  CrossCallReturn call_result = {0};
  void* buffer = service_context->channel_buffer;

  InvokeCallback(service_context, buffer, &call_result);

  // Copy the answer back into the channel and signal the pong event. This
  // should wake up the client so he can finish the ipc cycle.
  CrossCallParams* call_params = reinterpret_cast<CrossCallParams*>(buffer);
  memcpy(call_params->GetCallReturn(), &call_result, sizeof(call_result));
  ::InterlockedExchange(&service_context->channel->state, kAckChannel);
  ::SetEvent(service_context->pong_event.Get());
}
| 405 | |
| 406 bool SharedMemIPCServer::MakeEvents(base::win::ScopedHandle* server_ping, | |
| 407 base::win::ScopedHandle* server_pong, | |
| 408 HANDLE* client_ping, HANDLE* client_pong) { | |
| 409 // Note that the IPC client has no right to delete the events. That would | |
| 410 // cause problems. The server *owns* the events. | |
| 411 const DWORD kDesiredAccess = SYNCHRONIZE | EVENT_MODIFY_STATE; | |
| 412 | |
| 413 // The events are auto reset, and start not signaled. | |
| 414 server_ping->Set(::CreateEventW(NULL, FALSE, FALSE, NULL)); | |
| 415 if (!::DuplicateHandle(::GetCurrentProcess(), server_ping->Get(), | |
| 416 target_process_, client_ping, kDesiredAccess, FALSE, | |
| 417 0)) { | |
| 418 return false; | |
| 419 } | |
| 420 | |
| 421 server_pong->Set(::CreateEventW(NULL, FALSE, FALSE, NULL)); | |
| 422 if (!::DuplicateHandle(::GetCurrentProcess(), server_pong->Get(), | |
| 423 target_process_, client_pong, kDesiredAccess, FALSE, | |
| 424 0)) { | |
| 425 return false; | |
| 426 } | |
| 427 return true; | |
| 428 } | |
| 429 | |
| 430 } // namespace sandbox | |
| OLD | NEW |