Chromium Code Reviews

Side by Side Diff: third_party/breakpad/src/client/windows/crash_generation/crash_generation_server.cc

Issue 624713003: Keep only base/extractor.[cc|h]. (Closed) Base URL: https://chromium.googlesource.com/external/omaha.git@master
Patch Set: Created 6 years, 2 months ago
1 // Copyright (c) 2008, Google Inc.
2 // All rights reserved.
3 //
4 // Redistribution and use in source and binary forms, with or without
5 // modification, are permitted provided that the following conditions are
6 // met:
7 //
8 // * Redistributions of source code must retain the above copyright
9 // notice, this list of conditions and the following disclaimer.
10 // * Redistributions in binary form must reproduce the above
11 // copyright notice, this list of conditions and the following disclaimer
12 // in the documentation and/or other materials provided with the
13 // distribution.
14 // * Neither the name of Google Inc. nor the names of its
15 // contributors may be used to endorse or promote products derived from
16 // this software without specific prior written permission.
17 //
18 // THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
19 // "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
20 // LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
21 // A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
22 // OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
23 // SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
24 // LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
25 // DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
26 // THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
27 // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
28 // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
29
30 #include "client/windows/crash_generation/crash_generation_server.h"
31 #include <windows.h>
32 #include <cassert>
33 #include <list>
34 #include "client/windows/common/auto_critical_section.h"
35 #include "processor/scoped_ptr.h"
36
37 #include "client/windows/crash_generation/client_info.h"
38
39 namespace google_breakpad {
40
41 // Output buffer size.
42 static const size_t kOutBufferSize = 64;
43
44 // Input buffer size.
45 static const size_t kInBufferSize = 64;
46
47 // Access flags for the client on the dump request event.
48 static const DWORD kDumpRequestEventAccess = EVENT_MODIFY_STATE;
49
50 // Access flags for the client on the dump generated event.
51 static const DWORD kDumpGeneratedEventAccess = EVENT_MODIFY_STATE |
52 SYNCHRONIZE;
53
54 // Access flags for the client on the mutex.
55 static const DWORD kMutexAccess = SYNCHRONIZE;
56
57 // Attribute flags for the pipe.
58 static const DWORD kPipeAttr = FILE_FLAG_FIRST_PIPE_INSTANCE |
59 PIPE_ACCESS_DUPLEX |
60 FILE_FLAG_OVERLAPPED;
61
62 // Mode for the pipe.
63 static const DWORD kPipeMode = PIPE_TYPE_MESSAGE |
64 PIPE_READMODE_MESSAGE |
65 PIPE_WAIT;
66
67 // For pipe I/O, execute the callback in the wait thread itself,
68 // since the callback does very little work. The callback executes
69 // the code for one of the states of the server state machine and
70 // the code for all of the states performs async I/O and hence
71 // finishes very quickly.
72 static const ULONG kPipeIOThreadFlags = WT_EXECUTEINWAITTHREAD;
73
74 // Dump request threads will, most likely, generate dumps. That may
75 // take some time to finish, so specify the WT_EXECUTELONGFUNCTION flag.
76 static const ULONG kDumpRequestThreadFlags = WT_EXECUTEINWAITTHREAD |
77 WT_EXECUTELONGFUNCTION;
78
79 // Maximum delay during server shutdown if some work items
80 // are still executing.
81 static const int kShutdownDelayMs = 10000;
82
83 // Interval for each sleep during server shutdown.
84 static const int kShutdownSleepIntervalMs = 5;
85
86 static bool IsClientRequestValid(const ProtocolMessage& msg) {
87 return msg.tag == MESSAGE_TAG_REGISTRATION_REQUEST &&
88 msg.pid != 0 &&
89 msg.thread_id != NULL &&
90 msg.exception_pointers != NULL &&
91 msg.assert_info != NULL;
92 }
93
94 CrashGenerationServer::CrashGenerationServer(
95 const std::wstring& pipe_name,
96 SECURITY_ATTRIBUTES* pipe_sec_attrs,
97 OnClientConnectedCallback connect_callback,
98 void* connect_context,
99 OnClientDumpRequestCallback dump_callback,
100 void* dump_context,
101 OnClientExitedCallback exit_callback,
102 void* exit_context,
103 bool generate_dumps,
104 const std::wstring* dump_path)
105 : pipe_name_(pipe_name),
106 pipe_sec_attrs_(pipe_sec_attrs),
107 pipe_(NULL),
108 pipe_wait_handle_(NULL),
109 server_alive_handle_(NULL),
110 connect_callback_(connect_callback),
111 connect_context_(connect_context),
112 dump_callback_(dump_callback),
113 dump_context_(dump_context),
114 exit_callback_(exit_callback),
115 exit_context_(exit_context),
116 generate_dumps_(generate_dumps),
117 dump_generator_(NULL),
118 server_state_(IPC_SERVER_STATE_UNINITIALIZED),
119 shutting_down_(false),
120 overlapped_(),
121 client_info_(NULL),
122 cleanup_item_count_(0) {
123 InitializeCriticalSection(&clients_sync_);
124
125 if (dump_path) {
126 dump_generator_.reset(new MinidumpGenerator(*dump_path));
127 }
128 }
129
130 CrashGenerationServer::~CrashGenerationServer() {
131 // Indicate to existing threads that the server is shutting down.
132 shutting_down_ = true;
133
134 // Even if there are no current worker threads running, it is possible that
135 // an I/O request is pending on the pipe right now but not yet done. In fact,
136 // it's very likely this is the case unless we are in an ERROR state. If we
137 // don't wait for the pending I/O to be done, then when the I/O completes,
138 // it may write to invalid memory. AppVerifier will flag this problem too.
139 // So we disconnect from the pipe and then wait for the server to get into
140 // error state so that the pending I/O will fail and get cleared.
141 DisconnectNamedPipe(pipe_);
142 int num_tries = 100;
143 while (num_tries-- && server_state_ != IPC_SERVER_STATE_ERROR) {
144 Sleep(10);
145 }
146
147 // Unregister wait on the pipe.
148 if (pipe_wait_handle_) {
149 // Wait for already executing callbacks to finish.
150 UnregisterWaitEx(pipe_wait_handle_, INVALID_HANDLE_VALUE);
151 }
152
153 // Close the pipe to avoid further client connections.
154 if (pipe_) {
155 CloseHandle(pipe_);
156 }
157
158 // Request all ClientInfo objects to unregister all waits.
159 // New scope to hold the lock for the shortest time.
160 {
161 AutoCriticalSection lock(&clients_sync_);
162
163 std::list<ClientInfo*>::iterator iter;
164 for (iter = clients_.begin(); iter != clients_.end(); ++iter) {
165 ClientInfo* client_info = *iter;
166 client_info->UnregisterWaits();
167 }
168 }
169
170 // Now that all waits have been unregistered, wait for some time
171 // for all pending work items to finish.
172 int total_wait = 0;
173 while (cleanup_item_count_ > 0) {
174 Sleep(kShutdownSleepIntervalMs);
175
176 total_wait += kShutdownSleepIntervalMs;
177
178 if (total_wait >= kShutdownDelayMs) {
179 break;
180 }
181 }
182
183 // Clean up all the ClientInfo objects.
184 // New scope to hold the lock for the shortest time.
185 {
186 AutoCriticalSection lock(&clients_sync_);
187
188 std::list<ClientInfo*>::iterator iter;
189 for (iter = clients_.begin(); iter != clients_.end(); ++iter) {
190 ClientInfo* client_info = *iter;
191 delete client_info;
192 }
193 }
194
195 if (server_alive_handle_) {
196 // Release the mutex before closing the handle so that clients requesting
197 // dumps don't wait for a long time for the server to generate a dump.
198 ReleaseMutex(server_alive_handle_);
199 CloseHandle(server_alive_handle_);
200 }
201
202 if (overlapped_.hEvent) {
203 CloseHandle(overlapped_.hEvent);
204 }
205
206 DeleteCriticalSection(&clients_sync_);
207 }
208
209 bool CrashGenerationServer::Start() {
210 if (server_state_ != IPC_SERVER_STATE_UNINITIALIZED) {
211 return false;
212 }
213
214 server_state_ = IPC_SERVER_STATE_INITIAL;
215
216 server_alive_handle_ = CreateMutex(NULL, TRUE, NULL);
217 if (!server_alive_handle_) {
218 return false;
219 }
220
221 // Event to signal the client connection and pipe reads and writes.
222 overlapped_.hEvent = CreateEvent(NULL, // Security descriptor.
223 TRUE, // Manual reset.
224 FALSE, // Initially signaled.
225 NULL); // Name.
226 if (!overlapped_.hEvent) {
227 return false;
228 }
229
230 // Register a callback with the thread pool for the client connection.
231 if (!RegisterWaitForSingleObject(&pipe_wait_handle_,
232 overlapped_.hEvent,
233 OnPipeConnected,
234 this,
235 INFINITE,
236 kPipeIOThreadFlags)) {
237 return false;
238 }
239
240 pipe_ = CreateNamedPipe(pipe_name_.c_str(),
241 kPipeAttr,
242 kPipeMode,
243 1,
244 kOutBufferSize,
245 kInBufferSize,
246 0,
247 pipe_sec_attrs_);
248 if (pipe_ == INVALID_HANDLE_VALUE) {
249 return false;
250 }
251
252 // Kick-start the state machine. This will initiate an asynchronous wait
253 // for client connections.
254 HandleInitialState();
255
256 // If we are in error state, it's because we failed to start listening.
257 return server_state_ != IPC_SERVER_STATE_ERROR;
258 }
259
260 // If the server thread serving clients ever gets into the
261 // ERROR state, reset the event, close the pipe and remain
262 // in the error state forever. Error state means something
263 // that we didn't account for has happened, and it's dangerous
264 // to do anything unknowingly.
265 void CrashGenerationServer::HandleErrorState() {
266 assert(server_state_ == IPC_SERVER_STATE_ERROR);
267
268 // If the server is shutting down anyway, don't clean up
269 // here since the shutdown process will clean up.
270 if (shutting_down_) {
271 return;
272 }
273
274 if (pipe_wait_handle_) {
275 UnregisterWait(pipe_wait_handle_);
276 pipe_wait_handle_ = NULL;
277 }
278
279 if (pipe_) {
280 CloseHandle(pipe_);
281 pipe_ = NULL;
282 }
283
284 if (overlapped_.hEvent) {
285 CloseHandle(overlapped_.hEvent);
286 overlapped_.hEvent = NULL;
287 }
288 }
289
290 // When the server thread serving clients is in the INITIAL state,
291 // try to connect to the pipe asynchronously. If the connection
292 // finishes synchronously, directly go into the CONNECTED state;
293 // otherwise go into the CONNECTING state. For any problems, go
294 // into the ERROR state.
295 void CrashGenerationServer::HandleInitialState() {
296 assert(server_state_ == IPC_SERVER_STATE_INITIAL);
297
298 if (!ResetEvent(overlapped_.hEvent)) {
299 EnterErrorState();
300 return;
301 }
302
303 bool success = ConnectNamedPipe(pipe_, &overlapped_) != FALSE;
304 DWORD error_code = success ? ERROR_SUCCESS : GetLastError();
305
306 // From MSDN, it is not clear whether ConnectNamedPipe, when used in
307 // overlapped mode, can ever return a non-zero value and, if so, in
308 // what cases.
309 assert(!success);
310
311 switch (error_code) {
312 case ERROR_IO_PENDING:
313 EnterStateWhenSignaled(IPC_SERVER_STATE_CONNECTING);
314 break;
315
316 case ERROR_PIPE_CONNECTED:
317 EnterStateImmediately(IPC_SERVER_STATE_CONNECTED);
318 break;
319
320 default:
321 EnterErrorState();
322 break;
323 }
324 }
325
326 // When the server thread serving the clients is in the CONNECTING state,
327 // try to get the result of the asynchronous connection request using
328 // the OVERLAPPED object. If the result indicates the connection is done,
329 // go into the CONNECTED state. If the result indicates I/O is still
330 // INCOMPLETE, remain in the CONNECTING state. For any problems,
331 // go into the DISCONNECTING state.
332 void CrashGenerationServer::HandleConnectingState() {
333 assert(server_state_ == IPC_SERVER_STATE_CONNECTING);
334
335 DWORD bytes_count = 0;
336 bool success = GetOverlappedResult(pipe_,
337 &overlapped_,
338 &bytes_count,
339 FALSE) != FALSE;
340 DWORD error_code = success ? ERROR_SUCCESS : GetLastError();
341
342 if (success) {
343 EnterStateImmediately(IPC_SERVER_STATE_CONNECTED);
344 } else if (error_code != ERROR_IO_INCOMPLETE) {
345 EnterStateImmediately(IPC_SERVER_STATE_DISCONNECTING);
346 } else {
347 // remain in CONNECTING state
348 }
349 }
350
351 // When the server thread serving the clients is in the CONNECTED state,
352 // try to issue an asynchronous read from the pipe. If read completes
353 // synchronously or if I/O is pending then go into the READING state.
354 // For any problems, go into the DISCONNECTING state.
355 void CrashGenerationServer::HandleConnectedState() {
356 assert(server_state_ == IPC_SERVER_STATE_CONNECTED);
357
358 DWORD bytes_count = 0;
359 memset(&msg_, 0, sizeof(msg_));
360 bool success = ReadFile(pipe_,
361 &msg_,
362 sizeof(msg_),
363 &bytes_count,
364 &overlapped_) != FALSE;
365 DWORD error_code = success ? ERROR_SUCCESS : GetLastError();
366
367 // Note that the asynchronous read issued above can finish before the
368 // code below executes. But, it is okay to change state after issuing
369 // the asynchronous read. This is because even if the asynchronous read
370 // is done, the callback for it would not be executed until the current
371 // thread finishes its execution.
372 if (success || error_code == ERROR_IO_PENDING) {
373 EnterStateWhenSignaled(IPC_SERVER_STATE_READING);
374 } else {
375 EnterStateImmediately(IPC_SERVER_STATE_DISCONNECTING);
376 }
377 }
378
379 // When the server thread serving the clients is in the READING state,
380 // try to get the result of the async read. If async read is done,
381 // go into the READ_DONE state. For any problems, go into the
382 // DISCONNECTING state.
383 void CrashGenerationServer::HandleReadingState() {
384 assert(server_state_ == IPC_SERVER_STATE_READING);
385
386 DWORD bytes_count = 0;
387 bool success = GetOverlappedResult(pipe_,
388 &overlapped_,
389 &bytes_count,
390 FALSE) != FALSE;
391 DWORD error_code = success ? ERROR_SUCCESS : GetLastError();
392
393 if (success && bytes_count == sizeof(ProtocolMessage)) {
394 EnterStateImmediately(IPC_SERVER_STATE_READ_DONE);
395 } else {
396 // We should never get an I/O incomplete since we should not execute this
397 // unless the Read has finished and the overlapped event is signaled. If
398 // we do get INCOMPLETE, we have a bug in our code.
399 assert(error_code != ERROR_IO_INCOMPLETE);
400
401 EnterStateImmediately(IPC_SERVER_STATE_DISCONNECTING);
402 }
403 }
404
405 // When the server thread serving the client is in the READ_DONE state,
406 // validate the client's request message, register the client by
407 // creating appropriate objects and prepare the response. Then try to
408 // write the response to the pipe asynchronously. If that succeeds,
409 // go into the WRITING state. For any problems, go into the DISCONNECTING
410 // state.
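// Together, the READ_DONE through READING_ACK states implement the
// registration handshake: validate the request, duplicate the dump-request
// and dump-generated events and the server-alive mutex into the client
// process, write the reply, then read a short acknowledgement from the
// client before invoking connect_callback_.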
411 void CrashGenerationServer::HandleReadDoneState() {
412 assert(server_state_ == IPC_SERVER_STATE_READ_DONE);
413
414 if (!IsClientRequestValid(msg_)) {
415 EnterStateImmediately(IPC_SERVER_STATE_DISCONNECTING);
416 return;
417 }
418
419 scoped_ptr<ClientInfo> client_info(
420 new ClientInfo(this,
421 msg_.pid,
422 msg_.dump_type,
423 msg_.thread_id,
424 msg_.exception_pointers,
425 msg_.assert_info,
426 msg_.custom_client_info));
427
428 if (!client_info->Initialize()) {
429 EnterStateImmediately(IPC_SERVER_STATE_DISCONNECTING);
430 return;
431 }
432
433 // RespondToClient issues an asynchronous WriteFile call. Iff it succeeds,
434 // ownership of the client_info pointer passes to the server instance, in
435 // which case we must be sure not to free it in this function.
436 if (!RespondToClient(client_info.get())) {
437 EnterStateImmediately(IPC_SERVER_STATE_DISCONNECTING);
438 return;
439 }
440
441 client_info_ = client_info.release();
442
443 // Note that the asynchronous write issued by RespondToClient function
444 // can finish before the code below executes. But it is okay to change
445 // state after issuing the asynchronous write. This is because even if
446 // the asynchronous write is done, the callback for it would not be
447 // executed until the current thread finishes its execution.
448 EnterStateWhenSignaled(IPC_SERVER_STATE_WRITING);
449 }
450
451 // When the server thread serving the clients is in the WRITING state,
452 // try to get the result of the async write. If the async write is done,
453 // go into the WRITE_DONE state. For any problems, go into the
454 // DISCONNECTING state.
455 void CrashGenerationServer::HandleWritingState() {
456 assert(server_state_ == IPC_SERVER_STATE_WRITING);
457
458 DWORD bytes_count = 0;
459 bool success = GetOverlappedResult(pipe_,
460 &overlapped_,
461 &bytes_count,
462 FALSE) != FALSE;
463 DWORD error_code = success ? ERROR_SUCCESS : GetLastError();
464
465 if (success) {
466 EnterStateImmediately(IPC_SERVER_STATE_WRITE_DONE);
467 return;
468 }
469
470 // We should never get an I/O incomplete since we should not execute this
471 // unless the Write has finished and the overlapped event is signaled. If
472 // we do get INCOMPLETE, we have a bug in our code.
473 assert(error_code != ERROR_IO_INCOMPLETE);
474
475 EnterStateImmediately(IPC_SERVER_STATE_DISCONNECTING);
476 }
477
478 // When the server thread serving the clients is in the WRITE_DONE state,
479 // try to issue an async read on the pipe. If the read completes synchronously
480 // or if I/O is still pending then go into the READING_ACK state. For any
481 // issues, go into the DISCONNECTING state.
482 void CrashGenerationServer::HandleWriteDoneState() {
483 assert(server_state_ == IPC_SERVER_STATE_WRITE_DONE);
484
485 DWORD bytes_count = 0;
486 bool success = ReadFile(pipe_,
487 &msg_,
488 sizeof(msg_),
489 &bytes_count,
490 &overlapped_) != FALSE;
491 DWORD error_code = success ? ERROR_SUCCESS : GetLastError();
492
493 if (success) {
494 EnterStateImmediately(IPC_SERVER_STATE_READING_ACK);
495 } else if (error_code == ERROR_IO_PENDING) {
496 EnterStateWhenSignaled(IPC_SERVER_STATE_READING_ACK);
497 } else {
498 EnterStateImmediately(IPC_SERVER_STATE_DISCONNECTING);
499 }
500 }
501
502 // When the server thread serving the clients is in the READING_ACK state,
503 // try to get the result of the async read. Go into the DISCONNECTING state.
504 void CrashGenerationServer::HandleReadingAckState() {
505 assert(server_state_ == IPC_SERVER_STATE_READING_ACK);
506
507 DWORD bytes_count = 0;
508 bool success = GetOverlappedResult(pipe_,
509 &overlapped_,
510 &bytes_count,
511 FALSE) != FALSE;
512 DWORD error_code = success ? ERROR_SUCCESS : GetLastError();
513
514 if (success) {
515 // The connection handshake with the client is now complete; perform
516 // the callback.
517 if (connect_callback_) {
518 connect_callback_(connect_context_, client_info_);
519 }
520 } else {
521 // We should never get an I/O incomplete since we should not execute this
522 // unless the Read has finished and the overlapped event is signaled. If
523 // we do get INCOMPLETE, we have a bug in our code.
524 assert(error_code != ERROR_IO_INCOMPLETE);
525 }
526
527 EnterStateImmediately(IPC_SERVER_STATE_DISCONNECTING);
528 }
529
530 // When the server thread serving the client is in the DISCONNECTING state,
531 // disconnect from the pipe and reset the event. If anything fails, go into
532 // the ERROR state. If it goes well, go into the INITIAL state and set the
533 // event to start all over again.
534 void CrashGenerationServer::HandleDisconnectingState() {
535 assert(server_state_ == IPC_SERVER_STATE_DISCONNECTING);
536
537 // Done serving the client.
538 client_info_ = NULL;
539
540 overlapped_.Internal = NULL;
541 overlapped_.InternalHigh = NULL;
542 overlapped_.Offset = 0;
543 overlapped_.OffsetHigh = 0;
544 overlapped_.Pointer = NULL;
545
546 if (!ResetEvent(overlapped_.hEvent)) {
547 EnterErrorState();
548 return;
549 }
550
551 if (!DisconnectNamedPipe(pipe_)) {
552 EnterErrorState();
553 return;
554 }
555
556 // If the server is shutting down do not connect to the
557 // next client.
558 if (shutting_down_) {
559 return;
560 }
561
562 EnterStateImmediately(IPC_SERVER_STATE_INITIAL);
563 }
564
565 void CrashGenerationServer::EnterErrorState() {
566 SetEvent(overlapped_.hEvent);
567 server_state_ = IPC_SERVER_STATE_ERROR;
568 }
569
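// EnterStateWhenSignaled records the new state only; the transition takes
// effect when the pending asynchronous I/O signals overlapped_.hEvent and the
// registered wait runs OnPipeConnected again. EnterStateImmediately (below)
// also signals the event itself, so the next state handler runs without
// waiting for any I/O to complete.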
570 void CrashGenerationServer::EnterStateWhenSignaled(IPCServerState state) {
571 server_state_ = state;
572 }
573
574 void CrashGenerationServer::EnterStateImmediately(IPCServerState state) {
575 server_state_ = state;
576
577 if (!SetEvent(overlapped_.hEvent)) {
578 server_state_ = IPC_SERVER_STATE_ERROR;
579 }
580 }
581
582 bool CrashGenerationServer::PrepareReply(const ClientInfo& client_info,
583 ProtocolMessage* reply) const {
584 reply->tag = MESSAGE_TAG_REGISTRATION_RESPONSE;
585 reply->pid = GetCurrentProcessId();
586
587 if (CreateClientHandles(client_info, reply)) {
588 return true;
589 }
590
591 if (reply->dump_request_handle) {
592 CloseHandle(reply->dump_request_handle);
593 }
594
595 if (reply->dump_generated_handle) {
596 CloseHandle(reply->dump_generated_handle);
597 }
598
599 if (reply->server_alive_handle) {
600 CloseHandle(reply->server_alive_handle);
601 }
602
603 return false;
604 }
605
606 bool CrashGenerationServer::CreateClientHandles(const ClientInfo& client_info,
607 ProtocolMessage* reply) const {
608 HANDLE current_process = GetCurrentProcess();
609 if (!DuplicateHandle(current_process,
610 client_info.dump_requested_handle(),
611 client_info.process_handle(),
612 &reply->dump_request_handle,
613 kDumpRequestEventAccess,
614 FALSE,
615 0)) {
616 return false;
617 }
618
619 if (!DuplicateHandle(current_process,
620 client_info.dump_generated_handle(),
621 client_info.process_handle(),
622 &reply->dump_generated_handle,
623 kDumpGeneratedEventAccess,
624 FALSE,
625 0)) {
626 return false;
627 }
628
629 if (!DuplicateHandle(current_process,
630 server_alive_handle_,
631 client_info.process_handle(),
632 &reply->server_alive_handle,
633 kMutexAccess,
634 FALSE,
635 0)) {
636 return false;
637 }
638
639 return true;
640 }
641
642 bool CrashGenerationServer::RespondToClient(ClientInfo* client_info) {
643 ProtocolMessage reply;
644 if (!PrepareReply(*client_info, &reply)) {
645 return false;
646 }
647
648 DWORD bytes_count = 0;
649 bool success = WriteFile(pipe_,
650 &reply,
651 sizeof(reply),
652 &bytes_count,
653 &overlapped_) != FALSE;
654 DWORD error_code = success ? ERROR_SUCCESS : GetLastError();
655
656 if (!success && error_code != ERROR_IO_PENDING) {
657 return false;
658 }
659
660 // Takes over ownership of client_info. We MUST return true if AddClient
661 // succeeds.
662 if (!AddClient(client_info)) {
663 return false;
664 }
665
666 return true;
667 }
668
669 // The server thread servicing the clients runs this method. The method
670 // implements the state machine described in ReadMe.txt along with the
671 // helper methods HandleXXXState.
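// The states progress INITIAL -> CONNECTING -> CONNECTED -> READING ->
// READ_DONE -> WRITING -> WRITE_DONE -> READING_ACK -> DISCONNECTING and
// back to INITIAL for the next client; most per-client failures lead to
// DISCONNECTING, while unrecoverable failures put the server in the
// terminal ERROR state.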
672 void CrashGenerationServer::HandleConnectionRequest() {
673 // If we are shutting down then get into the ERROR state, reset the event so
674 // more workers don't run, and return immediately.
675 if (shutting_down_) {
676 server_state_ = IPC_SERVER_STATE_ERROR;
677 ResetEvent(overlapped_.hEvent);
678 return;
679 }
680
681 switch (server_state_) {
682 case IPC_SERVER_STATE_ERROR:
683 HandleErrorState();
684 break;
685
686 case IPC_SERVER_STATE_INITIAL:
687 HandleInitialState();
688 break;
689
690 case IPC_SERVER_STATE_CONNECTING:
691 HandleConnectingState();
692 break;
693
694 case IPC_SERVER_STATE_CONNECTED:
695 HandleConnectedState();
696 break;
697
698 case IPC_SERVER_STATE_READING:
699 HandleReadingState();
700 break;
701
702 case IPC_SERVER_STATE_READ_DONE:
703 HandleReadDoneState();
704 break;
705
706 case IPC_SERVER_STATE_WRITING:
707 HandleWritingState();
708 break;
709
710 case IPC_SERVER_STATE_WRITE_DONE:
711 HandleWriteDoneState();
712 break;
713
714 case IPC_SERVER_STATE_READING_ACK:
715 HandleReadingAckState();
716 break;
717
718 case IPC_SERVER_STATE_DISCONNECTING:
719 HandleDisconnectingState();
720 break;
721
722 default:
723 assert(false);
724 // This indicates that we added one more state without
725 // adding handling code.
726 server_state_ = IPC_SERVER_STATE_ERROR;
727 break;
728 }
729 }
730
731 bool CrashGenerationServer::AddClient(ClientInfo* client_info) {
732 HANDLE request_wait_handle = NULL;
733 if (!RegisterWaitForSingleObject(&request_wait_handle,
734 client_info->dump_requested_handle(),
735 OnDumpRequest,
736 client_info,
737 INFINITE,
738 kDumpRequestThreadFlags)) {
739 return false;
740 }
741
742 client_info->set_dump_request_wait_handle(request_wait_handle);
743
744 // OnClientEnd will be called when the client process terminates.
745 HANDLE process_wait_handle = NULL;
746 if (!RegisterWaitForSingleObject(&process_wait_handle,
747 client_info->process_handle(),
748 OnClientEnd,
749 client_info,
750 INFINITE,
751 WT_EXECUTEONLYONCE)) {
752 return false;
753 }
754
755 client_info->set_process_exit_wait_handle(process_wait_handle);
756
757 // New scope to hold the lock for the shortest time.
758 {
759 AutoCriticalSection lock(&clients_sync_);
760 clients_.push_back(client_info);
761 }
762
763 return true;
764 }
765
766 // static
767 void CALLBACK CrashGenerationServer::OnPipeConnected(void* context, BOOLEAN) {
768 assert(context);
769
770 CrashGenerationServer* obj =
771 reinterpret_cast<CrashGenerationServer*>(context);
772 obj->HandleConnectionRequest();
773 }
774
775 // static
776 void CALLBACK CrashGenerationServer::OnDumpRequest(void* context, BOOLEAN) {
777 assert(context);
778 ClientInfo* client_info = reinterpret_cast<ClientInfo*>(context);
779 client_info->PopulateCustomInfo();
780
781 CrashGenerationServer* crash_server = client_info->crash_server();
782 assert(crash_server);
783 crash_server->HandleDumpRequest(*client_info);
784
785 ResetEvent(client_info->dump_requested_handle());
786 }
787
788 // static
789 void CALLBACK CrashGenerationServer::OnClientEnd(void* context, BOOLEAN) {
790 assert(context);
791 ClientInfo* client_info = reinterpret_cast<ClientInfo*>(context);
792
793 CrashGenerationServer* crash_server = client_info->crash_server();
794 assert(crash_server);
795
796 InterlockedIncrement(&crash_server->cleanup_item_count_);
797
798 if (!QueueUserWorkItem(CleanupClient, context, WT_EXECUTEDEFAULT)) {
799 InterlockedDecrement(&crash_server->cleanup_item_count_);
800 }
801 }
802
803 // static
804 DWORD WINAPI CrashGenerationServer::CleanupClient(void* context) {
805 assert(context);
806 ClientInfo* client_info = reinterpret_cast<ClientInfo*>(context);
807
808 CrashGenerationServer* crash_server = client_info->crash_server();
809 assert(crash_server);
810
811 if (crash_server->exit_callback_) {
812 crash_server->exit_callback_(crash_server->exit_context_, client_info);
813 }
814
815 crash_server->DoCleanup(client_info);
816
817 InterlockedDecrement(&crash_server->cleanup_item_count_);
818 return 0;
819 }
820
821 void CrashGenerationServer::DoCleanup(ClientInfo* client_info) {
822 assert(client_info);
823
824 // Start a new scope to release lock automatically.
825 {
826 AutoCriticalSection lock(&clients_sync_);
827 clients_.remove(client_info);
828 }
829
830 delete client_info;
831 }
832
833 void CrashGenerationServer::HandleDumpRequest(const ClientInfo& client_info) {
834 // Generate the dump only if it's explicitly requested by the
835 // server application; otherwise the server might want to generate
836 // a dump in the callback.
837 std::wstring dump_path;
838 if (generate_dumps_) {
839 if (!GenerateDump(client_info, &dump_path)) {
840 return;
841 }
842 }
843
844 if (dump_callback_) {
845 std::wstring* ptr_dump_path = (dump_path == L"") ? NULL : &dump_path;
846 dump_callback_(dump_context_, &client_info, ptr_dump_path);
847 }
848
849 SetEvent(client_info.dump_generated_handle());
850 }
851
852 bool CrashGenerationServer::GenerateDump(const ClientInfo& client,
853 std::wstring* dump_path) {
854 assert(client.pid() != 0);
855 assert(client.process_handle());
856
857 // We have to get the address of EXCEPTION_INFORMATION from
858 // the client process address space.
859 EXCEPTION_POINTERS* client_ex_info = NULL;
860 if (!client.GetClientExceptionInfo(&client_ex_info)) {
861 return false;
862 }
863
864 DWORD client_thread_id = 0;
865 if (!client.GetClientThreadId(&client_thread_id)) {
866 return false;
867 }
868
869 return dump_generator_->WriteMinidump(client.process_handle(),
870 client.pid(),
871 client_thread_id,
872 GetCurrentThreadId(),
873 client_ex_info,
874 client.assert_info(),
875 client.dump_type(),
876 true,
877 dump_path);
878 }
879
880 } // namespace google_breakpad
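
For context, a minimal usage sketch of this class, built only from the constructor and Start() defined above. The pipe name and dump directory are illustrative placeholders (not taken from this change), and all callbacks are simply omitted by passing NULL.

#include <windows.h>
#include <string>

#include "client/windows/crash_generation/crash_generation_server.h"

int wmain() {
  // Illustrative values only; a real host picks its own pipe name and path.
  const std::wstring dump_path = L"C:\\dumps";

  google_breakpad::CrashGenerationServer server(
      L"\\\\.\\pipe\\ExampleCrashServices",  // pipe_name
      NULL,                                  // pipe_sec_attrs (default security)
      NULL, NULL,                            // connect callback/context
      NULL, NULL,                            // dump callback/context
      NULL, NULL,                            // exit callback/context
      true,                                  // generate_dumps
      &dump_path);                           // directory for minidumps

  // Start() creates the named pipe, registers the wait on the overlapped
  // event, and kicks the state machine into the INITIAL state; it returns
  // false if the server could not begin listening.
  if (!server.Start())
    return 1;

  // ... run the host application; clients are served on pool threads ...
  return 0;
}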