Chromium Code Reviews

Side by Side Diff: ppapi/native_client/src/trusted/plugin/service_runtime.cc

Issue 338353008: NaCl: clean up nexe loading logic in trusted plugin. (Closed) Base URL: https://chromium.googlesource.com/chromium/src.git@master
Patch Set: Created 6 years, 6 months ago
1 /* 1 /*
2 * Copyright (c) 2012 The Chromium Authors. All rights reserved. 2 * Copyright (c) 2012 The Chromium Authors. All rights reserved.
3 * Use of this source code is governed by a BSD-style license that can be 3 * Use of this source code is governed by a BSD-style license that can be
4 * found in the LICENSE file. 4 * found in the LICENSE file.
5 */ 5 */
6 6
7 #define NACL_LOG_MODULE_NAME "Plugin_ServiceRuntime" 7 #define NACL_LOG_MODULE_NAME "Plugin_ServiceRuntime"
8 8
9 #include "ppapi/native_client/src/trusted/plugin/service_runtime.h" 9 #include "ppapi/native_client/src/trusted/plugin/service_runtime.h"
10 10
(...skipping 418 matching lines...)
429 429
430 void PluginReverseInterface::ReportExitStatus(int exit_status) { 430 void PluginReverseInterface::ReportExitStatus(int exit_status) {
431 service_runtime_->set_exit_status(exit_status); 431 service_runtime_->set_exit_status(exit_status);
432 } 432 }
433 433
434 int64_t PluginReverseInterface::RequestQuotaForWrite( 434 int64_t PluginReverseInterface::RequestQuotaForWrite(
435 nacl::string file_id, int64_t offset, int64_t bytes_to_write) { 435 nacl::string file_id, int64_t offset, int64_t bytes_to_write) {
436 return bytes_to_write; 436 return bytes_to_write;
437 } 437 }
438 438
439 // Thin wrapper for the arguments of LoadNexeAndStart(), as WeakRefNewCallback
 440 // can take only one argument. The dtor is also responsible for invoking
 441 // the callback if it has not yet been run.
442 struct ServiceRuntime::LoadNexeAndStartData {
443 explicit LoadNexeAndStartData(const pp::CompletionCallback& callback)
444 : callback(callback) {
445 }
446
447 ~LoadNexeAndStartData() {
448 // We must call the callbacks here if they are not yet called, otherwise
449 // the resource would be leaked.
450 if (callback.pp_completion_callback().func)
451 callback.RunAndClear(PP_ERROR_ABORTED);
452 }
453
454 // On success path, this must be invoked manually. Otherwise the dtor would
455 // invoke callbacks with error code unexpectedly.
456 void Clear() {
457 callback = pp::CompletionCallback();
458 }
459
460 pp::CompletionCallback callback;
461 };
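For reference, the ownership idiom that the comments above describe is spread across the old code below; a condensed sketch (illustrative only, using just the names from the removed code) is:

    // Heap-allocate the wrapper so a single pointer can travel through
    // WeakRefNewCallback(); ownership is handed off via release().
    nacl::scoped_ptr<LoadNexeAndStartData> data(
        new LoadNexeAndStartData(callback));
    LoadModule(file_info,
               WeakRefNewCallback(anchor_, this,
                                  &ServiceRuntime::LoadNexeAndStartAfterLoadModule,
                                  data.release()));

    // In the final handler: dispatch the callback explicitly, then Clear()
    // so the dtor does not fire it again with PP_ERROR_ABORTED.
    pp::Module::Get()->core()->CallOnMainThread(0, data->callback, pp_error);
    data->Clear();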
462
463 ServiceRuntime::ServiceRuntime(Plugin* plugin, 439 ServiceRuntime::ServiceRuntime(Plugin* plugin,
464 bool main_service_runtime, 440 bool main_service_runtime,
465 bool uses_nonsfi_mode, 441 bool uses_nonsfi_mode,
466 pp::CompletionCallback init_done_cb, 442 pp::CompletionCallback init_done_cb,
467 pp::CompletionCallback crash_cb) 443 pp::CompletionCallback crash_cb)
468 : plugin_(plugin), 444 : plugin_(plugin),
469 main_service_runtime_(main_service_runtime), 445 main_service_runtime_(main_service_runtime),
470 uses_nonsfi_mode_(uses_nonsfi_mode), 446 uses_nonsfi_mode_(uses_nonsfi_mode),
471 reverse_service_(NULL), 447 reverse_service_(NULL),
472 anchor_(new nacl::WeakRefAnchor()), 448 anchor_(new nacl::WeakRefAnchor()),
473 rev_interface_(new PluginReverseInterface(anchor_, plugin, this, 449 rev_interface_(new PluginReverseInterface(anchor_, plugin, this,
474 init_done_cb, crash_cb)), 450 init_done_cb, crash_cb)),
475 start_sel_ldr_done_(false), 451 start_sel_ldr_done_(false),
476 nexe_started_(false) { 452 nexe_started_(false),
453 nexe_started_ok_(false) {
477 NaClSrpcChannelInitialize(&command_channel_); 454 NaClSrpcChannelInitialize(&command_channel_);
478 NaClXMutexCtor(&mu_); 455 NaClXMutexCtor(&mu_);
479 NaClXCondVarCtor(&cond_); 456 NaClXCondVarCtor(&cond_);
480 } 457 }
481 458
482 void ServiceRuntime::LoadNexeAndStartAfterLoadModule(
483 LoadNexeAndStartData* data, int32_t pp_error) {
484 if (pp_error != PP_OK) {
485 DidLoadNexeAndStart(data, pp_error);
486 return;
487 }
488
 489 // At this point, LoadModule has completed successfully, so the only
 490 // remaining task is to call StartModule().
491 DidLoadNexeAndStart(data, StartModule() ? PP_OK : PP_ERROR_FAILED);
492 }
493
494 void ServiceRuntime::DidLoadNexeAndStart(
495 LoadNexeAndStartData* data, int32_t pp_error) {
496 if (pp_error == PP_OK) {
497 NaClLog(4, "ServiceRuntime::LoadNexeAndStart (success)\n");
hidehiko 2014/06/18 05:07:04 Probably we want to keep this log, too.
498 } else {
499 // On a load failure the service runtime does not crash itself to
500 // avoid a race where the no-more-senders error on the reverse
 501 // channel service thread might cause the crash-detection logic to
502 // kick in before the start_module RPC reply has been received. So
503 // we induce a service runtime crash here. We do not release
504 // subprocess_ since it's needed to collect crash log output after
505 // the error is reported.
506 Log(LOG_FATAL, "reap logs");
507 if (NULL == reverse_service_) {
508 // No crash detector thread.
509 NaClLog(LOG_ERROR, "scheduling to get crash log\n");
 510 // Invoking rev_interface's method is a workaround to avoid crash_cb
 511 // getting called twice or more. We should clean this up later.
512 rev_interface_->ReportCrash();
513 NaClLog(LOG_ERROR, "should fire soon\n");
514 } else {
515 NaClLog(LOG_ERROR, "Reverse service thread will pick up crash log\n");
516 }
517 }
518
519 pp::Module::Get()->core()->CallOnMainThread(0, data->callback, pp_error);
520
521 // Because the ownership of data is taken by caller, we must clear it
522 // manually here. Otherwise, its dtor invokes callbacks again.
523 data->Clear();
524 }
525
526 bool ServiceRuntime::SetupCommandChannel() { 459 bool ServiceRuntime::SetupCommandChannel() {
527 NaClLog(4, "ServiceRuntime::SetupCommand (this=%p, subprocess=%p)\n", 460 NaClLog(4, "ServiceRuntime::SetupCommand (this=%p, subprocess=%p)\n",
528 static_cast<void*>(this), 461 static_cast<void*>(this),
529 static_cast<void*>(subprocess_.get())); 462 static_cast<void*>(subprocess_.get()));
530 if (!subprocess_->SetupCommand(&command_channel_)) { 463 if (!subprocess_->SetupCommand(&command_channel_)) {
531 if (main_service_runtime_) { 464 if (main_service_runtime_) {
532 ErrorInfo error_info; 465 ErrorInfo error_info;
533 error_info.SetReport(PP_NACL_ERROR_SEL_LDR_COMMUNICATION_CMD_CHANNEL, 466 error_info.SetReport(PP_NACL_ERROR_SEL_LDR_COMMUNICATION_CMD_CHANNEL,
534 "ServiceRuntime: command channel creation failed"); 467 "ServiceRuntime: command channel creation failed");
535 plugin_->ReportLoadError(error_info); 468 plugin_->ReportLoadError(error_info);
536 } 469 }
537 return false; 470 return false;
538 } 471 }
539 return true; 472 return true;
540 } 473 }
541 474
542 void ServiceRuntime::LoadModule(PP_NaClFileInfo file_info,
543 pp::CompletionCallback callback) {
544 NaClFileInfo nacl_file_info;
545 nacl_file_info.desc = ConvertFileDescriptor(file_info.handle, true);
546 nacl_file_info.file_token.lo = file_info.token_lo;
547 nacl_file_info.file_token.hi = file_info.token_hi;
548 NaClDesc* desc = NaClDescIoFromFileInfo(nacl_file_info, O_RDONLY);
549 if (desc == NULL) {
550 DidLoadModule(callback, PP_ERROR_FAILED);
551 return;
552 }
553
554 // We don't use a scoped_ptr here since we would immediately release the
555 // DescWrapper to LoadModule().
556 nacl::DescWrapper* wrapper =
557 plugin_->wrapper_factory()->MakeGenericCleanup(desc);
558
559 // TODO(teravest, hidehiko): Replace this by Chrome IPC.
560 bool result = subprocess_->LoadModule(&command_channel_, wrapper);
561 DidLoadModule(callback, result ? PP_OK : PP_ERROR_FAILED);
562 }
563
564 void ServiceRuntime::DidLoadModule(pp::CompletionCallback callback,
565 int32_t pp_error) {
566 if (pp_error != PP_OK) {
567 ErrorInfo error_info;
568 error_info.SetReport(PP_NACL_ERROR_SEL_LDR_COMMUNICATION_CMD_CHANNEL,
569 "ServiceRuntime: load module failed");
570 plugin_->ReportLoadError(error_info);
571 }
572 callback.Run(pp_error);
573 }
574
575 bool ServiceRuntime::InitReverseService() { 475 bool ServiceRuntime::InitReverseService() {
576 if (uses_nonsfi_mode_) { 476 if (uses_nonsfi_mode_) {
577 // In non-SFI mode, no reverse service is set up. Just returns success. 477 // In non-SFI mode, no reverse service is set up. Just returns success.
578 return true; 478 return true;
579 } 479 }
580 480
581 // Hook up the reverse service channel. We are the IMC client, but 481 // Hook up the reverse service channel. We are the IMC client, but
582 // provide SRPC service. 482 // provide SRPC service.
583 NaClDesc* out_conn_cap; 483 NaClDesc* out_conn_cap;
584 NaClSrpcResultCodes rpc_result = 484 NaClSrpcResultCodes rpc_result =
(...skipping 153 matching lines...)
738 } 638 }
739 return start_sel_ldr_done_; 639 return start_sel_ldr_done_;
740 } 640 }
741 641
742 void ServiceRuntime::SignalStartSelLdrDone() { 642 void ServiceRuntime::SignalStartSelLdrDone() {
743 nacl::MutexLocker take(&mu_); 643 nacl::MutexLocker take(&mu_);
744 start_sel_ldr_done_ = true; 644 start_sel_ldr_done_ = true;
745 NaClXCondVarSignal(&cond_); 645 NaClXCondVarSignal(&cond_);
746 } 646 }
747 647
748 void ServiceRuntime::WaitForNexeStart() { 648 bool ServiceRuntime::WaitForNexeStart() {
749 nacl::MutexLocker take(&mu_); 649 nacl::MutexLocker take(&mu_);
750 while (!nexe_started_) 650 while (!nexe_started_)
751 NaClXCondVarWait(&cond_, &mu_); 651 NaClXCondVarWait(&cond_, &mu_);
752 // Reset nexe_started_ here in case we run again. 652 return nexe_started_ok_;
753 nexe_started_ = false;
754 } 653 }
755 654
756 void ServiceRuntime::SignalNexeStarted() { 655 void ServiceRuntime::SignalNexeStarted(bool ok) {
757 nacl::MutexLocker take(&mu_); 656 nacl::MutexLocker take(&mu_);
758 nexe_started_ = true; 657 nexe_started_ = true;
hidehiko 2014/06/18 05:07:04 Probably we should rename this variable (and the m
Nick Bray (chromium) 2014/06/18 18:29:44 Done, but please provide suggestions when names ar
658 nexe_started_ok_ = ok;
759 NaClXCondVarSignal(&cond_); 659 NaClXCondVarSignal(&cond_);
760 } 660 }
761 661
762 void ServiceRuntime::LoadNexeAndStart(PP_NaClFileInfo file_info, 662 void ServiceRuntime::LoadNexeAndStart(PP_NaClFileInfo file_info) {
763 const pp::CompletionCallback& callback) {
764 NaClLog(4, "ServiceRuntime::LoadNexeAndStart (handle_valid=%d " 663 NaClLog(4, "ServiceRuntime::LoadNexeAndStart (handle_valid=%d "
765 "token_lo=%" NACL_PRIu64 " token_hi=%" NACL_PRIu64 ")\n", 664 "token_lo=%" NACL_PRIu64 " token_hi=%" NACL_PRIu64 ")\n",
766 file_info.handle != PP_kInvalidFileHandle, 665 file_info.handle != PP_kInvalidFileHandle,
767 file_info.token_lo, 666 file_info.token_lo,
768 file_info.token_hi); 667 file_info.token_hi);
769 668
770 nacl::scoped_ptr<LoadNexeAndStartData> data( 669 bool ok = LoadNexeAndStartInternal(file_info);
771 new LoadNexeAndStartData(callback)); 670 if (!ok) {
772 if (!SetupCommandChannel() || !InitReverseService()) { 671 ReapLogs();
773 DidLoadNexeAndStart(data.get(), PP_ERROR_FAILED);
774 return;
775 } 672 }
673 // This only matters if a background thread is waiting, but we signal in all
674 // cases to simplify the code.
675 SignalNexeStarted(ok);
676 }
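SignalNexeStarted(ok) above pairs with WaitForNexeStart(); a minimal sketch of the waiting side (the caller here is hypothetical and only illustrates why the result is carried through nexe_started_ok_):

    // Hypothetical waiter, e.g. on a background thread owned by Plugin:
    // blocks until LoadNexeAndStart() signals, then learns whether the
    // load actually succeeded.
    if (!service_runtime->WaitForNexeStart()) {
      // Load failed; ReapLogs() has already arranged for crash log output
      // to be collected and reported.
    }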
776 677
777 LoadModule( 678 bool ServiceRuntime::LoadNexeAndStartInternal(PP_NaClFileInfo file_info) {
 778 file_info, 679 if (!SetupCommandChannel()) {
779 WeakRefNewCallback(anchor_, 680 return false;
780 this, 681 }
781 &ServiceRuntime::LoadNexeAndStartAfterLoadModule, 682 if (!InitReverseService()) {
782 data.release())); // Delegate the ownership. 683 return false;
684 }
685 NaClFileInfo nacl_file_info;
686 nacl_file_info.desc = ConvertFileDescriptor(file_info.handle, true);
687 nacl_file_info.file_token.lo = file_info.token_lo;
688 nacl_file_info.file_token.hi = file_info.token_hi;
689 NaClDesc* desc = NaClDescIoFromFileInfo(nacl_file_info, O_RDONLY);
690 if (desc == NULL) {
691 // TODO(ncbray): better error reporting?
692 ErrorInfo error_info;
hidehiko 2014/06/18 05:07:04 Can we avoid dup? Probably, we should refactor Lo
Nick Bray (chromium) 2014/06/18 18:29:44 Done. Good call. I am not 100% happy with report
693 error_info.SetReport(PP_NACL_ERROR_SEL_LDR_COMMUNICATION_CMD_CHANNEL,
694 "ServiceRuntime: load module failed");
695 plugin_->ReportLoadError(error_info);
696 return false;
697 }
698 // We don't use a scoped_ptr here since we would immediately release the
699 // DescWrapper to LoadModule().
700 nacl::DescWrapper* wrapper =
701 plugin_->wrapper_factory()->MakeGenericCleanup(desc);
702 // TODO(teravest, hidehiko): Replace this by Chrome IPC.
703 if (!subprocess_->LoadModule(&command_channel_, wrapper)) {
704 ErrorInfo error_info;
705 error_info.SetReport(PP_NACL_ERROR_SEL_LDR_COMMUNICATION_CMD_CHANNEL,
706 "ServiceRuntime: load module failed");
707 plugin_->ReportLoadError(error_info);
708 return false;
709 }
710 if (!StartModule()) {
teravest 2014/06/18 01:37:52 We had initially planned on LoadModule() being asy
Nick Bray (chromium) 2014/06/18 02:02:38 I think the rest of the cleanup is still good (?),
711 return false;
712 }
713 return true;
714 }
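Regarding hidehiko's duplication comment above, one possible shape for a shared helper (the name ReportLoadModuleError is hypothetical and not part of this patch):

    // Hypothetical helper so both failure paths in LoadNexeAndStartInternal()
    // can report an error without repeating the ErrorInfo boilerplate.
    void ServiceRuntime::ReportLoadModuleError(const nacl::string& message) {
      ErrorInfo error_info;
      error_info.SetReport(PP_NACL_ERROR_SEL_LDR_COMMUNICATION_CMD_CHANNEL,
                           message);
      plugin_->ReportLoadError(error_info);
    }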
715
716 void ServiceRuntime::ReapLogs() {
717 // On a load failure the service runtime does not crash itself to
718 // avoid a race where the no-more-senders error on the reverse
 719 // channel service thread might cause the crash-detection logic to
720 // kick in before the start_module RPC reply has been received. So
721 // we induce a service runtime crash here. We do not release
722 // subprocess_ since it's needed to collect crash log output after
723 // the error is reported.
724 NaClLog(LOG_ERROR, "reap logs\n");
hidehiko 2014/06/18 05:07:04 Why not fatal?
Nick Bray (chromium) 2014/06/18 18:29:44 Oh. This is even more subtle than I realized. It
725 if (NULL == reverse_service_) {
726 // No crash detector thread.
727 NaClLog(LOG_ERROR, "scheduling to get crash log\n");
 728 // Invoking rev_interface's method is a workaround to avoid crash_cb
 729 // getting called twice or more. We should clean this up later.
730 rev_interface_->ReportCrash();
731 NaClLog(LOG_ERROR, "should fire soon\n");
732 } else {
733 NaClLog(LOG_ERROR, "Reverse service thread will pick up crash log\n");
734 }
783 } 735 }
784 736
785 SrpcClient* ServiceRuntime::SetupAppChannel() { 737 SrpcClient* ServiceRuntime::SetupAppChannel() {
786 NaClLog(4, "ServiceRuntime::SetupAppChannel (subprocess_=%p)\n", 738 NaClLog(4, "ServiceRuntime::SetupAppChannel (subprocess_=%p)\n",
787 reinterpret_cast<void*>(subprocess_.get())); 739 reinterpret_cast<void*>(subprocess_.get()));
788 nacl::DescWrapper* connect_desc = subprocess_->socket_addr()->Connect(); 740 nacl::DescWrapper* connect_desc = subprocess_->socket_addr()->Connect();
789 if (NULL == connect_desc) { 741 if (NULL == connect_desc) {
790 NaClLog(LOG_ERROR, "ServiceRuntime::SetupAppChannel (connect failed)\n"); 742 NaClLog(LOG_ERROR, "ServiceRuntime::SetupAppChannel (connect failed)\n");
791 return NULL; 743 return NULL;
792 } else { 744 } else {
(...skipping 66 matching lines...)
859 811
860 nacl::string ServiceRuntime::GetCrashLogOutput() { 812 nacl::string ServiceRuntime::GetCrashLogOutput() {
861 if (NULL != subprocess_.get()) { 813 if (NULL != subprocess_.get()) {
862 return subprocess_->GetCrashLogOutput(); 814 return subprocess_->GetCrashLogOutput();
863 } else { 815 } else {
864 return std::string(); 816 return std::string();
865 } 817 }
866 } 818 }
867 819
868 } // namespace plugin 820 } // namespace plugin