Chromium Code Reviews
Issue 879233003: Initial RemoteCommandService (Closed)
Base URL: https://chromium.googlesource.com/chromium/src.git@remote-commands
| Index: components/policy/core/common/remote_commands/remote_commands_service.cc | 
| diff --git a/components/policy/core/common/remote_commands/remote_commands_service.cc b/components/policy/core/common/remote_commands/remote_commands_service.cc | 
| new file mode 100644 | 
| index 0000000000000000000000000000000000000000..2816b2d82fc3259dea40fa3450a147b48bb79d79 | 
| --- /dev/null | 
| +++ b/components/policy/core/common/remote_commands/remote_commands_service.cc | 
| @@ -0,0 +1,151 @@ | 
| +// Copyright 2015 The Chromium Authors. All rights reserved. | 
| +// Use of this source code is governed by a BSD-style license that can be | 
| +// found in the LICENSE file. | 
| + | 
| +#include "components/policy/core/common/remote_commands/remote_commands_service.h" | 
| + | 
| +#include <algorithm> | 
| +#include <string> | 
| +#include <vector> | 
| + | 
| +#include "base/bind.h" | 
| +#include "base/logging.h" | 
| +#include "base/time/clock.h" | 
| +#include "base/time/time.h" | 
| +#include "components/policy/core/common/cloud/cloud_policy_client.h" | 
| +#include "components/policy/core/common/remote_commands/remote_commands_factory.h" | 
| + | 
| +namespace policy { | 
| + | 
| +namespace em = enterprise_management; | 
| + | 
| +RemoteCommandsService::RemoteCommandsService( | 
| + scoped_ptr<RemoteCommandsFactory> factory, | 
| + CloudPolicyClient* client) | 
| + : factory_(factory.Pass()), client_(client), weak_factory_(this) { | 
| + queue_.AddObserver(this); | 
| +} | 
| + | 
| +RemoteCommandsService::~RemoteCommandsService() { | 
| + queue_.RemoveObserver(this); | 
| +} | 
| + | 
| +bool RemoteCommandsService::FetchRemoteCommands() { | 
| + if (command_fetch_in_progress_) { | 
| + has_enqueued_fetch_request_ = true; | 
| + return false; | 
| + } | 
| + | 
| + command_fetch_in_progress_ = true; | 
| + has_enqueued_fetch_request_ = false; | 
| + | 
| + std::vector<em::RemoteCommandResult> previous_results; | 
| + unsent_results_.swap(previous_results); | 
| + | 
| + scoped_ptr<RemoteCommandJob::UniqueIDType> id_to_acknowledge = nullptr; | 
| 
bartfab (slow)
2015/03/17 14:01:22
Nit: s/ = nullptr// (this happens automatically for scoped_ptr)
 
binjin
2015/03/18 10:09:38
Done.
 | 
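For context on the nit above: a default-constructed scoped_ptr, like std::unique_ptr, already holds null, so the explicit "= nullptr" is redundant. A minimal stand-alone sketch of the same idea, using std::unique_ptr as a stand-in for scoped_ptr and a plain int64_t in place of RemoteCommandJob::UniqueIDType (both are assumptions for illustration):

  #include <cassert>
  #include <cstdint>
  #include <memory>

  int main() {
    // Default construction is already null; "= nullptr" would add nothing.
    std::unique_ptr<int64_t> id_to_acknowledge;
    assert(!id_to_acknowledge);
    // Set it only when there is actually something to acknowledge.
    id_to_acknowledge.reset(new int64_t(42));
    assert(id_to_acknowledge && *id_to_acknowledge == 42);
    return 0;
  }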
| + | 
| + if (has_finished_command_) { | 
| + // Acknowledges |lastest_finished_command_id_| and removes every command | 
| + // before it from |fetched_command_ids_|. | 
| + id_to_acknowledge.reset( | 
| + new RemoteCommandJob::UniqueIDType(lastest_finished_command_id_)); | 
| + // It's safe to remove these IDs from |fetched_command_ids_| here, since | 
| + // there is at most one ongoing command fetch request. | 
| 
bartfab (slow)
2015/03/17 14:01:22
Maybe it would be even clearer if you wrote:
"sin
 
binjin
2015/03/18 10:09:38
Done.
 | 
| + while (!fetched_command_ids_.empty() && | 
| + fetched_command_ids_.front() != lastest_finished_command_id_) { | 
| + fetched_command_ids_.pop_front(); | 
| + } | 
| + } | 
| + | 
| + client_->FetchRemoteCommands( | 
| + id_to_acknowledge.Pass(), previous_results, | 
| + base::Bind(&RemoteCommandsService::OnRemoteCommandsFetched, | 
| + weak_factory_.GetWeakPtr())); | 
| + | 
| + return true; | 
| +} | 
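The |command_fetch_in_progress_| and |has_enqueued_fetch_request_| flags above coalesce overlapping fetch requests: at most one request is in flight, and any calls made in the meantime collapse into a single follow-up fetch once the reply arrives. A simplified, self-contained model of that logic (plain C++; FetchCoalescer and its counter are hypothetical names, not part of the actual service):

  #include <cassert>

  class FetchCoalescer {
   public:
    // Returns true if a new fetch was actually started.
    bool Fetch() {
      if (in_progress_) {
        enqueued_ = true;   // remember that someone asked meanwhile
        return false;
      }
      in_progress_ = true;
      enqueued_ = false;
      ++started_fetches_;   // stands in for client_->FetchRemoteCommands()
      return true;
    }

    // Called when the in-flight fetch completes.
    void OnFetched() {
      in_progress_ = false;
      if (enqueued_)
        Fetch();            // issue exactly one follow-up fetch
    }

    int started_fetches() const { return started_fetches_; }

   private:
    bool in_progress_ = false;
    bool enqueued_ = false;
    int started_fetches_ = 0;
  };

  int main() {
    FetchCoalescer c;
    assert(c.Fetch());      // first call starts a fetch
    assert(!c.Fetch());     // second call is coalesced
    assert(!c.Fetch());     // so is the third
    c.OnFetched();          // reply arrives: exactly one follow-up starts
    assert(c.started_fetches() == 2);
    return 0;
  }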
| + | 
| +void RemoteCommandsService::SetClockForTesting(scoped_ptr<base::Clock> clock) { | 
| + queue_.SetClockForTesting(clock.Pass()); | 
| +} | 
| + | 
| +void RemoteCommandsService::EnqueueCommand( | 
| + const enterprise_management::RemoteCommand& command) { | 
| + if (!command.has_type() || !command.has_unique_id()) { | 
| + LOG(WARNING) << "Invalid remote command from server."; | 
| + return; | 
| + } | 
| + | 
| + // If the command is already fetched, ignore it. | 
| + if (std::find(fetched_command_ids_.begin(), fetched_command_ids_.end(), | 
| + command.unique_id()) != fetched_command_ids_.end()) { | 
| + return; | 
| + } | 
| + | 
| + fetched_command_ids_.push_back(command.unique_id()); | 
| + | 
| + scoped_ptr<RemoteCommandJob> job = factory_->BuildJobForType(command.type()); | 
| + | 
| + if (!job || !job->Init(command)) { | 
| + em::RemoteCommandResult ignored_result; | 
| + ignored_result.set_result( | 
| + em::RemoteCommandResult_ResultType_RESULT_IGNORED); | 
| + ignored_result.set_unique_id(command.unique_id()); | 
| + unsent_results_.push_back(ignored_result); | 
| + return; | 
| + } | 
| + | 
| + queue_.AddJob(job.Pass()); | 
| +} | 
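EnqueueCommand above ignores any command whose unique ID has been seen before, so a server that re-sends a still-pending command does not cause it to execute twice. A minimal sketch of just that duplicate check (plain C++; the helper name and the std::deque<int64_t> container are assumptions for illustration):

  #include <algorithm>
  #include <cassert>
  #include <cstdint>
  #include <deque>

  // Returns true and records the ID if it is new; returns false for repeats.
  bool TryRecordCommandId(std::deque<int64_t>* fetched_ids, int64_t id) {
    if (std::find(fetched_ids->begin(), fetched_ids->end(), id) !=
        fetched_ids->end()) {
      return false;  // already fetched: ignore the duplicate
    }
    fetched_ids->push_back(id);
    return true;
  }

  int main() {
    std::deque<int64_t> ids;
    assert(TryRecordCommandId(&ids, 1));
    assert(TryRecordCommandId(&ids, 2));
    assert(!TryRecordCommandId(&ids, 1));  // duplicate is rejected
    return 0;
  }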
| + | 
| +void RemoteCommandsService::OnJobStarted(RemoteCommandJob* command) { | 
| 
bartfab (slow)
2015/03/17 14:01:22
Nit: Is anyone using OnJobStarted() now? If not, y
 
binjin
2015/03/18 10:09:38
Acknowledged. I'd like to keep this since persiste
 
bartfab (slow)
2015/03/18 10:52:25
I will not stop your CL that is almost landed. But
 | 
| +} | 
| + | 
| +void RemoteCommandsService::OnJobFinished(RemoteCommandJob* command) { | 
| + has_finished_command_ = true; | 
| + lastest_finished_command_id_ = command->unique_id(); | 
| + // TODO(binjin): Attempt to sync |lastest_finished_command_id_| to some | 
| + // persistent source, so that we can reload it later without relying solely on | 
| + // the server to keep our last acknowledged command ID. | 
| + // See http://crbug.com/466572. | 
| + | 
| + em::RemoteCommandResult result; | 
| + result.set_unique_id(command->unique_id()); | 
| + result.set_timestamp((command->execution_started_time() - | 
| + base::Time::UnixEpoch()).InMilliseconds()); | 
| + | 
| + if (command->status() == RemoteCommandJob::SUCCEEDED) { | 
| + result.set_result(em::RemoteCommandResult_ResultType_RESULT_SUCCESS); | 
| + const scoped_ptr<std::string> result_payload = command->GetResultPayload(); | 
| + if (result_payload) | 
| + result.set_payload(*result_payload); | 
| + } else if (command->status() == RemoteCommandJob::EXPIRED || | 
| + command->status() == RemoteCommandJob::INVALID) { | 
| + result.set_result(em::RemoteCommandResult_ResultType_RESULT_IGNORED); | 
| + } else { | 
| + result.set_result(em::RemoteCommandResult_ResultType_RESULT_FAILURE); | 
| + } | 
| + | 
| + unsent_results_.push_back(result); | 
| + | 
| + FetchRemoteCommands(); | 
| +} | 
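The timestamp attached to each result above is the command's execution start time expressed as milliseconds since the Unix epoch. A rough standard-library analogue (std::chrono in place of base::Time; the helper name is hypothetical, and std::chrono::system_clock is assumed to use the Unix epoch, which holds on common platforms and is guaranteed from C++20):

  #include <chrono>
  #include <cstdint>
  #include <iostream>

  // Converts a wall-clock time point to milliseconds since the Unix epoch.
  int64_t MillisecondsSinceUnixEpoch(std::chrono::system_clock::time_point t) {
    using std::chrono::duration_cast;
    using std::chrono::milliseconds;
    return duration_cast<milliseconds>(t.time_since_epoch()).count();
  }

  int main() {
    const auto execution_started = std::chrono::system_clock::now();
    std::cout << "timestamp: " << MillisecondsSinceUnixEpoch(execution_started)
              << "\n";
    return 0;
  }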
| + | 
| +void RemoteCommandsService::OnRemoteCommandsFetched( | 
| + DeviceManagementStatus status, | 
| + const std::vector<enterprise_management::RemoteCommand>& commands) { | 
| + DCHECK(command_fetch_in_progress_); | 
| + command_fetch_in_progress_ = false; | 
| + | 
| + // TODO(binjin): Add retrying on errors. See http://crbug.com/466572. | 
| + if (status == DM_STATUS_SUCCESS) { | 
| + for (const auto& command : commands) | 
| + EnqueueCommand(command); | 
| + } | 
| + | 
| + // Start another fetch request job immediately if there are unsent command | 
| + // results or enqueued fetch requests. | 
| + if (!unsent_results_.empty() || has_enqueued_fetch_request_) | 
| + FetchRemoteCommands(); | 
| +} | 
| + | 
| +} // namespace policy |