| Index: content/browser/background_fetch/background_fetch_data_manager.cc |
| diff --git a/content/browser/background_fetch/background_fetch_data_manager.cc b/content/browser/background_fetch/background_fetch_data_manager.cc |
| index f47bac8d0c80d83a4419f7bb94ee82ae3137b83a..eff3599425fa99042fae60850208553963bd680f 100644 |
| --- a/content/browser/background_fetch/background_fetch_data_manager.cc |
| +++ b/content/browser/background_fetch/background_fetch_data_manager.cc |
| @@ -6,21 +6,25 @@ |
| #include "base/memory/ptr_util.h" |
| #include "content/browser/background_fetch/background_fetch_context.h" |
| +#include "content/browser/background_fetch/background_fetch_job_response_data.h" |
| #include "content/browser/background_fetch/background_fetch_request_info.h" |
| +#include "content/public/browser/blob_handle.h" |
| +#include "content/public/browser/browser_context.h" |
| +#include "content/public/browser/download_interrupt_reasons.h" |
| +#include "content/public/browser/download_item.h" |
| namespace content { |
| BackgroundFetchDataManager::BackgroundFetchDataManager( |
| - BackgroundFetchContext* background_fetch_context) |
| - : background_fetch_context_(background_fetch_context) { |
| - DCHECK(background_fetch_context_); |
| + BrowserContext* browser_context) |
| + : browser_context_(browser_context), weak_ptr_factory_(this) { |
| + DCHECK(browser_context_); |
| // TODO(harkness) Read from persistent storage and recreate requests. |
| } |
| BackgroundFetchDataManager::~BackgroundFetchDataManager() = default; |
| -std::unique_ptr<BackgroundFetchJobData> |
| -BackgroundFetchDataManager::CreateRequest( |
| +void BackgroundFetchDataManager::CreateRequest( |
| std::unique_ptr<BackgroundFetchJobInfo> job_info, |
| BackgroundFetchRequestInfos request_infos) { |
| JobIdentifier id(job_info->service_worker_registration_id(), job_info->tag()); |
| @@ -30,36 +34,190 @@ BackgroundFetchDataManager::CreateRequest( |
| << " has already created a batch request with tag " |
| << job_info->tag(); |
| // TODO(harkness) Figure out how to return errors like this. |
| - return nullptr; |
| + return; |
| } |
| - // Add the request to our maps and return a JobData to track the individual |
| - // files in the request. |
| + // Add the JobInfo to the in-memory map, and write the individual requests out |
| + // to storage. |
| + job_info->set_num_requests(request_infos.size()); |
| const std::string job_guid = job_info->guid(); |
| service_worker_tag_map_[id] = job_guid; |
| WriteJobToStorage(std::move(job_info), std::move(request_infos)); |
| - // TODO(harkness): Remove data when the job is complete. |
| - |
| - return base::MakeUnique<BackgroundFetchJobData>( |
| - ReadRequestsFromStorage(job_guid)); |
| } |
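One way the error TODO above might eventually be addressed is sketched below. This is purely an assumption about a future change, not part of this CL: the CreateRequestError enum and the extra callback parameter are hypothetical, and only the duplicate-tag branch that already exists in the code is shown reporting an error.

  // Hypothetical error reporting for CreateRequest (names assumed).
  enum class CreateRequestError { NONE, DUPLICATE_TAG };
  using CreateRequestCallback = base::Callback<void(CreateRequestError)>;

  void BackgroundFetchDataManager::CreateRequest(
      std::unique_ptr<BackgroundFetchJobInfo> job_info,
      BackgroundFetchRequestInfos request_infos,
      const CreateRequestCallback& callback) {
    JobIdentifier id(job_info->service_worker_registration_id(),
                     job_info->tag());
    if (service_worker_tag_map_.find(id) != service_worker_tag_map_.end()) {
      callback.Run(CreateRequestError::DUPLICATE_TAG);
      return;
    }
    job_info->set_num_requests(request_infos.size());
    const std::string job_guid = job_info->guid();
    service_worker_tag_map_[id] = job_guid;
    WriteJobToStorage(std::move(job_info), std::move(request_infos));
    callback.Run(CreateRequestError::NONE);
  }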
| void BackgroundFetchDataManager::WriteJobToStorage( |
| std::unique_ptr<BackgroundFetchJobInfo> job_info, |
| BackgroundFetchRequestInfos request_infos) { |
| - // TODO(harkness): Replace these maps with actually writing to storage. |
| // TODO(harkness): Check for job_guid clash. |
| const std::string job_guid = job_info->guid(); |
| job_map_[job_guid] = std::move(job_info); |
| - request_map_[job_guid] = std::move(request_infos); |
| + |
| + // Make an explicit copy of the original requests. |
| + // TODO(harkness): Replace this with actually writing to storage. |
| + std::vector<BackgroundFetchRequestInfo> requests; |
| + for (const auto& request_info : request_infos) { |
| + requests.emplace_back(*(request_info.get())); |
| + } |
| + request_map_[job_guid] = std::move(requests); |
| + |
| + // |request_infos| will be destroyed when it leaves scope here. |
| +} |
| + |
| +void BackgroundFetchDataManager::WriteRequestToStorage( |
| + const std::string& job_guid, |
| + BackgroundFetchRequestInfo* request_info) { |
| + std::vector<BackgroundFetchRequestInfo>& request_infos = |
| + request_map_[job_guid]; |
| + |
| + // Copy the updated |request_info| over the in-memory version. |
| + for (size_t i = 0; i < request_infos.size(); i++) { |
| + if (request_infos[i].guid() == request_info->guid()) |
| + request_infos[i] = *request_info; |
| + } |
| +} |
| + |
| +std::unique_ptr<BackgroundFetchRequestInfo> |
| +BackgroundFetchDataManager::GetRequestInfo(const std::string& job_guid, |
| + size_t request_index) const { |
| + // Explicitly create a copy. When this is persisted to ServiceWorkerStorage, |
| + // the request_map_ will not exist. |
| + auto iter = request_map_.find(job_guid); |
| + DCHECK(iter != request_map_.end()); |
| + const std::vector<BackgroundFetchRequestInfo>& request_infos = iter->second; |
| + |
| + DCHECK(request_index < request_infos.size()); |
| + BackgroundFetchRequestInfo request_info = |
| + *(request_infos.begin() + request_index); |
|
Peter Beverloo
2017/03/25 03:38:50
Why wouldn't request_infos[request_index] work?
harkness
2017/03/26 16:13:18
I could do that, but then the method couldn't be const.
Peter Beverloo
2017/03/26 22:32:39
This can't be const anyway - conceptually it could
harkness
2017/03/27 07:32:37
Changed.
|
| + return base::MakeUnique<BackgroundFetchRequestInfo>(request_info); |
| } |
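For illustration, here is a minimal sketch of what the non-const accessor discussed above could look like once the method is no longer const, as the thread suggests; this is an assumption about the follow-up change, not code from this CL.

  std::unique_ptr<BackgroundFetchRequestInfo>
  BackgroundFetchDataManager::GetRequestInfo(const std::string& job_guid,
                                             size_t request_index) {
    auto iter = request_map_.find(job_guid);
    DCHECK(iter != request_map_.end());
    std::vector<BackgroundFetchRequestInfo>& request_infos = iter->second;
    DCHECK(request_index < request_infos.size());
    // operator[] is sufficient here; the explicit copy is still made by the
    // BackgroundFetchRequestInfo copy constructor.
    return base::MakeUnique<BackgroundFetchRequestInfo>(
        request_infos[request_index]);
  }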
| -// TODO(harkness): This should be changed to read (and cache) small numbers of |
| -// the RequestInfos instead of returning all of them. |
| -BackgroundFetchRequestInfos& |
| -BackgroundFetchDataManager::ReadRequestsFromStorage( |
| +void BackgroundFetchDataManager::GetJobResponse( |
| + const std::string& job_guid, |
| + const BackgroundFetchResponseCompleteCallback& callback) { |
| + BackgroundFetchJobInfo* job_info = job_map_[job_guid].get(); |
| + DCHECK(job_info); |
| + |
| + // Create a BackgroundFetchJobResponseData object which will aggregate |
| + // together the response blobs. |
| + job_info->set_job_response_data( |
| + base::MakeUnique<BackgroundFetchJobResponseData>(job_info->num_requests(), |
| + std::move(callback))); |
|
Peter Beverloo
2017/03/25 03:38:50
This std::move() doesn't do anything because |callback| is a const reference.
harkness
2017/03/26 16:13:18
Done.
|
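A self-contained illustration of the point above, in generic C++ rather than Chromium code: std::move() applied through a const reference still selects the copy constructor, so nothing is actually moved. Taking the argument by value (or simply dropping the std::move()) is the usual fix.

  #include <string>
  #include <utility>

  void TakesConstRef(const std::string& s) {
    std::string out = std::move(s);  // |s| is const, so this still copies.
  }

  void TakesByValue(std::string s) {
    std::string out = std::move(s);  // Genuine move: |s| is a mutable value.
  }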
| + |
| + // Iterate over the requests and create blobs for each response. |
| + for (size_t request_index = 0; request_index < job_info->num_requests(); |
| + request_index++) { |
| + // TODO(harkness): This will need to be asynchronous. |
| + std::unique_ptr<BackgroundFetchRequestInfo> request_info = |
| + GetRequestInfo(job_guid, request_index); |
| + |
| + // TODO(harkness): Only create a blob response if the request was |
| + // successful. Otherwise create an error response. |
| + content::BrowserContext::CreateFileBackedBlob( |
|
Peter Beverloo
2017/03/25 03:38:50
That method only exists to bridge UI thread calls
harkness
2017/03/26 16:13:18
Won't the DataManager need access to the browser context?
Peter Beverloo
2017/03/26 22:32:39
We'd give it the ServiceWorkerContextWrapper instead.
harkness
2017/03/27 07:32:37
Fair enough.
|
| + browser_context_, request_info->file_path(), 0 /* offset */, |
| + request_info->received_bytes(), |
| + base::Time() /* expected_modification_time */, |
| + base::Bind(&BackgroundFetchDataManager::DidGetRequestResponse, |
| + weak_ptr_factory_.GetWeakPtr(), job_guid, request_index)); |
| + } |
| +} |
| + |
| +void BackgroundFetchDataManager::DidGetRequestResponse( |
| + const std::string& job_guid, |
| + int request_sequence_number, |
| + std::unique_ptr<BlobHandle> blob_handle) { |
| + BackgroundFetchJobInfo* job_info = job_map_[job_guid].get(); |
| + DCHECK(job_info); |
| + |
| + BackgroundFetchJobResponseData* job_response_data = |
| + job_info->job_response_data(); |
| + DCHECK(job_response_data); |
| + |
| + job_response_data->AddResponse(request_sequence_number, |
| + std::move(blob_handle)); |
| +} |
| + |
| +bool BackgroundFetchDataManager::UpdateRequestState( |
| + const std::string& job_guid, |
| + const std::string& request_guid, |
| + DownloadItem::DownloadState state, |
| + DownloadInterruptReason interrupt_reason) { |
| + // Find the request and set the state and the interrupt reason. |
| + BackgroundFetchJobInfo* job_info = job_map_[job_guid].get(); |
| + DCHECK(job_info); |
|
Peter Beverloo
2017/03/25 03:38:50
This is the sort of error condition that we'll have to handle.
harkness
2017/03/26 16:13:18
The only way I could see this being hit would be i
|
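As a sketch of the graceful handling hinted at above (an assumption about a later change, not part of this CL), the lookup could bail out instead of DCHECKing when the job is unknown:

  auto iter = job_map_.find(job_guid);
  if (iter == job_map_.end())
    return false;  // Unknown job: nothing left to process.
  BackgroundFetchJobInfo* job_info = iter->second.get();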
| + BackgroundFetchRequestInfo* request = |
| + job_info->GetActiveRequest(request_guid); |
| + DCHECK(request); |
| + request->set_state(state); |
| + request->set_interrupt_reason(interrupt_reason); |
| + |
| + // If the request is now finished, remove it from the active requests. |
| + switch (state) { |
| + case DownloadItem::DownloadState::COMPLETE: |
| + case DownloadItem::DownloadState::CANCELLED: |
| + WriteRequestToStorage(job_guid, request); |
| + job_info->RemoveActiveRequest(request_guid); |
| + case DownloadItem::DownloadState::IN_PROGRESS: |
| + case DownloadItem::DownloadState::INTERRUPTED: |
| + case DownloadItem::DownloadState::MAX_DOWNLOAD_STATE: |
| + break; |
| + } |
| + |
| + // Return a boolean indicating whether there are more requests to be |
| + // processed. |
| + return job_info->HasRequestsRemaining(); |
| +} |
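How a caller might consume that return value is sketched below. The controller and ProcessRequest names are hypothetical, but DownloadItem::GetState() and GetLastReason() are the real accessors the state and interrupt reason would come from.

  // Hypothetical caller: a controller reacting to a DownloadItem update.
  bool has_more = data_manager_->UpdateRequestState(
      job_guid, request_guid, item->GetState(), item->GetLastReason());
  if (has_more)
    ProcessRequest(data_manager_->GetNextBackgroundFetchRequestInfo(job_guid));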
| + |
| +void BackgroundFetchDataManager::UpdateRequestStorageState( |
| + const std::string& job_guid, |
| + const std::string& request_guid, |
| + const base::FilePath& file_path, |
| + int64_t received_bytes) { |
| + BackgroundFetchJobInfo* job_info = job_map_[job_guid].get(); |
| + DCHECK(job_info); |
| + BackgroundFetchRequestInfo* request = |
| + job_info->GetActiveRequest(request_guid); |
| + DCHECK(request); |
| + request->set_file_path(file_path); |
| + request->set_received_bytes(received_bytes); |
| +} |
| + |
| +const BackgroundFetchRequestInfo& |
| +BackgroundFetchDataManager::GetNextBackgroundFetchRequestInfo( |
| const std::string& job_guid) { |
| - return request_map_[job_guid]; |
| + BackgroundFetchJobInfo* job_info = job_map_[job_guid].get(); |
| + DCHECK(job_info); |
| + |
| + // TODO(harkness): This needs to be async when it queries real storage. |
| + std::unique_ptr<BackgroundFetchRequestInfo> request_info = |
| + GetRequestInfo(job_guid, job_info->next_request_index()); |
| + const std::string request_guid = request_info->guid(); |
| + job_info->AddActiveRequest(std::move(request_info)); |
| + return *job_info->GetActiveRequest(request_guid); |
| +} |
| + |
| +bool BackgroundFetchDataManager::IsComplete(const std::string& job_guid) const { |
| + auto iter = job_map_.find(job_guid); |
| + DCHECK(iter != job_map_.end()); |
| + return iter->second->IsComplete(); |
| +} |
| + |
| +bool BackgroundFetchDataManager::HasRequestsRemaining( |
| + const std::string& job_guid) const { |
| + auto iter = job_map_.find(job_guid); |
| + DCHECK(iter != job_map_.end()); |
| + return iter->second->HasRequestsRemaining(); |
| +} |
| + |
| +void BackgroundFetchDataManager::UpdateRequestDownloadGuid( |
| + const std::string& job_guid, |
| + const std::string& request_guid, |
| + const std::string& download_guid) { |
|
Peter Beverloo
2017/03/25 03:38:50
I'd love to simplify the data model away from GUIDs.
harkness
2017/03/26 16:13:18
I do hate that we have to have the download_guid a
Peter Beverloo
2017/03/26 22:32:39
Good!
harkness
2017/03/27 07:32:38
All the knowledge now on the JobInfo are things th
|
| + BackgroundFetchJobInfo* job_info = job_map_[job_guid].get(); |
| + DCHECK(job_info); |
| + BackgroundFetchRequestInfo* request = |
| + job_info->GetActiveRequest(request_guid); |
| + DCHECK(request); |
| + request->set_download_guid(download_guid); |
| } |
| } // namespace content |
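Taken together, the surface this CL gives the DataManager would be driven roughly as follows. This is a sketch only; the caller, StartDownload(), and the local variables are assumed, while the DataManager methods and the DownloadItem accessors are the real ones.

  // 1. Register the job and its requests.
  data_manager_->CreateRequest(std::move(job_info), std::move(request_infos));

  // 2. Hand the next request to the download machinery.
  const BackgroundFetchRequestInfo& request =
      data_manager_->GetNextBackgroundFetchRequestInfo(job_guid);
  StartDownload(request);  // Hypothetical: issues the actual fetch.

  // 3. As the download progresses, record where the data landed and its state.
  data_manager_->UpdateRequestDownloadGuid(job_guid, request.guid(),
                                           item->GetGuid());
  data_manager_->UpdateRequestStorageState(job_guid, request.guid(),
                                           item->GetTargetFilePath(),
                                           item->GetReceivedBytes());
  bool more = data_manager_->UpdateRequestState(
      job_guid, request.guid(), item->GetState(), item->GetLastReason());

  // 4. Once all requests are done, aggregate the responses into blobs.
  if (!more && data_manager_->IsComplete(job_guid))
    data_manager_->GetJobResponse(job_guid, response_complete_callback);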