Chromium Code Reviews| Index: content/browser/cache_storage/cache_storage_cache.cc |
| diff --git a/content/browser/cache_storage/cache_storage_cache.cc b/content/browser/cache_storage/cache_storage_cache.cc |
| index a120339b5d9c1bccf7d60979c248d805a032b29f..31dad4b85d8acd833f712e557ded2983306b5a49 100644 |
| --- a/content/browser/cache_storage/cache_storage_cache.cc |
| +++ b/content/browser/cache_storage/cache_storage_cache.cc |
| @@ -6,6 +6,7 @@ |
| #include <stddef.h> |
| #include <algorithm> |
| +#include <functional> |
| #include <limits> |
| #include <memory> |
| #include <string> |
| @@ -15,6 +16,7 @@ |
| #include "base/bind_helpers.h" |
| #include "base/files/file_path.h" |
| #include "base/guid.h" |
| +#include "base/lazy_instance.h" |
| #include "base/macros.h" |
| #include "base/memory/ptr_util.h" |
| #include "base/metrics/histogram_macros.h" |
| @@ -28,6 +30,8 @@ |
| #include "content/browser/cache_storage/cache_storage_scheduler.h" |
| #include "content/public/browser/browser_thread.h" |
| #include "content/public/common/referrer.h" |
| +#include "crypto/hmac.h" |
| +#include "crypto/random.h" |
| #include "net/base/completion_callback.h" |
| #include "net/base/io_buffer.h" |
| #include "net/base/net_errors.h" |
| @@ -254,6 +258,61 @@ std::unique_ptr<ServiceWorkerResponse> CreateResponse( |
| metadata.response().cors_exposed_header_names().end())); |
| } |
| +// The sizes of opaque (non-CORS) resource responses are padded in order |
| +// to obfuscate their actual sizes. |
| +bool ShouldPadResourceSize(const ServiceWorkerResponse& response) { |
| + return (response.response_type == |
| + blink::kWebServiceWorkerResponseTypeOpaque || |
| + response.response_type == |
| + blink::kWebServiceWorkerResponseTypeOpaqueRedirect) && |
| + !response.url_list.empty(); |
| +} |
| + |
| +// Return a hash of the |response| values to be used for the resource padding |
| +// calculation. |
| +std::string CalculateResponsePaddingHash( |
| + const ServiceWorkerResponse& response) { |
| + DCHECK(!response.url_list.empty()); |
| + |
| + size_t h = std::hash<std::string>{}(response.url_list.back().spec()); |
| + return std::string(reinterpret_cast<const char*>(&h), sizeof(h)); |
| +} |
| + |
| +// Process-lifetime random HMAC key used to derive response padding. |
| +class PaddingHMACKey : public std::string { |
| + public: |
| + PaddingHMACKey() { |
| + const size_t kKeyLen = 20; |
| + resize(kKeyLen);  // resize (not reserve): writing via [] requires size(). |
| + crypto::RandBytes(&(*this)[0], size()); |
| + } |
| +}; |
| + |
| +const std::string& GetPaddingHMACKey() { |
| + static base::LazyInstance<PaddingHMACKey>::Leaky s_key = |
| + LAZY_INSTANCE_INITIALIZER; |
| + return s_key.Get(); |
| +} |
| + |
| +std::string CalculatePaddingHMAC(const std::string& value) { |
| + crypto::HMAC hmac(crypto::HMAC::SHA256); |
| + std::string result(hmac.DigestLength(), '\0'); |
| + if (!hmac.Init(GetPaddingHMACKey()) || |
| + !hmac.Sign(value, reinterpret_cast<uint8_t*>(&result[0]), |
| + result.length())) { |
| + LOG(FATAL) << "Failed to calculate HMAC."; |
| + } |
| + return result; |
| +} |
| + |
| +// Converts the |response_hmac| bytes into a value from the random distribution |
| +// determined by threat analysis. |
| +int64_t ResponsePaddingDistribution(const std::string& response_hmac) { |
| + size_t response_hash = std::hash<std::string>{}(response_hmac); |
| + const size_t kPaddingRange = 400 * 1024; |
| + const size_t kMinPadding = 20 * 1024; |
| + return kMinPadding + (response_hash % kPaddingRange); |
| +} |
| + |
| } // namespace |
| // The state needed to pass between CacheStorageCache::Put callbacks. |
| @@ -291,10 +350,12 @@ struct CacheStorageCache::QueryCacheResult { |
| struct CacheStorageCache::QueryCacheContext { |
| QueryCacheContext(std::unique_ptr<ServiceWorkerFetchRequest> request, |
| const CacheStorageCacheQueryParams& options, |
| - const QueryCacheCallback& callback) |
| + const QueryCacheCallback& callback, |
| + uint32_t query_types) |
| : request(std::move(request)), |
| options(options), |
| callback(callback), |
| + query_types(query_types), |
| matches(base::MakeUnique<QueryCacheResults>()) {} |
| ~QueryCacheContext() { |
| @@ -311,7 +372,7 @@ struct CacheStorageCache::QueryCacheContext { |
| std::unique_ptr<ServiceWorkerFetchRequest> request; |
| CacheStorageCacheQueryParams options; |
| QueryCacheCallback callback; |
| - QueryCacheType query_type; |
| + uint32_t query_types = 0x0; |
| size_t estimated_out_bytes = 0; |
| // Iteration state |
| @@ -611,10 +672,12 @@ CacheStorageCache::CacheStorageCache( |
| void CacheStorageCache::QueryCache( |
| std::unique_ptr<ServiceWorkerFetchRequest> request, |
| const CacheStorageCacheQueryParams& options, |
| - QueryCacheType query_type, |
| + uint32_t query_types, |
| const QueryCacheCallback& callback) { |
| - DCHECK_NE(BACKEND_UNINITIALIZED, backend_state_); |
| - if (backend_state_ != BACKEND_OPEN) { |
| + DCHECK_NE( |
| + QUERY_CACHE_ENTRIES | QUERY_CACHE_RESPONSES_WITH_BODIES, |
| + query_types & (QUERY_CACHE_ENTRIES | QUERY_CACHE_RESPONSES_WITH_BODIES)); |
| + if (backend_state_ == BACKEND_CLOSED) { |
| callback.Run(CACHE_STORAGE_ERROR_STORAGE, |
| std::unique_ptr<QueryCacheResults>()); |
| return; |
| @@ -627,9 +690,8 @@ void CacheStorageCache::QueryCache( |
| } |
| ServiceWorkerFetchRequest* request_ptr = request.get(); |
| - std::unique_ptr<QueryCacheContext> query_cache_context( |
| - new QueryCacheContext(std::move(request), options, callback)); |
| - query_cache_context->query_type = query_type; |
| + std::unique_ptr<QueryCacheContext> query_cache_context(new QueryCacheContext( |
| + std::move(request), options, callback, query_types)); |
| if (query_cache_context->request && |
| !query_cache_context->request->url.is_empty() && !options.ignore_search) { |
| @@ -710,7 +772,7 @@ void CacheStorageCache::QueryCacheFilterEntry( |
| disk_cache::ScopedEntryPtr entry(query_cache_context->enumerated_entry); |
| query_cache_context->enumerated_entry = nullptr; |
| - if (backend_state_ != BACKEND_OPEN) { |
| + if (backend_state_ == BACKEND_CLOSED) { |
| QueryCacheCallback callback = query_cache_context->callback; |
| callback.Run(CACHE_STORAGE_ERROR_NOT_FOUND, |
| std::move(query_cache_context->matches)); |
| @@ -772,54 +834,48 @@ void CacheStorageCache::QueryCacheDidReadMetadata( |
| return; |
| } |
| - if (query_cache_context->query_type == QueryCacheType::CACHE_ENTRIES) { |
| - match->request.reset(); |
| - match->response.reset(); |
| + if (query_cache_context->query_types & QUERY_CACHE_ENTRIES) |
| match->entry = std::move(entry); |
| - QueryCacheOpenNextEntry(std::move(query_cache_context)); |
| - return; |
| - } |
| - |
| - query_cache_context->estimated_out_bytes += |
| - match->request->EstimatedStructSize(); |
| - if (query_cache_context->estimated_out_bytes > max_query_size_bytes_) { |
| - query_cache_context->callback.Run(CACHE_STORAGE_ERROR_QUERY_TOO_LARGE, |
| - std::unique_ptr<QueryCacheResults>()); |
| - return; |
| - } |
| - if (query_cache_context->query_type == QueryCacheType::REQUESTS) { |
| - match->response.reset(); |
| - QueryCacheOpenNextEntry(std::move(query_cache_context)); |
| - return; |
| + if (query_cache_context->query_types & QUERY_CACHE_REQUESTS) { |
| + query_cache_context->estimated_out_bytes += |
| + match->request->EstimatedStructSize(); |
| + if (query_cache_context->estimated_out_bytes > max_query_size_bytes_) { |
| + query_cache_context->callback.Run(CACHE_STORAGE_ERROR_QUERY_TOO_LARGE, |
| + std::unique_ptr<QueryCacheResults>()); |
| + return; |
| + } |
| + } else { |
| + match->request.reset(); |
| } |
| - DCHECK_EQ(QueryCacheType::REQUESTS_AND_RESPONSES, |
| - query_cache_context->query_type); |
| - |
| - query_cache_context->estimated_out_bytes += |
| - match->response->EstimatedStructSize(); |
| - if (query_cache_context->estimated_out_bytes > max_query_size_bytes_) { |
| - query_cache_context->callback.Run(CACHE_STORAGE_ERROR_QUERY_TOO_LARGE, |
| - std::unique_ptr<QueryCacheResults>()); |
| - return; |
| - } |
| + if (query_cache_context->query_types & QUERY_CACHE_RESPONSES_WITH_BODIES) { |
| + query_cache_context->estimated_out_bytes += |
| + match->response->EstimatedStructSize(); |
| + if (query_cache_context->estimated_out_bytes > max_query_size_bytes_) { |
| + query_cache_context->callback.Run(CACHE_STORAGE_ERROR_QUERY_TOO_LARGE, |
| + std::unique_ptr<QueryCacheResults>()); |
| + return; |
| + } |
| + if (entry->GetDataSize(INDEX_RESPONSE_BODY) == 0) { |
| + QueryCacheOpenNextEntry(std::move(query_cache_context)); |
| + return; |
| + } |
| - if (entry->GetDataSize(INDEX_RESPONSE_BODY) == 0) { |
| - QueryCacheOpenNextEntry(std::move(query_cache_context)); |
| - return; |
| - } |
| + if (!blob_storage_context_) { |
| + query_cache_context->callback.Run(CACHE_STORAGE_ERROR_STORAGE, |
| + std::unique_ptr<QueryCacheResults>()); |
| + return; |
| + } |
| - if (!blob_storage_context_) { |
| - query_cache_context->callback.Run(CACHE_STORAGE_ERROR_STORAGE, |
| - base::MakeUnique<QueryCacheResults>()); |
| - return; |
| + std::unique_ptr<storage::BlobDataHandle> blob_data_handle = |
| + PopulateResponseBody(std::move(entry), match->response.get()); |
| + match->blob_handle = std::move(blob_data_handle); |
| + } else if (!(query_cache_context->query_types & |
| + QUERY_CACHE_RESPONSES_NO_BODIES)) { |
| + match->response.reset(); |
| } |
| - std::unique_ptr<storage::BlobDataHandle> blob_data_handle = |
| - PopulateResponseBody(std::move(entry), match->response.get()); |
| - match->blob_handle = std::move(blob_data_handle); |
| - |
| QueryCacheOpenNextEntry(std::move(query_cache_context)); |
| } |
| @@ -829,6 +885,15 @@ bool CacheStorageCache::QueryCacheResultCompare(const QueryCacheResult& lhs, |
| return lhs.entry_time < rhs.entry_time; |
| } |
| +// static |
| +int64_t CacheStorageCache::CalculateResponsePadding( |
| + const ServiceWorkerResponse& response) { |
| + if (!ShouldPadResourceSize(response)) |
| + return 0; |
| + return ResponsePaddingDistribution( |
| + CalculatePaddingHMAC(CalculateResponsePaddingHash(response))); |
| +} |
| + |
| void CacheStorageCache::MatchImpl( |
| std::unique_ptr<ServiceWorkerFetchRequest> request, |
| const CacheStorageCacheQueryParams& match_params, |
| @@ -875,7 +940,7 @@ void CacheStorageCache::MatchAllImpl( |
| } |
| QueryCache(std::move(request), options, |
| - QueryCacheType::REQUESTS_AND_RESPONSES, |
| + QUERY_CACHE_REQUESTS | QUERY_CACHE_RESPONSES_WITH_BODIES, |
| base::Bind(&CacheStorageCache::MatchAllDidQueryCache, |
| weak_ptr_factory_.GetWeakPtr(), callback)); |
| } |
| @@ -1064,25 +1129,36 @@ void CacheStorageCache::PutImpl(std::unique_ptr<PutContext> put_context) { |
| return; |
| } |
| - std::string key = put_context->request->url.spec(); |
| - |
| - net::CompletionCallback callback = base::Bind( |
| - &CacheStorageCache::PutDidDoomEntry, weak_ptr_factory_.GetWeakPtr(), |
| - base::Passed(std::move(put_context))); |
| - |
| - int rv = backend_->DoomEntry(key, callback); |
| - if (rv != net::ERR_IO_PENDING) |
| - callback.Run(rv); |
| + // Explicitly delete the incumbent resource (which may not exist). This is |
| + // done only so that its padding will be decremented from the calculated |
| + // cache padding. |
| + auto delete_request = base::MakeUnique<ServiceWorkerFetchRequest>( |
| + put_context->request->url, "", ServiceWorkerHeaderMap(), Referrer(), |
| + false); |
| + |
| + CacheStorageCacheQueryParams query_options; |
| + query_options.ignore_method = true; |
| + query_options.ignore_vary = true; |
| + DeleteImpl(std::move(delete_request), query_options, |
| + base::Bind(&CacheStorageCache::PutDidDeleteEntry, |
| + weak_ptr_factory_.GetWeakPtr(), |
| + base::Passed(std::move(put_context)))); |
| } |
| -void CacheStorageCache::PutDidDoomEntry(std::unique_ptr<PutContext> put_context, |
| - int rv) { |
| +void CacheStorageCache::PutDidDeleteEntry( |
| + std::unique_ptr<PutContext> put_context, |
| + CacheStorageError error, |
| + std::unique_ptr<QueryCacheResults> query_results) { |
| if (backend_state_ != BACKEND_OPEN) { |
| put_context->callback.Run(CACHE_STORAGE_ERROR_STORAGE); |
| return; |
| } |
| - // |rv| is ignored as doom entry can fail if the entry doesn't exist. |
|
jkarlin
2017/05/26 13:21:25
Why can't we just use doom as we did before and ch
cmumford
2017/05/30 20:56:13
Currently I use the last URL in the URL list. That
jkarlin
2017/05/31 11:47:25
I'm not following what you're saying here.
cmumford
2017/06/08 17:54:04
In answer to your initial question; DoomEntry retu
jkarlin
2017/06/08 18:04:02
Got it, thanks!
|
| + if (error != CACHE_STORAGE_OK && error != CACHE_STORAGE_ERROR_NOT_FOUND) { |
| + // No padding to undo here: it is only added later in PutDidWriteHeaders. |
| + put_context->callback.Run(error); |
| + return; |
| + } |
| std::unique_ptr<disk_cache::Entry*> scoped_entry_ptr( |
| new disk_cache::Entry*()); |
| @@ -1095,11 +1171,11 @@ void CacheStorageCache::PutDidDoomEntry(std::unique_ptr<PutContext> put_context, |
| base::Passed(std::move(scoped_entry_ptr)), |
| base::Passed(std::move(put_context))); |
| - int create_rv = backend_ptr->CreateEntry(request_ptr->url.spec(), entry_ptr, |
| - create_entry_callback); |
| + int rv = backend_ptr->CreateEntry(request_ptr->url.spec(), entry_ptr, |
| + create_entry_callback); |
| - if (create_rv != net::ERR_IO_PENDING) |
| - create_entry_callback.Run(create_rv); |
| + if (rv != net::ERR_IO_PENDING) |
| + create_entry_callback.Run(rv); |
| } |
| void CacheStorageCache::PutDidCreateEntry( |
| @@ -1184,6 +1260,7 @@ void CacheStorageCache::PutDidWriteHeaders( |
| if (rv > 0) |
| storage::RecordBytesWritten(kRecordBytesLabel, rv); |
| + cache_padding_ += CalculateResponsePadding(*put_context->response); |
| // The metadata is written, now for the response content. The data is streamed |
| // from the blob into the cache entry. |
| @@ -1233,6 +1310,53 @@ void CacheStorageCache::PutDidWriteBlobToCache( |
| UpdateCacheSize(base::Bind(put_context->callback, CACHE_STORAGE_OK)); |
| } |
| +void CacheStorageCache::CalculateCacheSizePadding( |
| + const SizePaddingCallback& got_sizes_callback) { |
| + net::CompletionCallback got_size_callback = |
| + base::Bind(&CacheStorageCache::CalculateCacheSizePaddingGotSize, |
| + weak_ptr_factory_.GetWeakPtr(), std::move(got_sizes_callback)); |
| + |
| + int rv = backend_->CalculateSizeOfAllEntries(got_size_callback); |
| + if (rv != net::ERR_IO_PENDING) |
| + got_size_callback.Run(rv); |
| +} |
| + |
| +void CacheStorageCache::CalculateCacheSizePaddingGotSize( |
| + const SizePaddingCallback& callback, |
| + int cache_size) { |
| + // Enumerating entries is only done during cache initialization. |
| + DCHECK_EQ(backend_state_, BACKEND_UNINITIALIZED); |
| + std::unique_ptr<ServiceWorkerFetchRequest> request; |
| + CacheStorageCacheQueryParams options; |
| + options.ignore_search = true; |
| + QueryCache(std::move(request), options, |
| + QUERY_CACHE_REQUESTS | QUERY_CACHE_RESPONSES_WITH_BODIES, |
| + base::Bind(&CacheStorageCache::PaddingDidQueryCache, |
| + weak_ptr_factory_.GetWeakPtr(), std::move(callback), |
| + cache_size)); |
| +} |
| + |
| +void CacheStorageCache::PaddingDidQueryCache( |
| + const SizePaddingCallback& callback, |
| + int cache_size, |
| + CacheStorageError error, |
| + std::unique_ptr<QueryCacheResults> query_cache_results) { |
| + int64_t cache_padding = 0; |
| + if (error == CACHE_STORAGE_OK) { |
| + for (const auto& result : *query_cache_results) |
| + cache_padding += CalculateResponsePadding(*result.response); |
| + } |
| + |
| + callback.Run(cache_size, cache_padding); |
| +} |
| + |
| +void CacheStorageCache::CalculateCacheSize( |
| + const net::CompletionCallback& callback) { |
| + int rv = backend_->CalculateSizeOfAllEntries(callback); |
| + if (rv != net::ERR_IO_PENDING) |
| + callback.Run(rv); |
| +} |
| + |
| void CacheStorageCache::UpdateCacheSize(const base::Closure& callback) { |
| if (backend_state_ != BACKEND_OPEN) |
| return; |
| @@ -1240,13 +1364,9 @@ void CacheStorageCache::UpdateCacheSize(const base::Closure& callback) { |
| // Note that the callback holds a cache handle to keep the cache alive during |
| // the operation since this UpdateCacheSize is often run after an operation |
| // completes and runs its callback. |
| - int rv = backend_->CalculateSizeOfAllEntries( |
| - base::Bind(&CacheStorageCache::UpdateCacheSizeGotSize, |
| - weak_ptr_factory_.GetWeakPtr(), |
| - base::Passed(CreateCacheHandle()), callback)); |
| - |
| - if (rv != net::ERR_IO_PENDING) |
| - UpdateCacheSizeGotSize(CreateCacheHandle(), callback, rv); |
| + CalculateCacheSize(base::Bind(&CacheStorageCache::UpdateCacheSizeGotSize, |
| + weak_ptr_factory_.GetWeakPtr(), |
| + base::Passed(CreateCacheHandle()), callback)); |
| } |
| void CacheStorageCache::UpdateCacheSizeGotSize( |
| @@ -1254,17 +1374,16 @@ void CacheStorageCache::UpdateCacheSizeGotSize( |
| const base::Closure& callback, |
| int current_cache_size) { |
| DCHECK_NE(current_cache_size, CacheStorage::kSizeUnknown); |
| - int64_t old_cache_size = cache_size_; |
| cache_size_ = current_cache_size; |
| - |
| - int64_t size_delta = current_cache_size - old_cache_size; |
| + int64_t size_delta = CacheSize() - last_reported_size_; |
| + last_reported_size_ = CacheSize(); |
| quota_manager_proxy_->NotifyStorageModified( |
| storage::QuotaClient::kServiceWorkerCache, origin_, |
| storage::kStorageTypeTemporary, size_delta); |
| if (cache_observer_) |
| - cache_observer_->CacheSizeUpdated(this, current_cache_size); |
| + cache_observer_->CacheSizeUpdated(this, CacheSize()); |
| callback.Run(); |
| } |
| @@ -1281,47 +1400,64 @@ void CacheStorageCache::Delete(const CacheStorageBatchOperation& operation, |
| operation.request.headers, operation.request.referrer, |
| operation.request.is_reload)); |
| + QueryCacheCallback delete_callback = |
| + base::Bind(&CacheStorageCache::DeleteDidDelete, |
| + weak_ptr_factory_.GetWeakPtr(), callback); |
| + |
| scheduler_->ScheduleOperation( |
| base::Bind(&CacheStorageCache::DeleteImpl, weak_ptr_factory_.GetWeakPtr(), |
| base::Passed(std::move(request)), operation.match_params, |
| - scheduler_->WrapCallbackToRunNext(callback))); |
| + scheduler_->WrapCallbackToRunNext(delete_callback))); |
| +} |
| + |
| +void CacheStorageCache::DeleteDidDelete( |
| + const ErrorCallback& callback, |
| + CacheStorageError error, |
| + std::unique_ptr<QueryCacheResults> query_results) { |
| + callback.Run(error); |
| } |
| void CacheStorageCache::DeleteImpl( |
| std::unique_ptr<ServiceWorkerFetchRequest> request, |
| const CacheStorageCacheQueryParams& match_params, |
| - const ErrorCallback& callback) { |
| + const QueryCacheCallback& callback) { |
| DCHECK_NE(BACKEND_UNINITIALIZED, backend_state_); |
| if (backend_state_ != BACKEND_OPEN) { |
| - callback.Run(CACHE_STORAGE_ERROR_STORAGE); |
| + callback.Run(CACHE_STORAGE_ERROR_STORAGE, |
| + base::MakeUnique<QueryCacheResults>()); |
| return; |
| } |
| - QueryCache(std::move(request), match_params, QueryCacheType::CACHE_ENTRIES, |
| + QueryCache(std::move(request), match_params, |
| + QUERY_CACHE_ENTRIES | QUERY_CACHE_RESPONSES_NO_BODIES, |
| base::Bind(&CacheStorageCache::DeleteDidQueryCache, |
| weak_ptr_factory_.GetWeakPtr(), callback)); |
| } |
| void CacheStorageCache::DeleteDidQueryCache( |
| - const ErrorCallback& callback, |
| + const QueryCacheCallback& callback, |
| CacheStorageError error, |
| std::unique_ptr<QueryCacheResults> query_cache_results) { |
| if (error != CACHE_STORAGE_OK) { |
| - callback.Run(error); |
| + callback.Run(error, std::move(query_cache_results)); |
| return; |
| } |
| if (query_cache_results->empty()) { |
| - callback.Run(CACHE_STORAGE_ERROR_NOT_FOUND); |
| + callback.Run(CACHE_STORAGE_ERROR_NOT_FOUND, std::move(query_cache_results)); |
| return; |
| } |
| for (auto& result : *query_cache_results) { |
| disk_cache::ScopedEntryPtr entry = std::move(result.entry); |
| + cache_padding_ -= CalculateResponsePadding(*result.response); |
| + // TODO(cmumford): Not sure why this should be allowed to go negative. |
| + // DCHECK_GT(cache_padding_, 0); |
| entry->Doom(); |
| } |
| - UpdateCacheSize(base::Bind(callback, CACHE_STORAGE_OK)); |
| + UpdateCacheSize(base::Bind(callback, CACHE_STORAGE_OK, |
| + base::Passed(std::move(query_cache_results)))); |
| } |
| void CacheStorageCache::KeysImpl( |
| @@ -1334,7 +1470,7 @@ void CacheStorageCache::KeysImpl( |
| return; |
| } |
| - QueryCache(std::move(request), options, QueryCacheType::REQUESTS, |
| + QueryCache(std::move(request), options, QUERY_CACHE_REQUESTS, |
| base::Bind(&CacheStorageCache::KeysDidQueryCache, |
| weak_ptr_factory_.GetWeakPtr(), callback)); |
| } |
| @@ -1367,7 +1503,14 @@ void CacheStorageCache::CloseImpl(const base::Closure& callback) { |
| void CacheStorageCache::SizeImpl(const SizeCallback& callback) { |
| DCHECK_NE(BACKEND_UNINITIALIZED, backend_state_); |
| - int64_t size = backend_state_ == BACKEND_OPEN ? cache_size_ : 0; |
| + // TODO(cmumford): Can CacheStorage::kSizeUnknown be returned instead of zero? |
| + if (backend_state_ != BACKEND_OPEN) { |
| + base::ThreadTaskRunnerHandle::Get()->PostTask(FROM_HERE, |
| + base::Bind(callback, 0)); |
| + return; |
| + } |
| + |
| + int64_t size = CacheSize();  // BACKEND_OPEN is guaranteed by the check above. |
| base::ThreadTaskRunnerHandle::Get()->PostTask(FROM_HERE, |
| base::Bind(callback, size)); |
| } |
| @@ -1435,35 +1578,37 @@ void CacheStorageCache::InitDidCreateBackend( |
| const base::Closure& callback, |
| CacheStorageError cache_create_error) { |
| if (cache_create_error != CACHE_STORAGE_OK) { |
| - InitGotCacheSize(callback, cache_create_error, 0); |
| + InitGotCacheSize(callback, cache_create_error, 0 /* cache_size */, |
| + 0 /* cache_padding */); |
| return; |
| } |
| - int rv = backend_->CalculateSizeOfAllEntries( |
| - base::Bind(&CacheStorageCache::InitGotCacheSize, |
| - weak_ptr_factory_.GetWeakPtr(), callback, cache_create_error)); |
| - |
| - if (rv != net::ERR_IO_PENDING) |
| - InitGotCacheSize(callback, cache_create_error, rv); |
| + CalculateCacheSizePadding(base::Bind(&CacheStorageCache::InitGotCacheSize, |
|
jkarlin
2017/05/26 13:21:25
QueryCache calls that require enumerations are *in
cmumford
2017/05/30 20:56:13
Understood. I understand your concerns about #1, b
jkarlin
2017/05/31 11:44:48
So, the plan is for each browsing session to have
cmumford
2017/06/08 17:54:04
Yes.
|
| + weak_ptr_factory_.GetWeakPtr(), callback, |
| + cache_create_error)); |
| } |
| void CacheStorageCache::InitGotCacheSize(const base::Closure& callback, |
| CacheStorageError cache_create_error, |
| - int cache_size) { |
| + int64_t cache_size, |
| + int64_t cache_padding) { |
| // Now that we know the cache size either 1) the cache size should be unknown |
| // (which is why the size was calculated), or 2) it must match the current |
| // size. If the sizes aren't equal then there is a bug in how the cache size |
| // is saved in the store's index. |
| - if (cache_size_ != CacheStorage::kSizeUnknown) { |
| - LOG_IF(ERROR, cache_size_ != cache_size) |
| - << "Cache size: " << cache_size |
| - << " does not match size from index: " << cache_size_; |
| + int64_t prior_total_cache_size = CacheSize(); |
| + cache_size_ = cache_size; |
| + cache_padding_ = cache_padding; |
| + |
| + if (prior_total_cache_size != CacheStorage::kSizeUnknown) { |
| + LOG_IF(ERROR, prior_total_cache_size != CacheSize()) |
| + << "Cache size: " << CacheSize() |
| + << " does not match size from index: " << prior_total_cache_size; |
| UMA_HISTOGRAM_COUNTS_10M("ServiceWorkerCache.IndexSizeDifference", |
| - std::abs(cache_size_ - cache_size)); |
| + std::abs(CacheSize() - prior_total_cache_size)); |
| // Disabled for crbug.com/681900. |
| - // DCHECK_EQ(cache_size_, cache_size); |
| + // DCHECK_EQ(prior_total_cache_size, CacheSize()); |
| } |
| - cache_size_ = cache_size; |
| initializing_ = false; |
| backend_state_ = (cache_create_error == CACHE_STORAGE_OK && backend_ && |
| backend_state_ == BACKEND_UNINITIALIZED) |
| @@ -1474,7 +1619,7 @@ void CacheStorageCache::InitGotCacheSize(const base::Closure& callback, |
| cache_create_error, CACHE_STORAGE_ERROR_LAST + 1); |
| if (cache_observer_) |
| - cache_observer_->CacheSizeUpdated(this, cache_size_); |
| + cache_observer_->CacheSizeUpdated(this, CacheSize()); |
| callback.Run(); |
| } |
| @@ -1501,4 +1646,10 @@ CacheStorageCache::CreateCacheHandle() { |
| return cache_storage_->CreateCacheHandle(this); |
| } |
| +int64_t CacheStorageCache::CacheSize() const { |
| + return cache_size_ == CacheStorage::kSizeUnknown |
| + ? CacheStorage::kSizeUnknown |
| + : cache_size_ + cache_padding_; |
| +} |
| + |
| } // namespace content |