| Index: content/browser/cache_storage/cache_storage_cache.cc
|
| diff --git a/content/browser/cache_storage/cache_storage_cache.cc b/content/browser/cache_storage/cache_storage_cache.cc
|
| index d8729778100f13d774841c0eb0093df128c7e961..a48ed042054e2e639187b095e7f7fe74612a39ad 100644
|
| --- a/content/browser/cache_storage/cache_storage_cache.cc
|
| +++ b/content/browser/cache_storage/cache_storage_cache.cc
|
| @@ -6,6 +6,7 @@
|
|
|
| #include <stddef.h>
|
| #include <algorithm>
|
| +#include <functional>
|
| #include <limits>
|
| #include <memory>
|
| #include <string>
|
| @@ -29,6 +30,8 @@
|
| #include "content/public/browser/browser_thread.h"
|
| #include "content/public/common/content_features.h"
|
| #include "content/public/common/referrer.h"
|
| +#include "crypto/hmac.h"
|
| +#include "crypto/symmetric_key.h"
|
| #include "net/base/completion_callback.h"
|
| #include "net/base/io_buffer.h"
|
| #include "net/base/net_errors.h"
|
| @@ -260,6 +263,78 @@ std::unique_ptr<ServiceWorkerResponse> CreateResponse(
|
| metadata.response().cors_exposed_header_names().end()));
|
| }
|
|
|
| +// The size of opaque (non-cors) resource responses is padded in order
|
| +// to obfuscate their actual size.
|
| +bool ShouldPadResponseType(network::mojom::FetchResponseType response_type,
|
| + bool has_urls) {
|
| + switch (response_type) {
|
| + case network::mojom::FetchResponseType::kBasic:
|
| + case network::mojom::FetchResponseType::kCORS:
|
| + case network::mojom::FetchResponseType::kDefault:
|
| + case network::mojom::FetchResponseType::kError:
|
| + return false;
|
| + case network::mojom::FetchResponseType::kOpaque:
|
| + case network::mojom::FetchResponseType::kOpaqueRedirect:
|
| + return has_urls;
|
| + }
|
| + NOTREACHED();
|
| + return false;
|
| +}
|
| +
|
| +bool ShouldPadResourceSize(const content::proto::CacheResponse* response) {
|
| + return ShouldPadResponseType(
|
| + ProtoResponseTypeToFetchResponseType(response->response_type()),
|
| + response->url_list_size());
|
| +}
|
| +
|
| +bool ShouldPadResourceSize(const ServiceWorkerResponse* response) {
|
| + return ShouldPadResponseType(response->response_type,
|
| + !response->url_list.empty());
|
| +}
|
| +
|
| +// If the way that a cache's padding is calculated changes, increment this
|
| +// version.
|
| +//
|
| +// History:
|
| +//
|
| +// 1: Uniform random 400K.
|
| +const int32_t kCachePaddingAlgorithmVersion = 1;
|
| +
|
| +int64_t CalculateResponsePaddingInternal(
|
| + const std::string& response_url,
|
| + const crypto::SymmetricKey* padding_key,
|
| + int side_data_size) {
|
| + const uint64_t kPaddingRange = 400 * 1024; // Increment version if changed.
|
| +
|
| + DCHECK(!response_url.empty());
|
| +
|
| + crypto::HMAC hmac(crypto::HMAC::SHA256);
|
| + if (!hmac.Init(padding_key))
|
| + LOG(FATAL) << "Failed to init HMAC.";
|
| +
|
| + std::vector<uint8_t> digest(hmac.DigestLength());
|
| + bool success;
|
| + if (side_data_size)
|
| + success = hmac.Sign(response_url + "METADATA", &digest[0], digest.size());
|
| + else
|
| + success = hmac.Sign(response_url, &digest[0], digest.size());
|
| + if (!success)
|
| + LOG(FATAL) << "Failed to sign URL.";
|
| +
|
| + DCHECK_GE(digest.size(), sizeof(uint64_t));
|
| + uint64_t val = *(reinterpret_cast<uint64_t*>(&digest[0]));
|
| + return val % kPaddingRange;
|
| +}
|
| +
|
| +int64_t CalculateResponsePaddingInternal(
|
| + const ::content::proto::CacheResponse* response,
|
| + const crypto::SymmetricKey* padding_key,
|
| + int side_data_size) {
|
| + DCHECK(ShouldPadResourceSize(response));
|
| + const std::string& url = response->url_list(response->url_list_size() - 1);
|
| + return CalculateResponsePaddingInternal(url, padding_key, side_data_size);
|
| +}
|
| +
|
| } // namespace
|
|
|
| // The state needed to pass between CacheStorageCache::Put callbacks.
|
| @@ -297,10 +372,12 @@ struct CacheStorageCache::QueryCacheResult {
|
| struct CacheStorageCache::QueryCacheContext {
|
| QueryCacheContext(std::unique_ptr<ServiceWorkerFetchRequest> request,
|
| const CacheStorageCacheQueryParams& options,
|
| - QueryCacheCallback callback)
|
| + QueryCacheCallback callback,
|
| + QueryTypes query_types)
|
| : request(std::move(request)),
|
| options(options),
|
| callback(std::move(callback)),
|
| + query_types(query_types),
|
| matches(base::MakeUnique<QueryCacheResults>()) {}
|
|
|
| ~QueryCacheContext() {
|
| @@ -317,7 +394,7 @@ struct CacheStorageCache::QueryCacheContext {
|
| std::unique_ptr<ServiceWorkerFetchRequest> request;
|
| CacheStorageCacheQueryParams options;
|
| QueryCacheCallback callback;
|
| - QueryCacheType query_type;
|
| + QueryTypes query_types = 0;
|
| size_t estimated_out_bytes = 0;
|
|
|
| // Iteration state
|
| @@ -338,11 +415,13 @@ std::unique_ptr<CacheStorageCache> CacheStorageCache::CreateMemoryCache(
|
| CacheStorage* cache_storage,
|
| scoped_refptr<net::URLRequestContextGetter> request_context_getter,
|
| scoped_refptr<storage::QuotaManagerProxy> quota_manager_proxy,
|
| - base::WeakPtr<storage::BlobStorageContext> blob_context) {
|
| + base::WeakPtr<storage::BlobStorageContext> blob_context,
|
| + std::unique_ptr<crypto::SymmetricKey> cache_padding_key) {
|
| CacheStorageCache* cache = new CacheStorageCache(
|
| origin, cache_name, base::FilePath(), cache_storage,
|
| std::move(request_context_getter), std::move(quota_manager_proxy),
|
| - blob_context, 0 /* cache_size */);
|
| + blob_context, 0 /* cache_size */, 0 /* cache_padding */,
|
| + std::move(cache_padding_key));
|
| cache->SetObserver(cache_storage);
|
| cache->InitBackend();
|
| return base::WrapUnique(cache);
|
| @@ -357,11 +436,13 @@ std::unique_ptr<CacheStorageCache> CacheStorageCache::CreatePersistentCache(
|
| scoped_refptr<net::URLRequestContextGetter> request_context_getter,
|
| scoped_refptr<storage::QuotaManagerProxy> quota_manager_proxy,
|
| base::WeakPtr<storage::BlobStorageContext> blob_context,
|
| - int64_t cache_size) {
|
| + int64_t cache_size,
|
| + int64_t cache_padding,
|
| + std::unique_ptr<crypto::SymmetricKey> cache_padding_key) {
|
| CacheStorageCache* cache = new CacheStorageCache(
|
| origin, cache_name, path, cache_storage,
|
| std::move(request_context_getter), std::move(quota_manager_proxy),
|
| - blob_context, cache_size);
|
| + blob_context, cache_size, cache_padding, std::move(cache_padding_key));
|
| cache->SetObserver(cache_storage), cache->InitBackend();
|
| return base::WrapUnique(cache);
|
| }
|
| @@ -607,7 +688,9 @@ CacheStorageCache::CacheStorageCache(
|
| scoped_refptr<net::URLRequestContextGetter> request_context_getter,
|
| scoped_refptr<storage::QuotaManagerProxy> quota_manager_proxy,
|
| base::WeakPtr<storage::BlobStorageContext> blob_context,
|
| - int64_t cache_size)
|
| + int64_t cache_size,
|
| + int64_t cache_padding,
|
| + std::unique_ptr<crypto::SymmetricKey> cache_padding_key)
|
| : origin_(origin),
|
| cache_name_(cache_name),
|
| path_(path),
|
| @@ -618,12 +701,15 @@ CacheStorageCache::CacheStorageCache(
|
| scheduler_(
|
| new CacheStorageScheduler(CacheStorageSchedulerClient::CLIENT_CACHE)),
|
| cache_size_(cache_size),
|
| + cache_padding_(cache_padding),
|
| + cache_padding_key_(std::move(cache_padding_key)),
|
| max_query_size_bytes_(kMaxQueryCacheResultBytes),
|
| cache_observer_(nullptr),
|
| memory_only_(path.empty()),
|
| weak_ptr_factory_(this) {
|
| DCHECK(!origin_.is_empty());
|
| DCHECK(quota_manager_proxy_.get());
|
| + DCHECK(cache_padding_key_.get());
|
|
|
| quota_manager_proxy_->NotifyOriginInUse(origin_);
|
| }
|
| @@ -631,10 +717,12 @@ CacheStorageCache::CacheStorageCache(
|
| void CacheStorageCache::QueryCache(
|
| std::unique_ptr<ServiceWorkerFetchRequest> request,
|
| const CacheStorageCacheQueryParams& options,
|
| - QueryCacheType query_type,
|
| + QueryTypes query_types,
|
| QueryCacheCallback callback) {
|
| - DCHECK_NE(BACKEND_UNINITIALIZED, backend_state_);
|
| - if (backend_state_ != BACKEND_OPEN) {
|
| + DCHECK_NE(
|
| + QUERY_CACHE_ENTRIES | QUERY_CACHE_RESPONSES_WITH_BODIES,
|
| + query_types & (QUERY_CACHE_ENTRIES | QUERY_CACHE_RESPONSES_WITH_BODIES));
|
| + if (backend_state_ == BACKEND_CLOSED) {
|
| std::move(callback).Run(CACHE_STORAGE_ERROR_STORAGE,
|
| std::unique_ptr<QueryCacheResults>());
|
| return;
|
| @@ -648,9 +736,8 @@ void CacheStorageCache::QueryCache(
|
| }
|
|
|
| ServiceWorkerFetchRequest* request_ptr = request.get();
|
| - std::unique_ptr<QueryCacheContext> query_cache_context(
|
| - new QueryCacheContext(std::move(request), options, std::move(callback)));
|
| - query_cache_context->query_type = query_type;
|
| + std::unique_ptr<QueryCacheContext> query_cache_context(new QueryCacheContext(
|
| + std::move(request), options, std::move(callback), query_types));
|
|
|
| if (query_cache_context->request &&
|
| !query_cache_context->request->url.is_empty() && !options.ignore_search) {
|
| @@ -733,7 +820,7 @@ void CacheStorageCache::QueryCacheFilterEntry(
|
| disk_cache::ScopedEntryPtr entry(query_cache_context->enumerated_entry);
|
| query_cache_context->enumerated_entry = nullptr;
|
|
|
| - if (backend_state_ != BACKEND_OPEN) {
|
| + if (backend_state_ == BACKEND_CLOSED) {
|
| std::move(query_cache_context->callback)
|
| .Run(CACHE_STORAGE_ERROR_NOT_FOUND,
|
| std::move(query_cache_context->matches));
|
| @@ -795,57 +882,50 @@ void CacheStorageCache::QueryCacheDidReadMetadata(
|
| return;
|
| }
|
|
|
| - if (query_cache_context->query_type == QueryCacheType::CACHE_ENTRIES) {
|
| - match->request.reset();
|
| - match->response.reset();
|
| + if (query_cache_context->query_types & QUERY_CACHE_ENTRIES)
|
| match->entry = std::move(entry);
|
| - QueryCacheOpenNextEntry(std::move(query_cache_context));
|
| - return;
|
| - }
|
|
|
| - query_cache_context->estimated_out_bytes +=
|
| - match->request->EstimatedStructSize();
|
| - if (query_cache_context->estimated_out_bytes > max_query_size_bytes_) {
|
| - std::move(query_cache_context->callback)
|
| - .Run(CACHE_STORAGE_ERROR_QUERY_TOO_LARGE,
|
| - std::unique_ptr<QueryCacheResults>());
|
| - return;
|
| - }
|
| -
|
| - if (query_cache_context->query_type == QueryCacheType::REQUESTS) {
|
| - match->response.reset();
|
| - QueryCacheOpenNextEntry(std::move(query_cache_context));
|
| - return;
|
| + if (query_cache_context->query_types & QUERY_CACHE_REQUESTS) {
|
| + query_cache_context->estimated_out_bytes +=
|
| + match->request->EstimatedStructSize();
|
| + if (query_cache_context->estimated_out_bytes > max_query_size_bytes_) {
|
| + std::move(query_cache_context->callback)
|
| + .Run(CACHE_STORAGE_ERROR_QUERY_TOO_LARGE,
|
| + std::unique_ptr<QueryCacheResults>());
|
| + return;
|
| + }
|
| + } else {
|
| + match->request.reset();
|
| }
|
|
|
| - DCHECK_EQ(QueryCacheType::REQUESTS_AND_RESPONSES,
|
| - query_cache_context->query_type);
|
| -
|
| - query_cache_context->estimated_out_bytes +=
|
| - match->response->EstimatedStructSize();
|
| - if (query_cache_context->estimated_out_bytes > max_query_size_bytes_) {
|
| - std::move(query_cache_context->callback)
|
| - .Run(CACHE_STORAGE_ERROR_QUERY_TOO_LARGE,
|
| - std::unique_ptr<QueryCacheResults>());
|
| - return;
|
| - }
|
| + if (query_cache_context->query_types & QUERY_CACHE_RESPONSES_WITH_BODIES) {
|
| + query_cache_context->estimated_out_bytes +=
|
| + match->response->EstimatedStructSize();
|
| + if (query_cache_context->estimated_out_bytes > max_query_size_bytes_) {
|
| + std::move(query_cache_context->callback)
|
| + .Run(CACHE_STORAGE_ERROR_QUERY_TOO_LARGE,
|
| + std::unique_ptr<QueryCacheResults>());
|
| + return;
|
| + }
|
| + if (entry->GetDataSize(INDEX_RESPONSE_BODY) == 0) {
|
| + QueryCacheOpenNextEntry(std::move(query_cache_context));
|
| + return;
|
| + }
|
|
|
| - if (entry->GetDataSize(INDEX_RESPONSE_BODY) == 0) {
|
| - QueryCacheOpenNextEntry(std::move(query_cache_context));
|
| - return;
|
| - }
|
| + if (!blob_storage_context_) {
|
| + std::move(query_cache_context->callback)
|
| + .Run(CACHE_STORAGE_ERROR_STORAGE,
|
| + std::unique_ptr<QueryCacheResults>());
|
| + return;
|
| + }
|
|
|
| - if (!blob_storage_context_) {
|
| - std::move(query_cache_context->callback)
|
| - .Run(CACHE_STORAGE_ERROR_STORAGE,
|
| - base::MakeUnique<QueryCacheResults>());
|
| - return;
|
| + match->blob_handle =
|
| + PopulateResponseBody(std::move(entry), match->response.get());
|
| + } else if (!(query_cache_context->query_types &
|
| + QUERY_CACHE_RESPONSES_NO_BODIES)) {
|
| + match->response.reset();
|
| }
|
|
|
| - std::unique_ptr<storage::BlobDataHandle> blob_data_handle =
|
| - PopulateResponseBody(std::move(entry), match->response.get());
|
| - match->blob_handle = std::move(blob_data_handle);
|
| -
|
| QueryCacheOpenNextEntry(std::move(query_cache_context));
|
| }
|
|
|
| @@ -855,6 +935,22 @@ bool CacheStorageCache::QueryCacheResultCompare(const QueryCacheResult& lhs,
|
| return lhs.entry_time < rhs.entry_time;
|
| }
|
|
|
| +// static
|
| +int64_t CacheStorageCache::CalculateResponsePadding(
|
| + const ServiceWorkerResponse& response,
|
| + const crypto::SymmetricKey* padding_key,
|
| + int side_data_size) {
|
| + if (!ShouldPadResourceSize(&response))
|
| + return 0;
|
| + return CalculateResponsePaddingInternal(response.url_list.back().spec(),
|
| + padding_key, side_data_size);
|
| +}
|
| +
|
| +// static
|
| +int32_t CacheStorageCache::GetResponsePaddingVersion() {
|
| + return kCachePaddingAlgorithmVersion;
|
| +}
|
| +
|
| void CacheStorageCache::MatchImpl(
|
| std::unique_ptr<ServiceWorkerFetchRequest> request,
|
| const CacheStorageCacheQueryParams& match_params,
|
| @@ -904,7 +1000,8 @@ void CacheStorageCache::MatchAllImpl(
|
| }
|
|
|
| QueryCache(
|
| - std::move(request), options, QueryCacheType::REQUESTS_AND_RESPONSES,
|
| + std::move(request), options,
|
| + QUERY_CACHE_REQUESTS | QUERY_CACHE_RESPONSES_WITH_BODIES,
|
| base::BindOnce(&CacheStorageCache::MatchAllDidQueryCache,
|
| weak_ptr_factory_.GetWeakPtr(), std::move(callback)));
|
| }
|
| @@ -1017,11 +1114,19 @@ void CacheStorageCache::WriteSideDataDidReadMetaData(
|
| // Get a temporary copy of the entry pointer before passing it in base::Bind.
|
| disk_cache::Entry* temp_entry_ptr = entry.get();
|
|
|
| + std::unique_ptr<content::proto::CacheResponse> response(
|
| + headers->release_response());
|
| +
|
| + int side_data_size_before_write = 0;
|
| + if (ShouldPadResourceSize(response.get()))
|
| + side_data_size_before_write = entry->GetDataSize(INDEX_SIDE_DATA);
|
| +
|
| net::CompletionCallback write_side_data_callback =
|
| base::AdaptCallbackForRepeating(
|
| base::BindOnce(&CacheStorageCache::WriteSideDataDidWrite,
|
| weak_ptr_factory_.GetWeakPtr(), std::move(callback),
|
| - base::Passed(std::move(entry)), buf_len));
|
| + base::Passed(std::move(entry)), buf_len,
|
| + std::move(response), side_data_size_before_write));
|
|
|
| int rv = temp_entry_ptr->WriteData(
|
| INDEX_SIDE_DATA, 0 /* offset */, buffer.get(), buf_len,
|
| @@ -1031,10 +1136,13 @@ void CacheStorageCache::WriteSideDataDidReadMetaData(
|
| write_side_data_callback.Run(rv);
|
| }
|
|
|
| -void CacheStorageCache::WriteSideDataDidWrite(ErrorCallback callback,
|
| - disk_cache::ScopedEntryPtr entry,
|
| - int expected_bytes,
|
| - int rv) {
|
| +void CacheStorageCache::WriteSideDataDidWrite(
|
| + ErrorCallback callback,
|
| + disk_cache::ScopedEntryPtr entry,
|
| + int expected_bytes,
|
| + std::unique_ptr<::content::proto::CacheResponse> response,
|
| + int side_data_size_before_write,
|
| + int rv) {
|
| if (rv != expected_bytes) {
|
| entry->Doom();
|
| UpdateCacheSize(
|
| @@ -1045,6 +1153,14 @@ void CacheStorageCache::WriteSideDataDidWrite(ErrorCallback callback,
|
| if (rv > 0)
|
| storage::RecordBytesWritten(kRecordBytesLabel, rv);
|
|
|
| + if (ShouldPadResourceSize(response.get())) {
|
| + cache_padding_ -= CalculateResponsePaddingInternal(
|
| + response.get(), cache_padding_key_.get(), side_data_size_before_write);
|
| +
|
| + cache_padding_ += CalculateResponsePaddingInternal(
|
| + response.get(), cache_padding_key_.get(), rv);
|
| + }
|
| +
|
| UpdateCacheSize(base::BindOnce(std::move(callback), CACHE_STORAGE_OK));
|
| }
|
|
|
| @@ -1099,26 +1215,36 @@ void CacheStorageCache::PutImpl(std::unique_ptr<PutContext> put_context) {
|
| return;
|
| }
|
|
|
| - std::string key = put_context->request->url.spec();
|
| + // Explicitly delete the incumbent resource (which may not exist). This is
|
| + // only done so that its padding will be decremented from the calculated
|
| + // cache padding.
|
| + // TODO(cmumford): Research alternatives to this explicit delete as it
|
| + // seriously impacts put performance.
|
| + auto delete_request = base::MakeUnique<ServiceWorkerFetchRequest>(
|
| + put_context->request->url, "", ServiceWorkerHeaderMap(), Referrer(),
|
| + false);
|
|
|
| - net::CompletionCallback callback =
|
| - base::AdaptCallbackForRepeating(base::BindOnce(
|
| - &CacheStorageCache::PutDidDoomEntry, weak_ptr_factory_.GetWeakPtr(),
|
| - base::Passed(std::move(put_context))));
|
| -
|
| - int rv = backend_->DoomEntry(key, callback);
|
| - if (rv != net::ERR_IO_PENDING)
|
| - callback.Run(rv);
|
| + CacheStorageCacheQueryParams query_options;
|
| + query_options.ignore_method = true;
|
| + query_options.ignore_vary = true;
|
| + DeleteImpl(std::move(delete_request), query_options,
|
| + base::BindOnce(&CacheStorageCache::PutDidDeleteEntry,
|
| + weak_ptr_factory_.GetWeakPtr(),
|
| + base::Passed(std::move(put_context))));
|
| }
|
|
|
| -void CacheStorageCache::PutDidDoomEntry(std::unique_ptr<PutContext> put_context,
|
| - int rv) {
|
| +void CacheStorageCache::PutDidDeleteEntry(
|
| + std::unique_ptr<PutContext> put_context,
|
| + CacheStorageError error) {
|
| if (backend_state_ != BACKEND_OPEN) {
|
| std::move(put_context->callback).Run(CACHE_STORAGE_ERROR_STORAGE);
|
| return;
|
| }
|
|
|
| - // |rv| is ignored as doom entry can fail if the entry doesn't exist.
|
| + if (error != CACHE_STORAGE_OK && error != CACHE_STORAGE_ERROR_NOT_FOUND) {
|
| + std::move(put_context->callback).Run(error);
|
| + return;
|
| + }
|
|
|
| std::unique_ptr<disk_cache::Entry*> scoped_entry_ptr(
|
| new disk_cache::Entry*());
|
| @@ -1132,11 +1258,11 @@ void CacheStorageCache::PutDidDoomEntry(std::unique_ptr<PutContext> put_context,
|
| base::Passed(std::move(scoped_entry_ptr)),
|
| base::Passed(std::move(put_context))));
|
|
|
| - int create_rv = backend_ptr->CreateEntry(request_ptr->url.spec(), entry_ptr,
|
| - create_entry_callback);
|
| + int rv = backend_ptr->CreateEntry(request_ptr->url.spec(), entry_ptr,
|
| + create_entry_callback);
|
|
|
| - if (create_rv != net::ERR_IO_PENDING)
|
| - create_entry_callback.Run(create_rv);
|
| + if (rv != net::ERR_IO_PENDING)
|
| + create_entry_callback.Run(rv);
|
| }
|
|
|
| void CacheStorageCache::PutDidCreateEntry(
|
| @@ -1223,6 +1349,11 @@ void CacheStorageCache::PutDidWriteHeaders(
|
|
|
| if (rv > 0)
|
| storage::RecordBytesWritten(kRecordBytesLabel, rv);
|
| + if (ShouldPadResourceSize(put_context->response.get())) {
|
| + cache_padding_ += CalculateResponsePadding(*put_context->response,
|
| + cache_padding_key_.get(),
|
| + 0 /* side_data_size */);
|
| + }
|
|
|
| // The metadata is written, now for the response content. The data is streamed
|
| // from the blob into the cache entry.
|
| @@ -1274,6 +1405,60 @@ void CacheStorageCache::PutDidWriteBlobToCache(
|
| base::BindOnce(std::move(put_context->callback), CACHE_STORAGE_OK));
|
| }
|
|
|
| +void CacheStorageCache::CalculateCacheSizePadding(
|
| + SizePaddingCallback got_sizes_callback) {
|
| + net::CompletionCallback got_size_callback =
|
| + base::AdaptCallbackForRepeating(base::BindOnce(
|
| + &CacheStorageCache::CalculateCacheSizePaddingGotSize,
|
| + weak_ptr_factory_.GetWeakPtr(), std::move(got_sizes_callback)));
|
| +
|
| + int rv = backend_->CalculateSizeOfAllEntries(got_size_callback);
|
| + if (rv != net::ERR_IO_PENDING)
|
| + got_size_callback.Run(rv);
|
| +}
|
| +
|
| +void CacheStorageCache::CalculateCacheSizePaddingGotSize(
|
| + SizePaddingCallback callback,
|
| + int cache_size) {
|
| + // Enumerating entries is only done during cache initialization and only if
|
| + // necessary.
|
| + DCHECK_EQ(backend_state_, BACKEND_UNINITIALIZED);
|
| + std::unique_ptr<ServiceWorkerFetchRequest> request;
|
| + CacheStorageCacheQueryParams options;
|
| + options.ignore_search = true;
|
| + QueryCache(std::move(request), options, QUERY_CACHE_RESPONSES_NO_BODIES,
|
| + base::BindOnce(&CacheStorageCache::PaddingDidQueryCache,
|
| + weak_ptr_factory_.GetWeakPtr(), std::move(callback),
|
| + cache_size));
|
| +}
|
| +
|
| +void CacheStorageCache::PaddingDidQueryCache(
|
| + SizePaddingCallback callback,
|
| + int cache_size,
|
| + CacheStorageError error,
|
| + std::unique_ptr<QueryCacheResults> query_cache_results) {
|
| + int64_t cache_padding = 0;
|
| + if (error == CACHE_STORAGE_OK) {
|
| + for (const auto& result : *query_cache_results) {
|
| + if (ShouldPadResourceSize(result.response.get())) {
|
| + int32_t side_data_size =
|
| + result.entry ? result.entry->GetDataSize(INDEX_SIDE_DATA) : 0;
|
| + cache_padding += CalculateResponsePadding(
|
| + *result.response, cache_padding_key_.get(), side_data_size);
|
| + }
|
| + }
|
| + }
|
| +
|
| + std::move(callback).Run(cache_size, cache_padding);
|
| +}
|
| +
|
| +void CacheStorageCache::CalculateCacheSize(
|
| + const net::CompletionCallback& callback) {
|
| + int rv = backend_->CalculateSizeOfAllEntries(callback);
|
| + if (rv != net::ERR_IO_PENDING)
|
| + callback.Run(rv);
|
| +}
|
| +
|
| void CacheStorageCache::UpdateCacheSize(base::OnceClosure callback) {
|
| if (backend_state_ != BACKEND_OPEN)
|
| return;
|
| @@ -1281,14 +1466,10 @@ void CacheStorageCache::UpdateCacheSize(base::OnceClosure callback) {
|
| // Note that the callback holds a cache handle to keep the cache alive during
|
| // the operation since this UpdateCacheSize is often run after an operation
|
| // completes and runs its callback.
|
| - net::CompletionCallback size_callback = base::AdaptCallbackForRepeating(
|
| + CalculateCacheSize(base::AdaptCallbackForRepeating(
|
| base::BindOnce(&CacheStorageCache::UpdateCacheSizeGotSize,
|
| weak_ptr_factory_.GetWeakPtr(),
|
| - base::Passed(CreateCacheHandle()), std::move(callback)));
|
| -
|
| - int rv = backend_->CalculateSizeOfAllEntries(size_callback);
|
| - if (rv != net::ERR_IO_PENDING)
|
| - std::move(size_callback).Run(rv);
|
| + base::Passed(CreateCacheHandle()), std::move(callback))));
|
| }
|
|
|
| void CacheStorageCache::UpdateCacheSizeGotSize(
|
| @@ -1296,17 +1477,16 @@ void CacheStorageCache::UpdateCacheSizeGotSize(
|
| base::OnceClosure callback,
|
| int current_cache_size) {
|
| DCHECK_NE(current_cache_size, CacheStorage::kSizeUnknown);
|
| - int64_t old_cache_size = cache_size_;
|
| cache_size_ = current_cache_size;
|
| -
|
| - int64_t size_delta = current_cache_size - old_cache_size;
|
| + int64_t size_delta = PaddedCacheSize() - last_reported_size_;
|
| + last_reported_size_ = PaddedCacheSize();
|
|
|
| quota_manager_proxy_->NotifyStorageModified(
|
| storage::QuotaClient::kServiceWorkerCache, origin_,
|
| storage::kStorageTypeTemporary, size_delta);
|
|
|
| if (cache_observer_)
|
| - cache_observer_->CacheSizeUpdated(this, current_cache_size);
|
| + cache_observer_->CacheSizeUpdated(this);
|
|
|
| std::move(callback).Run();
|
| }
|
| @@ -1340,7 +1520,8 @@ void CacheStorageCache::DeleteImpl(
|
| }
|
|
|
| QueryCache(
|
| - std::move(request), match_params, QueryCacheType::CACHE_ENTRIES,
|
| + std::move(request), match_params,
|
| + QUERY_CACHE_ENTRIES | QUERY_CACHE_RESPONSES_NO_BODIES,
|
| base::BindOnce(&CacheStorageCache::DeleteDidQueryCache,
|
| weak_ptr_factory_.GetWeakPtr(), std::move(callback)));
|
| }
|
| @@ -1361,6 +1542,11 @@ void CacheStorageCache::DeleteDidQueryCache(
|
|
|
| for (auto& result : *query_cache_results) {
|
| disk_cache::ScopedEntryPtr entry = std::move(result.entry);
|
| + if (ShouldPadResourceSize(result.response.get())) {
|
| + cache_padding_ -=
|
| + CalculateResponsePadding(*result.response, cache_padding_key_.get(),
|
| + entry->GetDataSize(INDEX_SIDE_DATA));
|
| + }
|
| entry->Doom();
|
| }
|
|
|
| @@ -1379,7 +1565,7 @@ void CacheStorageCache::KeysImpl(
|
| }
|
|
|
| QueryCache(
|
| - std::move(request), options, QueryCacheType::REQUESTS,
|
| + std::move(request), options, QUERY_CACHE_REQUESTS,
|
| base::BindOnce(&CacheStorageCache::KeysDidQueryCache,
|
| weak_ptr_factory_.GetWeakPtr(), std::move(callback)));
|
| }
|
| @@ -1419,7 +1605,14 @@ void CacheStorageCache::DeleteBackendCompletedIO() {
|
| void CacheStorageCache::SizeImpl(SizeCallback callback) {
|
| DCHECK_NE(BACKEND_UNINITIALIZED, backend_state_);
|
|
|
| - int64_t size = backend_state_ == BACKEND_OPEN ? cache_size_ : 0;
|
| + // TODO(cmumford): Can CacheStorage::kSizeUnknown be returned instead of zero?
|
| + if (backend_state_ != BACKEND_OPEN) {
|
| + base::ThreadTaskRunnerHandle::Get()->PostTask(
|
| + FROM_HERE, base::BindOnce(std::move(callback), 0));
|
| + return;
|
| + }
|
| +
|
| + int64_t size = backend_state_ == BACKEND_OPEN ? PaddedCacheSize() : 0;
|
| base::ThreadTaskRunnerHandle::Get()->PostTask(
|
| FROM_HERE, base::BindOnce(std::move(callback), size));
|
| }
|
| @@ -1492,14 +1685,14 @@ void CacheStorageCache::InitDidCreateBackend(
|
| return;
|
| }
|
|
|
| - net::CompletionCallback size_callback =
|
| - base::AdaptCallbackForRepeating(base::BindOnce(
|
| - &CacheStorageCache::InitGotCacheSize, weak_ptr_factory_.GetWeakPtr(),
|
| - std::move(callback), cache_create_error));
|
| + auto calculate_size_callback =
|
| + base::AdaptCallbackForRepeating(std::move(callback));
|
| + int rv = backend_->CalculateSizeOfAllEntries(base::Bind(
|
| + &CacheStorageCache::InitGotCacheSize, weak_ptr_factory_.GetWeakPtr(),
|
| + calculate_size_callback, cache_create_error));
|
|
|
| - int rv = backend_->CalculateSizeOfAllEntries(size_callback);
|
| if (rv != net::ERR_IO_PENDING)
|
| - std::move(size_callback).Run(rv);
|
| + InitGotCacheSize(calculate_size_callback, cache_create_error, rv);
|
| }
|
|
|
| void CacheStorageCache::InitGotCacheSize(base::OnceClosure callback,
|
| @@ -1515,10 +1708,41 @@ void CacheStorageCache::InitGotCacheSize(base::OnceClosure callback,
|
| << " does not match size from index: " << cache_size_;
|
| UMA_HISTOGRAM_COUNTS_10M("ServiceWorkerCache.IndexSizeDifference",
|
| std::abs(cache_size_ - cache_size));
|
| - // Disabled for crbug.com/681900.
|
| - // DCHECK_EQ(cache_size_, cache_size);
|
| + if (cache_size_ != cache_size) {
|
| + // We assume that if the sizes match then the cached padding is still
|
| + // correct. If not then we recalculate the padding.
|
| + CalculateCacheSizePaddingGotSize(
|
| + base::BindOnce(&CacheStorageCache::InitGotCacheSizeAndPadding,
|
| + weak_ptr_factory_.GetWeakPtr(), std::move(callback),
|
| + cache_create_error),
|
| + cache_size);
|
| + return;
|
| + }
|
| }
|
| +
|
| + if (cache_padding_ == CacheStorage::kSizeUnknown || cache_padding_ < 0) {
|
| + CalculateCacheSizePaddingGotSize(
|
| + base::BindOnce(&CacheStorageCache::InitGotCacheSizeAndPadding,
|
| + weak_ptr_factory_.GetWeakPtr(), std::move(callback),
|
| + cache_create_error),
|
| + cache_size);
|
| + return;
|
| + }
|
| +
|
| + // If cached size matches actual size then assume cached padding is still
|
| + // correct.
|
| + InitGotCacheSizeAndPadding(std::move(callback), cache_create_error,
|
| + cache_size, cache_padding_);
|
| +}
|
| +
|
| +void CacheStorageCache::InitGotCacheSizeAndPadding(
|
| + base::OnceClosure callback,
|
| + CacheStorageError cache_create_error,
|
| + int64_t cache_size,
|
| + int64_t cache_padding) {
|
| cache_size_ = cache_size;
|
| + cache_padding_ = cache_padding;
|
| +
|
| initializing_ = false;
|
| backend_state_ = (cache_create_error == CACHE_STORAGE_OK && backend_ &&
|
| backend_state_ == BACKEND_UNINITIALIZED)
|
| @@ -1529,7 +1753,7 @@ void CacheStorageCache::InitGotCacheSize(base::OnceClosure callback,
|
| cache_create_error, CACHE_STORAGE_ERROR_LAST + 1);
|
|
|
| if (cache_observer_)
|
| - cache_observer_->CacheSizeUpdated(this, cache_size_);
|
| + cache_observer_->CacheSizeUpdated(this);
|
|
|
| std::move(callback).Run();
|
| }
|
| @@ -1567,4 +1791,13 @@ CacheStorageCache::CreateCacheHandle() {
|
| return cache_storage_->CreateCacheHandle(this);
|
| }
|
|
|
| +int64_t CacheStorageCache::PaddedCacheSize() const {
|
| + DCHECK_NE(BACKEND_UNINITIALIZED, backend_state_);
|
| + if (cache_size_ == CacheStorage::kSizeUnknown ||
|
| + cache_padding_ == CacheStorage::kSizeUnknown) {
|
| + return CacheStorage::kSizeUnknown;
|
| + }
|
| + return cache_size_ + cache_padding_;
|
| +}
|
| +
|
| } // namespace content
|
|
|