Chromium Code Reviews
chromiumcodereview-hr@appspot.gserviceaccount.com (chromiumcodereview-hr) | Please choose your nickname with Settings | Help | Chromium Project | Gerrit Changes | Sign out
(1519)

Unified Diff: content/browser/cache_storage/cache_storage_cache.cc

Issue 2901083002: [CacheStorage] Pad and bin opaque resource sizes. (Closed)
Patch Set: Storing padding key in cache. Created 3 years, 6 months ago
Use n/p to move between diff chunks; N/P to move between comments. Draft comments are only viewable by you.
Jump to:
View side-by-side diff with in-line comments
Download patch
Index: content/browser/cache_storage/cache_storage_cache.cc
diff --git a/content/browser/cache_storage/cache_storage_cache.cc b/content/browser/cache_storage/cache_storage_cache.cc
index a120339b5d9c1bccf7d60979c248d805a032b29f..30cb0898591622da1030f610ce7d7e30c3485cd5 100644
--- a/content/browser/cache_storage/cache_storage_cache.cc
+++ b/content/browser/cache_storage/cache_storage_cache.cc
@@ -6,6 +6,7 @@
#include <stddef.h>
#include <algorithm>
+#include <functional>
#include <limits>
#include <memory>
#include <string>
@@ -15,6 +16,7 @@
#include "base/bind_helpers.h"
#include "base/files/file_path.h"
#include "base/guid.h"
+#include "base/lazy_instance.h"
#include "base/macros.h"
#include "base/memory/ptr_util.h"
#include "base/metrics/histogram_macros.h"
@@ -28,6 +30,8 @@
#include "content/browser/cache_storage/cache_storage_scheduler.h"
#include "content/public/browser/browser_thread.h"
#include "content/public/common/referrer.h"
+#include "crypto/hmac.h"
+#include "crypto/random.h"
#include "net/base/completion_callback.h"
#include "net/base/io_buffer.h"
#include "net/base/net_errors.h"
@@ -254,6 +258,56 @@ std::unique_ptr<ServiceWorkerResponse> CreateResponse(
metadata.response().cors_exposed_header_names().end()));
}
+// The sizes of opaque (non-cors) resource responses are padded in order
+// to obfuscate their actual sizes.
+bool ShouldPadResourceSize(const ServiceWorkerResponse& response) {
+ return (response.response_type ==
+ blink::kWebServiceWorkerResponseTypeOpaque ||
+ response.response_type ==
+ blink::kWebServiceWorkerResponseTypeOpaqueRedirect) &&
+ !response.url_list.empty();
+}
+
+// Returns a hash of the |response| values to be used for the resource padding
+// calculation.
+std::string CalculateResponsePaddingHash(
+ const ServiceWorkerResponse& response) {
+ DCHECK(!response.url_list.empty());
+
+ size_t h = std::hash<std::string>{}(response.url_list.back().spec());
+ return std::string(reinterpret_cast<const char*>(&h), sizeof(h));
+}
+
+class PaddingHMACKey : public std::string {
+ public:
+ PaddingHMACKey() {
+ const size_t kKeyLen = 20;
+ reserve(kKeyLen);
+ crypto::RandBytes(&(*this)[0], capacity());
+ }
+};
+
+std::string CalculatePaddingHMAC(const std::string& value,
+ const std::string& hmac_key) {
+ crypto::HMAC hmac(crypto::HMAC::SHA256);
+ std::string result(hmac.DigestLength(), '\0');
+ if (!hmac.Init(hmac_key) ||
+ !hmac.Sign(value, reinterpret_cast<uint8_t*>(&result[0]),
+ result.length())) {
+ LOG(FATAL) << "Failed to calculate HMAC.";
+ }
+ return result;
+}
+
+// Converts the |response_hmac| bytes into a value from the random distribution
+// determined by threat analysis.
+int64_t ResponsePaddingDistribution(const std::string& response_hmac) {
+ size_t response_hash = std::hash<std::string>{}(response_hmac);
+ const size_t kPaddingRange = 400 * 1024;
+ const size_t kMinPadding = 20 * 1024;
jkarlin 2017/06/08 18:58:48 I don't believe having a min padding > 0 has any s
cmumford 2017/06/12 18:09:31 Done.
+ return kMinPadding + (response_hash % kPaddingRange);
+}
+
} // namespace
// The state needed to pass between CacheStorageCache::Put callbacks.
@@ -291,10 +345,12 @@ struct CacheStorageCache::QueryCacheResult {
struct CacheStorageCache::QueryCacheContext {
QueryCacheContext(std::unique_ptr<ServiceWorkerFetchRequest> request,
const CacheStorageCacheQueryParams& options,
- const QueryCacheCallback& callback)
+ const QueryCacheCallback& callback,
+ uint32_t query_types)
: request(std::move(request)),
options(options),
callback(callback),
+ query_types(query_types),
matches(base::MakeUnique<QueryCacheResults>()) {}
~QueryCacheContext() {
@@ -311,7 +367,7 @@ struct CacheStorageCache::QueryCacheContext {
std::unique_ptr<ServiceWorkerFetchRequest> request;
CacheStorageCacheQueryParams options;
QueryCacheCallback callback;
- QueryCacheType query_type;
+ uint32_t query_types = 0x0;
size_t estimated_out_bytes = 0;
// Iteration state
@@ -336,7 +392,8 @@ std::unique_ptr<CacheStorageCache> CacheStorageCache::CreateMemoryCache(
CacheStorageCache* cache = new CacheStorageCache(
origin, cache_name, base::FilePath(), cache_storage,
std::move(request_context_getter), std::move(quota_manager_proxy),
- blob_context, 0 /* cache_size */);
+ blob_context, 0 /* cache_size */, 0 /* cache_padding */,
+ SessionPaddingHMACKey());
cache->SetObserver(cache_storage);
cache->InitBackend();
return base::WrapUnique(cache);
@@ -351,15 +408,24 @@ std::unique_ptr<CacheStorageCache> CacheStorageCache::CreatePersistentCache(
scoped_refptr<net::URLRequestContextGetter> request_context_getter,
scoped_refptr<storage::QuotaManagerProxy> quota_manager_proxy,
base::WeakPtr<storage::BlobStorageContext> blob_context,
- int64_t cache_size) {
+ int64_t cache_size,
+ int64_t cache_padding,
+ const std::string& cache_padding_key) {
CacheStorageCache* cache = new CacheStorageCache(
origin, cache_name, path, cache_storage,
std::move(request_context_getter), std::move(quota_manager_proxy),
- blob_context, cache_size);
+ blob_context, cache_size, cache_padding, cache_padding_key);
cache->SetObserver(cache_storage), cache->InitBackend();
return base::WrapUnique(cache);
}
+// static
+const std::string& CacheStorageCache::SessionPaddingHMACKey() {
+ static base::LazyInstance<PaddingHMACKey>::Leaky s_key =
+ LAZY_INSTANCE_INITIALIZER;
+ return s_key.Get();
+}
+
base::WeakPtr<CacheStorageCache> CacheStorageCache::AsWeakPtr() {
return weak_ptr_factory_.GetWeakPtr();
}
@@ -587,7 +653,9 @@ CacheStorageCache::CacheStorageCache(
scoped_refptr<net::URLRequestContextGetter> request_context_getter,
scoped_refptr<storage::QuotaManagerProxy> quota_manager_proxy,
base::WeakPtr<storage::BlobStorageContext> blob_context,
- int64_t cache_size)
+ int64_t cache_size,
+ int64_t cache_padding,
+ const std::string& cache_padding_key)
: origin_(origin),
cache_name_(cache_name),
path_(path),
@@ -598,6 +666,8 @@ CacheStorageCache::CacheStorageCache(
scheduler_(
new CacheStorageScheduler(CacheStorageSchedulerClient::CLIENT_CACHE)),
cache_size_(cache_size),
+ cache_padding_(cache_padding),
+ cache_padding_key_(cache_padding_key),
max_query_size_bytes_(kMaxQueryCacheResultBytes),
cache_observer_(nullptr),
memory_only_(path.empty()),
@@ -611,10 +681,12 @@ CacheStorageCache::CacheStorageCache(
void CacheStorageCache::QueryCache(
std::unique_ptr<ServiceWorkerFetchRequest> request,
const CacheStorageCacheQueryParams& options,
- QueryCacheType query_type,
+ uint32_t query_types,
const QueryCacheCallback& callback) {
- DCHECK_NE(BACKEND_UNINITIALIZED, backend_state_);
- if (backend_state_ != BACKEND_OPEN) {
+ DCHECK_NE(
+ QUERY_CACHE_ENTRIES | QUERY_CACHE_RESPONSES_WITH_BODIES,
+ query_types & (QUERY_CACHE_ENTRIES | QUERY_CACHE_RESPONSES_WITH_BODIES));
+ if (backend_state_ == BACKEND_CLOSED) {
callback.Run(CACHE_STORAGE_ERROR_STORAGE,
std::unique_ptr<QueryCacheResults>());
return;
@@ -627,9 +699,8 @@ void CacheStorageCache::QueryCache(
}
ServiceWorkerFetchRequest* request_ptr = request.get();
- std::unique_ptr<QueryCacheContext> query_cache_context(
- new QueryCacheContext(std::move(request), options, callback));
- query_cache_context->query_type = query_type;
+ std::unique_ptr<QueryCacheContext> query_cache_context(new QueryCacheContext(
+ std::move(request), options, callback, query_types));
if (query_cache_context->request &&
!query_cache_context->request->url.is_empty() && !options.ignore_search) {
@@ -710,7 +781,7 @@ void CacheStorageCache::QueryCacheFilterEntry(
disk_cache::ScopedEntryPtr entry(query_cache_context->enumerated_entry);
query_cache_context->enumerated_entry = nullptr;
- if (backend_state_ != BACKEND_OPEN) {
+ if (backend_state_ == BACKEND_CLOSED) {
QueryCacheCallback callback = query_cache_context->callback;
callback.Run(CACHE_STORAGE_ERROR_NOT_FOUND,
std::move(query_cache_context->matches));
@@ -772,54 +843,48 @@ void CacheStorageCache::QueryCacheDidReadMetadata(
return;
}
- if (query_cache_context->query_type == QueryCacheType::CACHE_ENTRIES) {
- match->request.reset();
- match->response.reset();
+ if (query_cache_context->query_types & QUERY_CACHE_ENTRIES)
match->entry = std::move(entry);
- QueryCacheOpenNextEntry(std::move(query_cache_context));
- return;
- }
- query_cache_context->estimated_out_bytes +=
- match->request->EstimatedStructSize();
- if (query_cache_context->estimated_out_bytes > max_query_size_bytes_) {
- query_cache_context->callback.Run(CACHE_STORAGE_ERROR_QUERY_TOO_LARGE,
- std::unique_ptr<QueryCacheResults>());
- return;
- }
-
- if (query_cache_context->query_type == QueryCacheType::REQUESTS) {
- match->response.reset();
- QueryCacheOpenNextEntry(std::move(query_cache_context));
- return;
+ if (query_cache_context->query_types & QUERY_CACHE_REQUESTS) {
+ query_cache_context->estimated_out_bytes +=
+ match->request->EstimatedStructSize();
+ if (query_cache_context->estimated_out_bytes > max_query_size_bytes_) {
+ query_cache_context->callback.Run(CACHE_STORAGE_ERROR_QUERY_TOO_LARGE,
+ std::unique_ptr<QueryCacheResults>());
+ return;
+ }
+ } else {
+ match->request.reset();
}
- DCHECK_EQ(QueryCacheType::REQUESTS_AND_RESPONSES,
- query_cache_context->query_type);
-
- query_cache_context->estimated_out_bytes +=
- match->response->EstimatedStructSize();
- if (query_cache_context->estimated_out_bytes > max_query_size_bytes_) {
- query_cache_context->callback.Run(CACHE_STORAGE_ERROR_QUERY_TOO_LARGE,
- std::unique_ptr<QueryCacheResults>());
- return;
- }
+ if (query_cache_context->query_types & QUERY_CACHE_RESPONSES_WITH_BODIES) {
+ query_cache_context->estimated_out_bytes +=
+ match->response->EstimatedStructSize();
+ if (query_cache_context->estimated_out_bytes > max_query_size_bytes_) {
+ query_cache_context->callback.Run(CACHE_STORAGE_ERROR_QUERY_TOO_LARGE,
+ std::unique_ptr<QueryCacheResults>());
+ return;
+ }
+ if (entry->GetDataSize(INDEX_RESPONSE_BODY) == 0) {
+ QueryCacheOpenNextEntry(std::move(query_cache_context));
+ return;
+ }
- if (entry->GetDataSize(INDEX_RESPONSE_BODY) == 0) {
- QueryCacheOpenNextEntry(std::move(query_cache_context));
- return;
- }
+ if (!blob_storage_context_) {
+ query_cache_context->callback.Run(CACHE_STORAGE_ERROR_STORAGE,
+ std::unique_ptr<QueryCacheResults>());
+ return;
+ }
- if (!blob_storage_context_) {
- query_cache_context->callback.Run(CACHE_STORAGE_ERROR_STORAGE,
- base::MakeUnique<QueryCacheResults>());
- return;
+ std::unique_ptr<storage::BlobDataHandle> blob_data_handle =
+ PopulateResponseBody(std::move(entry), match->response.get());
+ match->blob_handle = std::move(blob_data_handle);
+ } else if (!(query_cache_context->query_types &
+ QUERY_CACHE_RESPONSES_NO_BODIES)) {
+ match->response.reset();
}
- std::unique_ptr<storage::BlobDataHandle> blob_data_handle =
- PopulateResponseBody(std::move(entry), match->response.get());
- match->blob_handle = std::move(blob_data_handle);
-
QueryCacheOpenNextEntry(std::move(query_cache_context));
}
@@ -829,6 +894,16 @@ bool CacheStorageCache::QueryCacheResultCompare(const QueryCacheResult& lhs,
return lhs.entry_time < rhs.entry_time;
}
+// static
+int64_t CacheStorageCache::CalculateResponsePadding(
+ const ServiceWorkerResponse& response,
+ const std::string& padding_key) {
+ if (!ShouldPadResourceSize(response))
+ return 0;
+ return ResponsePaddingDistribution(CalculatePaddingHMAC(
+ CalculateResponsePaddingHash(response), padding_key));
+}
+
void CacheStorageCache::MatchImpl(
std::unique_ptr<ServiceWorkerFetchRequest> request,
const CacheStorageCacheQueryParams& match_params,
@@ -875,7 +950,7 @@ void CacheStorageCache::MatchAllImpl(
}
QueryCache(std::move(request), options,
- QueryCacheType::REQUESTS_AND_RESPONSES,
+ QUERY_CACHE_REQUESTS | QUERY_CACHE_RESPONSES_WITH_BODIES,
base::Bind(&CacheStorageCache::MatchAllDidQueryCache,
weak_ptr_factory_.GetWeakPtr(), callback));
}
@@ -1064,25 +1139,37 @@ void CacheStorageCache::PutImpl(std::unique_ptr<PutContext> put_context) {
return;
}
- std::string key = put_context->request->url.spec();
-
- net::CompletionCallback callback = base::Bind(
- &CacheStorageCache::PutDidDoomEntry, weak_ptr_factory_.GetWeakPtr(),
- base::Passed(std::move(put_context)));
-
- int rv = backend_->DoomEntry(key, callback);
- if (rv != net::ERR_IO_PENDING)
- callback.Run(rv);
+ // Explicitly delete the incumbent resource (which may not exist). This is
+ // only done so that its padding will be decremented from the calculated
+ // cache padding.
+ auto delete_request = base::MakeUnique<ServiceWorkerFetchRequest>(
+ put_context->request->url, "", ServiceWorkerHeaderMap(), Referrer(),
+ false);
+
+ CacheStorageCacheQueryParams query_options;
+ query_options.ignore_method = true;
+ query_options.ignore_vary = true;
+ DeleteImpl(std::move(delete_request), query_options,
+ base::Bind(&CacheStorageCache::PutDidDeleteEntry,
+ weak_ptr_factory_.GetWeakPtr(),
+ base::Passed(std::move(put_context))));
}
-void CacheStorageCache::PutDidDoomEntry(std::unique_ptr<PutContext> put_context,
- int rv) {
+void CacheStorageCache::PutDidDeleteEntry(
+ std::unique_ptr<PutContext> put_context,
+ CacheStorageError error,
+ std::unique_ptr<QueryCacheResults> query_results) {
if (backend_state_ != BACKEND_OPEN) {
put_context->callback.Run(CACHE_STORAGE_ERROR_STORAGE);
return;
}
- // |rv| is ignored as doom entry can fail if the entry doesn't exist.
+ if (error != CACHE_STORAGE_OK && error != CACHE_STORAGE_ERROR_NOT_FOUND) {
+ cache_padding_ -=
+ CalculateResponsePadding(*put_context->response, cache_padding_key_);
+ put_context->callback.Run(error);
+ return;
+ }
std::unique_ptr<disk_cache::Entry*> scoped_entry_ptr(
new disk_cache::Entry*());
@@ -1095,11 +1182,11 @@ void CacheStorageCache::PutDidDoomEntry(std::unique_ptr<PutContext> put_context,
base::Passed(std::move(scoped_entry_ptr)),
base::Passed(std::move(put_context)));
- int create_rv = backend_ptr->CreateEntry(request_ptr->url.spec(), entry_ptr,
- create_entry_callback);
+ int rv = backend_ptr->CreateEntry(request_ptr->url.spec(), entry_ptr,
+ create_entry_callback);
- if (create_rv != net::ERR_IO_PENDING)
- create_entry_callback.Run(create_rv);
+ if (rv != net::ERR_IO_PENDING)
+ create_entry_callback.Run(rv);
}
void CacheStorageCache::PutDidCreateEntry(
@@ -1184,6 +1271,8 @@ void CacheStorageCache::PutDidWriteHeaders(
if (rv > 0)
storage::RecordBytesWritten(kRecordBytesLabel, rv);
+ cache_padding_ +=
+ CalculateResponsePadding(*put_context->response, cache_padding_key_);
// The metadata is written, now for the response content. The data is streamed
// from the blob into the cache entry.
@@ -1233,6 +1322,54 @@ void CacheStorageCache::PutDidWriteBlobToCache(
UpdateCacheSize(base::Bind(put_context->callback, CACHE_STORAGE_OK));
}
+void CacheStorageCache::CalculateCacheSizePadding(
+ const SizePaddingCallback& got_sizes_callback) {
+ net::CompletionCallback got_size_callback =
+ base::Bind(&CacheStorageCache::CalculateCacheSizePaddingGotSize,
+ weak_ptr_factory_.GetWeakPtr(), std::move(got_sizes_callback));
+
+ int rv = backend_->CalculateSizeOfAllEntries(got_size_callback);
+ if (rv != net::ERR_IO_PENDING)
+ got_size_callback.Run(rv);
+}
+
+void CacheStorageCache::CalculateCacheSizePaddingGotSize(
+ const SizePaddingCallback& callback,
+ int cache_size) {
+ // Enumerating entries is only done during cache initialization.
+ DCHECK_EQ(backend_state_, BACKEND_UNINITIALIZED);
+ std::unique_ptr<ServiceWorkerFetchRequest> request;
+ CacheStorageCacheQueryParams options;
+ options.ignore_search = true;
+ QueryCache(std::move(request), options,
+ QUERY_CACHE_REQUESTS | QUERY_CACHE_RESPONSES_WITH_BODIES,
+ base::Bind(&CacheStorageCache::PaddingDidQueryCache,
+ weak_ptr_factory_.GetWeakPtr(), std::move(callback),
+ cache_size));
+}
+
+void CacheStorageCache::PaddingDidQueryCache(
+ const SizePaddingCallback& callback,
+ int cache_size,
+ CacheStorageError error,
+ std::unique_ptr<QueryCacheResults> query_cache_results) {
+ int64_t cache_padding = 0;
+ if (error == CACHE_STORAGE_OK) {
+ for (const auto& result : *query_cache_results)
+ cache_padding +=
+ CalculateResponsePadding(*result.response, cache_padding_key_);
+ }
+
+ callback.Run(cache_size, cache_padding);
+}
+
+void CacheStorageCache::CalculateCacheSize(
+ const net::CompletionCallback& callback) {
+ int rv = backend_->CalculateSizeOfAllEntries(callback);
+ if (rv != net::ERR_IO_PENDING)
+ callback.Run(rv);
+}
+
void CacheStorageCache::UpdateCacheSize(const base::Closure& callback) {
if (backend_state_ != BACKEND_OPEN)
return;
@@ -1240,13 +1377,9 @@ void CacheStorageCache::UpdateCacheSize(const base::Closure& callback) {
// Note that the callback holds a cache handle to keep the cache alive during
// the operation since this UpdateCacheSize is often run after an operation
// completes and runs its callback.
- int rv = backend_->CalculateSizeOfAllEntries(
- base::Bind(&CacheStorageCache::UpdateCacheSizeGotSize,
- weak_ptr_factory_.GetWeakPtr(),
- base::Passed(CreateCacheHandle()), callback));
-
- if (rv != net::ERR_IO_PENDING)
- UpdateCacheSizeGotSize(CreateCacheHandle(), callback, rv);
+ CalculateCacheSize(base::Bind(&CacheStorageCache::UpdateCacheSizeGotSize,
+ weak_ptr_factory_.GetWeakPtr(),
+ base::Passed(CreateCacheHandle()), callback));
}
void CacheStorageCache::UpdateCacheSizeGotSize(
@@ -1254,17 +1387,16 @@ void CacheStorageCache::UpdateCacheSizeGotSize(
const base::Closure& callback,
int current_cache_size) {
DCHECK_NE(current_cache_size, CacheStorage::kSizeUnknown);
- int64_t old_cache_size = cache_size_;
cache_size_ = current_cache_size;
-
- int64_t size_delta = current_cache_size - old_cache_size;
+ int64_t size_delta = PaddedCacheSize() - last_reported_size_;
+ last_reported_size_ = PaddedCacheSize();
quota_manager_proxy_->NotifyStorageModified(
storage::QuotaClient::kServiceWorkerCache, origin_,
storage::kStorageTypeTemporary, size_delta);
if (cache_observer_)
- cache_observer_->CacheSizeUpdated(this, current_cache_size);
+ cache_observer_->CacheSizeUpdated(this, PaddedCacheSize());
callback.Run();
}
@@ -1281,47 +1413,63 @@ void CacheStorageCache::Delete(const CacheStorageBatchOperation& operation,
operation.request.headers, operation.request.referrer,
operation.request.is_reload));
+ QueryCacheCallback delete_callback =
+ base::Bind(&CacheStorageCache::DeleteDidDelete,
+ weak_ptr_factory_.GetWeakPtr(), callback);
+
scheduler_->ScheduleOperation(
base::Bind(&CacheStorageCache::DeleteImpl, weak_ptr_factory_.GetWeakPtr(),
base::Passed(std::move(request)), operation.match_params,
- scheduler_->WrapCallbackToRunNext(callback)));
+ scheduler_->WrapCallbackToRunNext(delete_callback)));
+}
+
+void CacheStorageCache::DeleteDidDelete(
+ const ErrorCallback& callback,
+ CacheStorageError error,
+ std::unique_ptr<QueryCacheResults> query_results) {
+ callback.Run(error);
}
void CacheStorageCache::DeleteImpl(
std::unique_ptr<ServiceWorkerFetchRequest> request,
const CacheStorageCacheQueryParams& match_params,
- const ErrorCallback& callback) {
+ const QueryCacheCallback& callback) {
DCHECK_NE(BACKEND_UNINITIALIZED, backend_state_);
if (backend_state_ != BACKEND_OPEN) {
- callback.Run(CACHE_STORAGE_ERROR_STORAGE);
+ callback.Run(CACHE_STORAGE_ERROR_STORAGE,
+ base::MakeUnique<QueryCacheResults>());
return;
}
- QueryCache(std::move(request), match_params, QueryCacheType::CACHE_ENTRIES,
+ QueryCache(std::move(request), match_params,
+ QUERY_CACHE_ENTRIES | QUERY_CACHE_RESPONSES_NO_BODIES,
base::Bind(&CacheStorageCache::DeleteDidQueryCache,
weak_ptr_factory_.GetWeakPtr(), callback));
}
void CacheStorageCache::DeleteDidQueryCache(
- const ErrorCallback& callback,
+ const QueryCacheCallback& callback,
CacheStorageError error,
std::unique_ptr<QueryCacheResults> query_cache_results) {
if (error != CACHE_STORAGE_OK) {
- callback.Run(error);
+ callback.Run(error, std::move(query_cache_results));
return;
}
if (query_cache_results->empty()) {
- callback.Run(CACHE_STORAGE_ERROR_NOT_FOUND);
+ callback.Run(CACHE_STORAGE_ERROR_NOT_FOUND, std::move(query_cache_results));
return;
}
for (auto& result : *query_cache_results) {
disk_cache::ScopedEntryPtr entry = std::move(result.entry);
+ cache_padding_ -=
+ CalculateResponsePadding(*result.response, cache_padding_key_);
entry->Doom();
}
- UpdateCacheSize(base::Bind(callback, CACHE_STORAGE_OK));
+ UpdateCacheSize(base::Bind(callback, CACHE_STORAGE_OK,
+ base::Passed(std::move(query_cache_results))));
}
void CacheStorageCache::KeysImpl(
@@ -1334,7 +1482,7 @@ void CacheStorageCache::KeysImpl(
return;
}
- QueryCache(std::move(request), options, QueryCacheType::REQUESTS,
+ QueryCache(std::move(request), options, QUERY_CACHE_REQUESTS,
base::Bind(&CacheStorageCache::KeysDidQueryCache,
weak_ptr_factory_.GetWeakPtr(), callback));
}
@@ -1367,7 +1515,14 @@ void CacheStorageCache::CloseImpl(const base::Closure& callback) {
void CacheStorageCache::SizeImpl(const SizeCallback& callback) {
DCHECK_NE(BACKEND_UNINITIALIZED, backend_state_);
- int64_t size = backend_state_ == BACKEND_OPEN ? cache_size_ : 0;
+ // TODO(cmumford): Can CacheStorage::kSizeUnknown be returned instead of zero?
+ if (backend_state_ != BACKEND_OPEN) {
+ base::ThreadTaskRunnerHandle::Get()->PostTask(FROM_HERE,
+ base::Bind(callback, 0));
+ return;
+ }
+
+ int64_t size = backend_state_ == BACKEND_OPEN ? PaddedCacheSize() : 0;
base::ThreadTaskRunnerHandle::Get()->PostTask(FROM_HERE,
base::Bind(callback, size));
}
@@ -1460,10 +1615,41 @@ void CacheStorageCache::InitGotCacheSize(const base::Closure& callback,
<< " does not match size from index: " << cache_size_;
UMA_HISTOGRAM_COUNTS_10M("ServiceWorkerCache.IndexSizeDifference",
std::abs(cache_size_ - cache_size));
- // Disabled for crbug.com/681900.
- // DCHECK_EQ(cache_size_, cache_size);
+ if (cache_size_ != cache_size) {
+ // If the actual size doesn't match the cached size, then assume the
+ // cached padding is also incorrect and recalculate.
+ CalculateCacheSizePaddingGotSize(
+ base::Bind(&CacheStorageCache::InitGotCacheSizeAndPadding,
+ weak_ptr_factory_.GetWeakPtr(), callback,
+ cache_create_error),
+ cache_size);
+ return;
+ }
}
+
+ if (cache_padding_ == CacheStorage::kSizeUnknown || cache_padding_ < 0) {
+ CalculateCacheSizePaddingGotSize(
+ base::Bind(&CacheStorageCache::InitGotCacheSizeAndPadding,
+ weak_ptr_factory_.GetWeakPtr(), callback,
+ cache_create_error),
+ cache_size);
+ return;
+ }
+
+ // If cached size matches actual size then assume cached padding is still
+ // correct.
+ InitGotCacheSizeAndPadding(callback, cache_create_error, cache_size,
+ cache_padding_);
+}
+
+void CacheStorageCache::InitGotCacheSizeAndPadding(
+ const base::Closure& callback,
+ CacheStorageError cache_create_error,
+ int64_t cache_size,
+ int64_t cache_padding) {
cache_size_ = cache_size;
+ cache_padding_ = cache_padding;
+
initializing_ = false;
backend_state_ = (cache_create_error == CACHE_STORAGE_OK && backend_ &&
backend_state_ == BACKEND_UNINITIALIZED)
@@ -1474,7 +1660,7 @@ void CacheStorageCache::InitGotCacheSize(const base::Closure& callback,
cache_create_error, CACHE_STORAGE_ERROR_LAST + 1);
if (cache_observer_)
- cache_observer_->CacheSizeUpdated(this, cache_size_);
+ cache_observer_->CacheSizeUpdated(this, PaddedCacheSize());
callback.Run();
}
@@ -1501,4 +1687,12 @@ CacheStorageCache::CreateCacheHandle() {
return cache_storage_->CreateCacheHandle(this);
}
+int64_t CacheStorageCache::PaddedCacheSize() const {
+ if (cache_size_ == CacheStorage::kSizeUnknown ||
+ cache_padding_ == CacheStorage::kSizeUnknown) {
+ return CacheStorage::kSizeUnknown;
+ }
+ return cache_size_ + cache_padding_;
+}
+
} // namespace content

Powered by Google App Engine
This is Rietveld 408576698