Chromium Code Reviews

Unified Diff: net/http/http_cache_writers.cc

Issue 2886483002: Adds a new class HttpCache::Writers for multiple cache transactions reading from the network. (Closed)
Patch Set: Feedback addressed Created 3 years, 7 months ago
Index: net/http/http_cache_writers.cc
diff --git a/net/http/http_cache_writers.cc b/net/http/http_cache_writers.cc
new file mode 100644
index 0000000000000000000000000000000000000000..35a4f33ad466571699675aaf4df897c977f628e0
--- /dev/null
+++ b/net/http/http_cache_writers.cc
@@ -0,0 +1,408 @@
+// Copyright 2017 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "net/http/http_cache_writers.h"
+
+#include <algorithm>
+#include <utility>
+
+#include "base/bind.h"
+#include "base/callback_helpers.h"
+#include "base/logging.h"
+#include "base/threading/thread_task_runner_handle.h"
+#include "net/disk_cache/disk_cache.h"
+#include "net/http/http_cache_transaction.h"
+
+namespace net {
+
+HttpCache::Writers::Writers(HttpCache* cache, ActiveEntry* entry)
+ : cache_(cache), entry_(entry), weak_factory_(this) {
+ io_callback_ =
+ base::Bind(&HttpCache::Writers::OnIOComplete, weak_factory_.GetWeakPtr());
+}
+
+HttpCache::Writers::~Writers() {}
+
+int HttpCache::Writers::Read(scoped_refptr<IOBuffer> buf,
+ int buf_len,
+ const CompletionCallback& callback,
+ Transaction* transaction) {
+ DCHECK(buf);
+ DCHECK_GT(buf_len, 0);
+ DCHECK(!callback.is_null());
+ DCHECK(transaction);
+
+  // If another transaction invoked a Read which is currently ongoing, then
+  // this transaction waits for that read to complete and gets its buffer
+  // filled with the data returned from it.
+ if (next_state_ != State::NONE) {
+ WaitingForRead waiting_transaction(transaction, buf, buf_len, callback);
+ waiting_for_read_.push_back(waiting_transaction);
+ return ERR_IO_PENDING;
+ }
+
+ DCHECK_EQ(next_state_, State::NONE);
+ DCHECK(callback_.is_null());
+ active_transaction_ = transaction;
+
+ read_buf_ = std::move(buf);
+ io_buf_len_ = buf_len;
+ next_state_ = State::NETWORK_READ;
+
+ int rv = DoLoop(OK);
+ if (rv == ERR_IO_PENDING) {
+ callback_ = callback;
+ }
+
+ return rv;
+}
+
+bool HttpCache::Writers::StopCaching(Transaction* transaction) {
+ // If this is the only transaction in Writers, then stopping will be
+ // successful. If not, then we will not stop caching since there are
+ // other consumers waiting to read from the cache.
+ if (all_writers_.size() == 1) {
+ DCHECK(all_writers_.count(transaction));
+ network_read_only_ = true;
+ return true;
+ }
+ return false;
+}
+
+void HttpCache::Writers::AddTransaction(
+ Transaction* transaction,
+ std::unique_ptr<HttpTransaction> network_transaction) {
+ DCHECK(transaction);
+ DCHECK(CanAddWriters());
+
+ std::pair<TransactionSet::iterator, bool> return_val =
+ all_writers_.insert(transaction);
+ DCHECK_EQ(return_val.second, true);
+
+ if (network_transaction)
+ network_transaction_ = std::move(network_transaction);
+ DCHECK(network_transaction_);
+
+ priority_ = std::max(transaction->priority(), priority_);
+ network_transaction_->SetPriority(priority_);
+}
+
+void HttpCache::Writers::RemoveTransaction(Transaction* transaction) {
+ if (!transaction)
+ return;
+
+  // The transaction should be part of |all_writers_|.
+ auto it = all_writers_.find(transaction);
+ DCHECK(it != all_writers_.end());
+ all_writers_.erase(transaction);
+ PriorityChanged();
+
+ if (active_transaction_ == transaction) {
+ active_transaction_ = nullptr;
+ callback_.Reset();
+ network_transaction_.reset();
Randy Smith (Not in Mondays) 2017/05/24 23:09:19 If this is done without changing state_ can't that
shivanisha 2017/05/31 19:21:26 Good catch. I can't seem to remember why I added i
+ return;
+ }
+
+ auto waiting_it = waiting_for_read_.begin();
+ for (; waiting_it != waiting_for_read_.end(); waiting_it++) {
+ if (transaction == waiting_it->transaction) {
+ waiting_for_read_.erase(waiting_it);
+ // If a waiting transaction existed, there should have been an
+ // active_transaction_.
+ DCHECK(active_transaction_);
+ return;
+ }
+ }
+}
+
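+// Recomputes the priority of the shared network transaction as the maximum
+// priority across all remaining writers.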
+void HttpCache::Writers::PriorityChanged() {
+ // Get the current highest priority.
+ RequestPriority current_highest = MINIMUM_PRIORITY;
+ for (auto* transaction : all_writers_)
+ current_highest = std::max(transaction->priority(), current_highest);
+
+ if (priority_ != current_highest) {
+ network_transaction_->SetPriority(current_highest);
+ priority_ = current_highest;
+ }
+}
+
+bool HttpCache::Writers::ContainsOnlyIdleWriters() const {
+ return waiting_for_read_.empty() && !active_transaction_;
+}
+
+void HttpCache::Writers::MoveIdleWritersToReaders() {
+ // Should be invoked after |waiting_for_read_| transactions and
+ // |active_transaction_| are processed so that all_writers_ only contains idle
+ // writers.
+ DCHECK(ContainsOnlyIdleWriters());
+ for (auto* idle_writer : all_writers_) {
+ entry_->readers.insert(idle_writer);
+ }
+ all_writers_.clear();
+}
+
+bool HttpCache::Writers::CanAddWriters() {
+ if (all_writers_.empty()) {
+    // Reset the flag to handle a race where |network_read_only_| was set to
+    // true and all transactions left, and then a new transaction is added
+    // before the entry could be destroyed; that new transaction should be
+    // allowed.
+ network_read_only_ = false;
+ return true;
+ }
+
+ return !is_exclusive_ && !network_read_only_;
+}
+
+void HttpCache::Writers::ProcessFailure(Transaction* transaction, int error) {
+ DCHECK(!transaction || transaction == active_transaction_);
+
+ // Notify waiting_for_read_ of the failure. Tasks will be posted for all the
+ // transactions.
+ ProcessWaitingForReadTransactions(error);
+
+  // Idle writers should fail when Read is invoked by their consumers.
+ SetIdleWritersFailState(error);
+ DCHECK(all_writers_.empty());
+}
+
+void HttpCache::Writers::TruncateEntry() {
+  // TODO(shivanisha): On integration, see if the entry really needs to be
+  // truncated along the lines of Transaction::AddTruncatedFlag, and then
+  // proceed.
+ DCHECK_EQ(next_state_, State::NONE);
+ next_state_ = State::CACHE_WRITE_TRUNCATED_RESPONSE;
+ DoLoop(OK);
+}
+
+LoadState HttpCache::Writers::GetWriterLoadState() {
+ return network_transaction_->GetLoadState();
+}
+
+HttpCache::Writers::WaitingForRead::WaitingForRead(
+ Transaction* cache_transaction,
+ scoped_refptr<IOBuffer> buf,
+ int len,
+ const CompletionCallback& consumer_callback)
+ : transaction(cache_transaction),
+ read_buf(std::move(buf)),
+ read_buf_len(len),
+ write_len(0),
+ callback(consumer_callback) {
+ DCHECK(cache_transaction);
+  DCHECK(read_buf);
+ DCHECK_GT(len, 0);
+ DCHECK(!consumer_callback.is_null());
+}
+
+HttpCache::Writers::WaitingForRead::~WaitingForRead() {}
+HttpCache::Writers::WaitingForRead::WaitingForRead(const WaitingForRead&) =
+ default;
+
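+// Runs the state machine. Each Do* handler sets |next_state_| before
+// returning; the loop exits when no next state is set or when an operation
+// completes asynchronously (ERR_IO_PENDING), in which case OnIOComplete()
+// re-enters DoLoop() with the result.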
+int HttpCache::Writers::DoLoop(int result) {
+ DCHECK(next_state_ != State::NONE);
+ int rv = result;
+ do {
+ State state = next_state_;
+ next_state_ = State::NONE;
+ switch (state) {
+ case State::NETWORK_READ:
+ DCHECK_EQ(OK, rv);
+ rv = DoNetworkRead();
+ break;
+ case State::NETWORK_READ_COMPLETE:
+ rv = DoNetworkReadComplete(rv);
+ break;
+ case State::CACHE_WRITE_DATA:
+ rv = DoCacheWriteData(rv);
+ break;
+ case State::CACHE_WRITE_DATA_COMPLETE:
+ rv = DoCacheWriteDataComplete(rv);
+ break;
+ case State::CACHE_WRITE_TRUNCATED_RESPONSE:
+ rv = DoCacheWriteTruncatedResponse();
+ break;
+ case State::CACHE_WRITE_TRUNCATED_RESPONSE_COMPLETE:
+ rv = DoCacheWriteTruncatedResponseComplete(rv);
+ break;
+ default:
+ NOTREACHED() << "bad state";
+ rv = ERR_FAILED;
+ break;
+ }
+ } while (next_state_ != State::NONE && rv != ERR_IO_PENDING);
+
+ if (rv != ERR_IO_PENDING && !callback_.is_null()) {
+    read_buf_ = nullptr;  // Release the buffer before invoking the callback.
+ base::ResetAndReturn(&callback_).Run(rv);
+ }
+ return rv;
+}
+
+int HttpCache::Writers::DoNetworkRead() {
+ next_state_ = State::NETWORK_READ_COMPLETE;
+ return network_transaction_->Read(read_buf_.get(), io_buf_len_, io_callback_);
+}
+
+int HttpCache::Writers::DoNetworkReadComplete(int result) {
+ if (result < 0) {
+ OnNetworkReadFailure(result);
+ return result;
+ }
+
+ next_state_ = State::CACHE_WRITE_DATA;
+ return result;
+}
+
+void HttpCache::Writers::OnNetworkReadFailure(int result) {
+  // At this point |active_transaction_| may or may not be alive. Since there
+  // may be no consumer to drive entry processing, it is invoked here directly.
+ cache_->DoneWithEntry(entry_, active_transaction_, true);
+}
+
+int HttpCache::Writers::DoCacheWriteData(int num_bytes) {
+ next_state_ = State::CACHE_WRITE_DATA_COMPLETE;
+ write_len_ = num_bytes;
+ if (!num_bytes || network_read_only_)
+ return num_bytes;
+
+ int current_size = entry_->disk_entry->GetDataSize(kResponseContentIndex);
+ return WriteToEntry(kResponseContentIndex, current_size, read_buf_.get(),
+ num_bytes, io_callback_);
+}
+
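+// Writes |data_len| bytes of |data| to the cache entry. For byte-range
+// (partial) requests the write is routed through PartialData so that it lands
+// at the correct offset of the sparse entry.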
+int HttpCache::Writers::WriteToEntry(int index,
+ int offset,
+ IOBuffer* data,
+ int data_len,
+ const CompletionCallback& callback) {
+ int rv = 0;
+
+ PartialData* partial = nullptr;
+  // The transaction must be alive if this is a partial request.
+  // TODO(shivanisha): When partial requests support parallel writing, this
+  // assumption will no longer be true.
+ if (active_transaction_)
+ partial = active_transaction_->partial();
+
+ if (!partial || !data_len) {
+ rv = entry_->disk_entry->WriteData(index, offset, data, data_len, callback,
+ true);
+ } else {
+ rv = partial->CacheWrite(entry_->disk_entry, data, data_len, callback);
+ }
+ return rv;
+}
+
+int HttpCache::Writers::DoCacheWriteDataComplete(int result) {
+ if (result != write_len_) {
+ OnCacheWriteFailure();
+
+ // |active_transaction_| can continue reading from the network.
+ result = write_len_;
+ } else {
+ OnDataReceived(result);
+ }
+ return result;
+}
+
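+// Serializes the response headers with the response marked as truncated and
+// writes them to the response-info stream of the entry, so the entry can be
+// resumed later.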
+int HttpCache::Writers::DoCacheWriteTruncatedResponse() {
+ next_state_ = State::CACHE_WRITE_TRUNCATED_RESPONSE_COMPLETE;
+ const HttpResponseInfo* response = network_transaction_->GetResponseInfo();
+ scoped_refptr<PickledIOBuffer> data(new PickledIOBuffer());
+  response->Persist(data->pickle(), true /* skip_transient_headers */, true);
+ data->Done();
+ io_buf_len_ = data->pickle()->size();
+ return entry_->disk_entry->WriteData(kResponseInfoIndex, 0, data.get(),
+ io_buf_len_, io_callback_, true);
+}
+
+int HttpCache::Writers::DoCacheWriteTruncatedResponseComplete(int result) {
+ if (result != io_buf_len_) {
+ DLOG(ERROR) << "failed to write response info to cache";
+ }
+ truncated_ = true;
+ return OK;
+}
+
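+// Called after |result| bytes were successfully received (and, unless
+// |network_read_only_| is set, written to the cache). A result of 0 means the
+// network read reached EOF; if fewer bytes than Content-Length were received,
+// the completion is treated as a network read failure.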
+void HttpCache::Writers::OnDataReceived(int result) {
+ if (result == 0) {
+    // Check if the response is actually complete; if not, attempt to mark
+    // the entry as truncated.
+ bool completed = false;
+ int current_size = entry_->disk_entry->GetDataSize(kResponseContentIndex);
+ const HttpResponseInfo* response_info =
+ network_transaction_->GetResponseInfo();
+ int64_t content_length = response_info->headers->GetContentLength();
+ if ((content_length >= 0 && content_length <= current_size) ||
+ content_length < 0) {
+ completed = true;
+ }
+ if (!completed) {
+ OnNetworkReadFailure(result);
+ return;
+ }
+ }
+
+ // Notify waiting_for_read_. Tasks will be posted for all the
+ // transactions.
+ ProcessWaitingForReadTransactions(write_len_);
+
+ active_transaction_ = nullptr;
+
+ // TODO(shivanisha) Invoke cache_->DoneWritingToEntry if result is 0 implying
+ // response completion.
+}
+
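+// Called when writing the received data to the cache entry fails. Caching is
+// stopped, but the active transaction keeps being served from the network.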
+void HttpCache::Writers::OnCacheWriteFailure() {
+ network_read_only_ = true;
+
+  // Call the cache_ function here even if |active_transaction_| is alive,
+  // because it would not know that this was an error case since it gets a
+  // positive result back. This will in turn invoke ProcessFailure.
+  // TODO(shivanisha): Also pass |active_transaction_| and
+  // ERR_CACHE_WRITE_FAILURE to DoneWritingToEntry on integration so that it
+  // can be passed back in ProcessFailure.
+ cache_->DoneWritingToEntry(entry_, false);
+}
+
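+// Completes all transactions queued behind the active read. On success the
+// data just read from the network is copied into each waiting transaction's
+// buffer; each transaction's callback is then posted with its result.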
+void HttpCache::Writers::ProcessWaitingForReadTransactions(int result) {
+ for (auto it = waiting_for_read_.begin(); it != waiting_for_read_.end();
+ it++) {
+ Transaction* transaction = it->transaction;
+ int callback_result = result;
+
+ if (result >= 0) { // success
+ // Save the data in all the waiting transactions' read buffers.
+      for (auto waiting_it = waiting_for_read_.begin();
+           waiting_it != waiting_for_read_.end(); waiting_it++) {
+        waiting_it->write_len = std::min(waiting_it->read_buf_len, result);
+        memcpy(waiting_it->read_buf->data(), read_buf_->data(),
+               waiting_it->write_len);
+      }
+ callback_result = it->write_len;
+ }
+
+    // If this is response completion or a failure, the transaction needs to
+    // be removed from |all_writers_|.
+ if (result <= 0)
+ all_writers_.erase(transaction);
+
+ // Post task to notify transaction.
+ base::ThreadTaskRunnerHandle::Get()->PostTask(
+ FROM_HERE, base::Bind(it->callback, callback_result));
+ }
+
+ waiting_for_read_.clear();
+}
+
+void HttpCache::Writers::SetIdleWritersFailState(int result) {
+  // Since this is only for idle transactions, |waiting_for_read_| should be
+  // empty and there should be no |active_transaction_|.
+ DCHECK(waiting_for_read_.empty());
+ DCHECK(!active_transaction_);
+ for (auto* transaction : all_writers_)
+ transaction->SetSharedWritingFailState(result);
+ all_writers_.clear();
+}
+
+void HttpCache::Writers::OnIOComplete(int result) {
+ DoLoop(result);
+}
+
+} // namespace net
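
As an aside for readers new to the convention: DoLoop above follows the single-threaded state-machine pattern used throughout //net. Below is a minimal, self-contained sketch of that pattern. Every name in it (MiniStateMachine, kPending) is a hypothetical illustration, not part of this patch or of the net/ API; kPending merely stands in for net::ERR_IO_PENDING.

// A minimal sketch of the DoLoop state-machine pattern (hypothetical names).
#include <iostream>

namespace {

constexpr int kPending = -1;  // Stand-in for net::ERR_IO_PENDING.

class MiniStateMachine {
 public:
  // Kicks off the machine and returns the final synchronous result.
  int Start() {
    next_state_ = State::READ;
    return DoLoop(0);
  }

 private:
  enum class State { NONE, READ, READ_COMPLETE, WRITE, WRITE_COMPLETE };

  // Mirrors the shape of HttpCache::Writers::DoLoop: each handler sets
  // |next_state_| before returning; the loop stops when no state is set or
  // when a step reports that it completed asynchronously.
  int DoLoop(int result) {
    int rv = result;
    do {
      State state = next_state_;
      next_state_ = State::NONE;
      switch (state) {
        case State::READ:
          rv = DoRead();
          break;
        case State::READ_COMPLETE:
          rv = DoReadComplete(rv);
          break;
        case State::WRITE:
          rv = DoWrite(rv);
          break;
        case State::WRITE_COMPLETE:
          rv = DoWriteComplete(rv);
          break;
        case State::NONE:
          break;
      }
    } while (next_state_ != State::NONE && rv != kPending);
    return rv;
  }

  int DoRead() {
    next_state_ = State::READ_COMPLETE;
    return 16;  // Pretend 16 bytes arrived synchronously.
  }
  int DoReadComplete(int result) {
    next_state_ = State::WRITE;
    return result;
  }
  int DoWrite(int num_bytes) {
    next_state_ = State::WRITE_COMPLETE;
    return num_bytes;
  }
  int DoWriteComplete(int result) { return result; }

  State next_state_ = State::NONE;
};

}  // namespace

int main() {
  MiniStateMachine machine;
  std::cout << "final result: " << machine.Start() << "\n";  // Prints 16.
  return 0;
}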
