Index: net/http/http_cache_writers.cc
diff --git a/net/http/http_cache_writers.cc b/net/http/http_cache_writers.cc
new file mode 100644
index 0000000000000000000000000000000000000000..cfe5849c699a6b916e59fad774d1d61566888ddc
--- /dev/null
+++ b/net/http/http_cache_writers.cc
@@ -0,0 +1,419 @@
+// Copyright (c) 2017 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "net/http/http_cache_writers.h"
+
+#include <algorithm>
+#include <utility>
+#include "net/disk_cache/disk_cache.h"
+#include "net/http/http_cache_transaction.h"
+
+namespace net {
+
+HttpCache::Writers::Writers(HttpCache* cache, ActiveEntry* entry)
+    : cache_(cache), entry_(entry), weak_factory_(this) {
+  io_callback_ = base::Bind(&HttpCache::Writers::OnIOComplete,
+                            weak_factory_.GetWeakPtr());
+}
+
+HttpCache::Writers::~Writers() {}
+
+int HttpCache::Writers::Read(scoped_refptr<IOBuffer> buf,
+                             int buf_len,
+                             const CompletionCallback& callback,
+                             Transaction* transaction) {
+  DCHECK(buf);
+  DCHECK_GT(buf_len, 0);
+  DCHECK(!callback.is_null());
+
+  // If another transaction is already reading from the network, then this
+  // transaction waits for the read to complete and gets its buffer filled
+  // with the data returned from that read.
+  if (active_transaction_) {
+    WaitingForRead waiting_transaction(transaction, buf, buf_len, callback);
+    waiting_for_read_.push_back(waiting_transaction);
+    return ERR_IO_PENDING;
+  }
+
+  DCHECK_EQ(next_state_, State::NONE);
+  DCHECK(callback_.is_null());
+  active_transaction_ = transaction;
+
+  read_buf_ = std::move(buf);
+  io_buf_len_ = buf_len;
+  next_state_ = State::NETWORK_READ;
+
+  int rv = DoLoop(OK);
+  if (rv == ERR_IO_PENDING) {
+    DCHECK(callback_.is_null());
+    callback_ = callback;
+  }
+
+  return rv;
+}
+
+bool HttpCache::Writers::StopCaching(Transaction* transaction) {
+  // If this is the only transaction in Writers, then stopping will be
+  // successful. If not, then we will not stop caching since there are
+  // other consumers waiting to read from the cache.
+  if (all_writers_.size() == 1) {
+    DCHECK(all_writers_.count(transaction));
+    network_read_only_ = true;
+    return true;
+  }
+  return false;
+}
+
+void HttpCache::Writers::AddTransaction(Transaction* transaction,
+                                        HttpTransaction* network_transaction) {
+  DCHECK(transaction);
+  DCHECK(CanAddWriters());
+
+  auto return_val = all_writers_.insert(transaction);
+  DCHECK_EQ(return_val.second, true);
+
+  if (network_transaction) {
+    network_transaction_ =
+        base::WrapUnique<HttpTransaction>(network_transaction);
+  }
+  DCHECK(network_transaction_);
+
+  priority_ = std::max(transaction->priority(), priority_);
+  network_transaction_->SetPriority(priority_);
+}
+
+void HttpCache::Writers::RemoveTransaction(Transaction* transaction) {
+  if (!transaction)
+    return;
+
+  // The transaction should be part of all_writers.
+  auto it = all_writers_.find(transaction);
+  DCHECK(it != all_writers_.end());
+  all_writers_.erase(transaction);
+  PriorityChanged();
+
+  if (active_transaction_ == transaction) {
+    active_transaction_ = nullptr;
+    callback_.Reset();
+    return;
+  }
+
+  auto waiting_it = waiting_for_read_.begin();
+  for (; waiting_it != waiting_for_read_.end(); waiting_it++) {
+    if (transaction == waiting_it->transaction) {
+      waiting_for_read_.erase(waiting_it);
+      // If a waiting transaction existed, there should have been an
+      // active_transaction_.
+      DCHECK(active_transaction_);
+      return;
+    }
+  }
+}
+
+void HttpCache::Writers::RemoveAllTransactions() {
+  all_writers_.clear();
+}
+
+void HttpCache::Writers::PriorityChanged() {
+  RequestPriority current_highest = GetCurrentHighestPriority();
+  if (priority_ != current_highest) {
+    network_transaction_->SetPriority(current_highest);
+    priority_ = current_highest;
+  }
+}
+
+void HttpCache::Writers::MoveIdleWritersToReaders() {
+  // Should be invoked after |waiting_for_read_| transactions and
+  // |active_transaction_| are processed so that all_writers_ only contains
+  // idle writers.
+  DCHECK(waiting_for_read_.empty());
+  DCHECK(!active_transaction_);
+  for (auto* idle_writer : all_writers_) {
+    entry_->readers.insert(idle_writer);
+  }
+  all_writers_.clear();
+}
+
+bool HttpCache::Writers::CanAddWriters() const {
+  if (all_writers_.empty()) {
+    DCHECK(!network_read_only_);
+    return true;
+  }
+
+  return !is_exclusive_ && !network_read_only_;
+}
+
+void HttpCache::Writers::ProcessFailure(Transaction* transaction, int error) {
+  DCHECK(!transaction || transaction == active_transaction_);
+
+  // Notify waiting_for_read_ of the failure. Tasks will be posted for all the
+  // transactions.
+  ProcessWaitingForReadTransactions(error);
+
+  // Idle writers should fail when Read is invoked by their consumers.
+  SetIdleWritersFailState(error);
+  DCHECK(all_writers_.empty());
+}
+
+void HttpCache::Writers::TruncateEntry() {
+  // TODO(shivanisha) On integration, see if the entry really needs to be
+  // truncated on the lines of Transaction::AddTruncatedFlag and then proceed.
+  DCHECK_EQ(next_state_, State::NONE);
+  next_state_ = State::CACHE_WRITE_TRUNCATED_RESPONSE;
+  DoLoop(OK);
+}
+
+LoadState HttpCache::Writers::GetWriterLoadState() {
+  return network_transaction_->GetLoadState();
+}
+
+HttpCache::Writers::WaitingForRead::WaitingForRead(
+    Transaction* cache_transaction,
+    scoped_refptr<IOBuffer> buf,
+    int len,
+    const CompletionCallback& consumer_callback)
+    : transaction(cache_transaction),
+      read_buf(std::move(buf)),
+      read_buf_len(len),
+      write_len(0),
+      callback(consumer_callback) {
+  DCHECK(cache_transaction);
+  DCHECK(read_buf);
+  DCHECK_GT(len, 0);
+  DCHECK(!consumer_callback.is_null());
+}
+
+HttpCache::Writers::WaitingForRead::~WaitingForRead() {}
+HttpCache::Writers::WaitingForRead::WaitingForRead(const WaitingForRead&) =
+    default;
+
+int HttpCache::Writers::DoLoop(int result) {
+  DCHECK(next_state_ != State::NONE);
+  int rv = result;
+  do {
+    State state = next_state_;
+    next_state_ = State::NONE;
+    switch (state) {
+      case State::NETWORK_READ:
+        DCHECK_EQ(OK, rv);
+        rv = DoNetworkRead();
+        break;
+      case State::NETWORK_READ_COMPLETE:
+        rv = DoNetworkReadComplete(rv);
+        break;
+      case State::CACHE_WRITE_DATA:
+        rv = DoCacheWriteData(rv);
+        break;
+      case State::CACHE_WRITE_DATA_COMPLETE:
+        rv = DoCacheWriteDataComplete(rv);
+        break;
+      case State::CACHE_WRITE_TRUNCATED_RESPONSE:
+        rv = DoCacheWriteTruncatedResponse();
+        break;
+      case State::CACHE_WRITE_TRUNCATED_RESPONSE_COMPLETE:
+        rv = DoCacheWriteTruncatedResponseComplete(rv);
+        break;
+      default:
+        NOTREACHED() << "bad state";
+        rv = ERR_FAILED;
+        break;
+    }
+  } while (next_state_ != State::NONE && rv != ERR_IO_PENDING);
+
+  if (rv != ERR_IO_PENDING && !callback_.is_null()) {
+    read_buf_ = NULL;  // Release the buffer before invoking the callback.
+    base::ResetAndReturn(&callback_).Run(rv);
+  }
+  return rv;
+}
+
+int HttpCache::Writers::DoNetworkRead() {
+  next_state_ = State::NETWORK_READ_COMPLETE;
+  return network_transaction_->Read(read_buf_.get(), io_buf_len_, io_callback_);
+}
+
+int HttpCache::Writers::DoNetworkReadComplete(int result) {
+  if (result < 0) {
+    OnNetworkReadFailure(result);
+    return result;
+  }
+
+  next_state_ = State::CACHE_WRITE_DATA;
+  return result;
+}
+
+void HttpCache::Writers::OnNetworkReadFailure(int result) {
+  // At this point active_transaction_ may or may not be alive.
+  // If there is no consumer, invoke entry processing here.
+  if (!active_transaction_)
+    cache_->DoneWithEntry(entry_, nullptr, true);
+}
+
+int HttpCache::Writers::DoCacheWriteData(int num_bytes) {
+  next_state_ = State::CACHE_WRITE_DATA_COMPLETE;
+  write_len_ = num_bytes;
+  if (!num_bytes || network_read_only_)
+    return num_bytes;
+
+  int current_size = entry_->disk_entry->GetDataSize(kResponseContentIndex);
+  return WriteToEntry(kResponseContentIndex, current_size, read_buf_.get(),
+                      num_bytes, io_callback_);
+}
+
+int HttpCache::Writers::WriteToEntry(int index,
+                                     int offset,
+                                     IOBuffer* data,
+                                     int data_len,
+                                     const CompletionCallback& callback) {
+  int rv = 0;
+
+  PartialData* partial = nullptr;
+  // Transaction must be alive if this is a partial request.
+  // TODO(shivanisha): When partial requests support parallel writing, this
+  // assumption will not be true.
+  if (active_transaction_)
+    partial = active_transaction_->partial();
+
+  if (!partial || !data_len) {
+    rv = entry_->disk_entry->WriteData(index, offset, data, data_len, callback,
+                                       true);
+  } else {
+    rv = partial->CacheWrite(entry_->disk_entry, data, data_len, callback);
+  }
+  return rv;
+}
+
+int HttpCache::Writers::DoCacheWriteDataComplete(int result) {
+  if (result != write_len_) {
+    OnCacheWriteFailure();
+
+    // |active_transaction_| can continue reading from the network.
+    result = write_len_;
+  } else {
+    OnDataReceived(result);
+  }
+  return result;
+}
+
+int HttpCache::Writers::DoCacheWriteTruncatedResponse() {
+  next_state_ = State::CACHE_WRITE_TRUNCATED_RESPONSE_COMPLETE;
+  return WriteResponseInfo(true);
+}
+
+int HttpCache::Writers::DoCacheWriteTruncatedResponseComplete(int result) {
+  if (result != io_buf_len_) {
+    DLOG(ERROR) << "failed to write response info to cache";
+  }
+  truncated_ = true;
+  return OK;
+}
+
+void HttpCache::Writers::OnDataReceived(int result) {
+  if (result == 0 && !active_transaction_) {
+    // Check if the response is actually completed; if not, attempt to mark
+    // the entry as truncated.
+    if (IsResponseCompleted()) {
+      ProcessWaitingForReadTransactions(result);
+      cache_->DoneWritingToEntry(entry_, true);
+    } else {
+      OnNetworkReadFailure(result);
+      return;
+    }
+  }
+
+  // Save the data in all the waiting transactions' read buffers.
+  for (auto it = waiting_for_read_.begin(); it != waiting_for_read_.end();
+       it++) {
+    it->write_len = std::min(it->read_buf_len, result);
+    memcpy(it->read_buf->data(), read_buf_->data(), it->write_len);
+  }
+
+  // Notify waiting_for_read_. Tasks will be posted for all the
+  // transactions.
+  ProcessWaitingForReadTransactions(write_len_);
+
+  if (result > 0) {  // not the end of response
+    active_transaction_ = nullptr;
+    return;
+  }
+  DCHECK_EQ(result, 0);
+  if (!active_transaction_)
+    cache_->DoneWritingToEntry(entry_, true);
+}
+
+void HttpCache::Writers::OnCacheWriteFailure() {
+  network_read_only_ = true;
+
+  // Call the cache_ function here even if active_transaction_ is alive,
+  // because it would not otherwise know this was an error case since it gets
+  // a positive result back. This will in turn call ProcessFailure.
+  // TODO(shivanisha): Also pass active_transaction_ and
+  // ERR_CACHE_WRITE_FAILURE to DoneWritingToEntry on integration so that it
+  // can be passed back in ProcessFailure.
+  cache_->DoneWritingToEntry(entry_, false);
+}
+
+bool HttpCache::Writers::IsResponseCompleted() {
+  int current_size = entry_->disk_entry->GetDataSize(kResponseContentIndex);
+  const HttpResponseInfo* response_info =
+      network_transaction_->GetResponseInfo();
+  int64_t content_length = response_info->headers->GetContentLength();
+  if ((content_length >= 0 && content_length <= current_size) ||
+      content_length < 0)
+    return true;
+  return false;
+}
+
+int HttpCache::Writers::WriteResponseInfo(bool truncated) {
+  // When writing headers, we normally only write the non-transient headers.
+  const HttpResponseInfo* response = network_transaction_->GetResponseInfo();
+  bool skip_transient_headers = true;
+  scoped_refptr<PickledIOBuffer> data(new PickledIOBuffer());
+  response->Persist(data->pickle(), skip_transient_headers, truncated);
+  data->Done();
+  io_buf_len_ = data->pickle()->size();
+  return entry_->disk_entry->WriteData(kResponseInfoIndex, 0, data.get(),
+                                       io_buf_len_, io_callback_, truncated);
+}
+
+void HttpCache::Writers::ProcessWaitingForReadTransactions(int result) {
+  for (auto it = waiting_for_read_.begin(); it != waiting_for_read_.end();
+       it++) {
+    Transaction* transaction = it->transaction;
+    if (result > 0) {  // success
+      result = it->write_len;
+    } else {
+      // If it is a response completion or failure, this transaction needs to
+      // be removed.
+      all_writers_.erase(transaction);
+    }
+    // Post task to notify transaction.
+    base::ThreadTaskRunnerHandle::Get()->PostTask(
+        FROM_HERE, base::Bind(it->callback, result));
+  }
+  waiting_for_read_.clear();
+}
+
+void HttpCache::Writers::SetIdleWritersFailState(int result) {
+  // Since this is only for idle transactions, waiting_for_read_ should be
+  // empty and there should be no active_transaction_.
+  DCHECK(waiting_for_read_.empty());
+  DCHECK(!active_transaction_);
+  for (auto* transaction : all_writers_)
+    transaction->SetSharedWritingFailState(result);
+  all_writers_.clear();
+}
+
+RequestPriority HttpCache::Writers::GetCurrentHighestPriority() {
+  RequestPriority priority = MINIMUM_PRIORITY;
+  for (auto* transaction : all_writers_)
+    priority = std::max(transaction->priority(), priority);
+  return priority;
+}
+
+void HttpCache::Writers::OnIOComplete(int result) {
+  DoLoop(result);
+}
+
+}  // namespace net
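
The file above follows the usual //net single-DoLoop state-machine idiom: each Do* step sets |next_state_| and returns either a synchronous result or ERR_IO_PENDING, and the loop is re-entered from the bound I/O callback once the asynchronous work finishes. The sketch below is a minimal, self-contained illustration of that idiom only; it is not part of the patch, TwoStepStateMachine and its constants are made-up stand-ins rather than //net types, and it builds as an ordinary standalone program.

// Minimal sketch of the DoLoop idiom used by HttpCache::Writers (illustrative
// only; TwoStepStateMachine and the error constants are stand-ins, not //net
// types). Each step sets next_state_ and returns either a synchronous result
// or ERR_IO_PENDING; the loop exits on ERR_IO_PENDING and resumes from
// OnIOComplete() with the asynchronous result.
#include <functional>
#include <iostream>
#include <utility>

namespace {

constexpr int OK = 0;
constexpr int ERR_IO_PENDING = -1;

class TwoStepStateMachine {
 public:
  // Starts the read -> write sequence. May finish synchronously, or return
  // ERR_IO_PENDING and complete later through OnIOComplete().
  int Start(std::function<void(int)> callback) {
    callback_ = std::move(callback);
    next_state_ = State::READ;
    return DoLoop(OK);
  }

  // In real code this would be bound (weakly) as the I/O callback handed to
  // every asynchronous operation, like io_callback_ in the patch.
  void OnIOComplete(int result) {
    int rv = DoLoop(result);
    if (rv != ERR_IO_PENDING && callback_)
      callback_(rv);
  }

 private:
  enum class State { NONE, READ, READ_COMPLETE, WRITE, WRITE_COMPLETE };

  int DoLoop(int result) {
    int rv = result;
    do {
      State state = next_state_;
      next_state_ = State::NONE;
      switch (state) {
        case State::READ:
          next_state_ = State::READ_COMPLETE;
          rv = 16;  // Pretend a synchronous read produced 16 bytes.
          break;
        case State::READ_COMPLETE:
          next_state_ = State::WRITE;
          break;
        case State::WRITE:
          next_state_ = State::WRITE_COMPLETE;
          rv = ERR_IO_PENDING;  // Pretend the write completes asynchronously.
          break;
        case State::WRITE_COMPLETE:
          rv = OK;
          break;
        case State::NONE:
          break;
      }
    } while (next_state_ != State::NONE && rv != ERR_IO_PENDING);
    return rv;
  }

  State next_state_ = State::NONE;
  std::function<void(int)> callback_;
};

}  // namespace

int main() {
  TwoStepStateMachine machine;
  int rv = machine.Start(
      [](int result) { std::cout << "async completion: " << result << "\n"; });
  if (rv == ERR_IO_PENDING) {
    // The network or disk layer would call this when the pending operation
    // finishes; here it is simulated directly.
    machine.OnIOComplete(OK);
  } else {
    std::cout << "sync completion: " << rv << "\n";
  }
  return 0;
}

Running the sketch exercises the asynchronous completion path, which is the same control flow Writers uses whenever a network read or cache write cannot finish synchronously.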
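
As a rough illustration of the fan-out in OnDataReceived() above, here is a second standalone sketch (again not part of the patch; WaitingRead is a made-up stand-in for WaitingForRead) showing how one network read is copied into every waiting consumer's buffer, capped at each buffer's own length, mirroring the std::min/memcpy loop in the patch.

// Illustrative stand-ins only: WaitingRead simply mirrors the
// read_buf/read_buf_len/write_len fields of WaitingForRead.
#include <algorithm>
#include <cstring>
#include <iostream>
#include <string>
#include <vector>

struct WaitingRead {
  std::vector<char> buf;  // The waiting consumer's read buffer.
  int write_len = 0;      // Bytes actually copied into it.
};

int main() {
  // Pretend the active transaction just read these bytes from the network.
  const std::string network_data = "hello cache";
  const int result = static_cast<int>(network_data.size());

  std::vector<WaitingRead> waiting(2);
  waiting[0].buf.resize(32);  // Large enough for the whole chunk.
  waiting[1].buf.resize(5);   // Smaller buffer; receives a truncated copy.

  // Same shape as the loop in OnDataReceived(): cap at the consumer's buffer
  // length, then copy out of the active transaction's read buffer.
  for (WaitingRead& w : waiting) {
    w.write_len = std::min(static_cast<int>(w.buf.size()), result);
    std::memcpy(w.buf.data(), network_data.data(), w.write_len);
  }

  for (const WaitingRead& w : waiting)
    std::cout << std::string(w.buf.data(), w.write_len) << "\n";
  return 0;
}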