Chromium Code Reviews

| OLD | NEW |
|---|---|
| 1 // Copyright (c) 2013 The Chromium Authors. All rights reserved. | 1 // Copyright (c) 2013 The Chromium Authors. All rights reserved. |
| 2 // Use of this source code is governed by a BSD-style license that can be | 2 // Use of this source code is governed by a BSD-style license that can be |
| 3 // found in the LICENSE file. | 3 // found in the LICENSE file. |
| 4 | 4 |
| 5 #include "net/disk_cache/simple/simple_entry_impl.h" | 5 #include "net/disk_cache/simple/simple_entry_impl.h" |
| 6 | 6 |
| 7 #include "base/bind.h" | 7 #include "base/bind.h" |
| 8 #include "base/bind_helpers.h" | 8 #include "base/bind_helpers.h" |
| 9 #include "base/callback.h" | 9 #include "base/callback.h" |
| 10 #include "base/location.h" | 10 #include "base/location.h" |
| (...skipping 87 matching lines...) | |
| 98 // SimpleSynchronousEntry, and operations can even happen on them. The files | 98 // SimpleSynchronousEntry, and operations can even happen on them. The files |
| 99 // will be removed from the filesystem when they are closed. | 99 // will be removed from the filesystem when they are closed. |
| 100 DoomEntry(index_, path_, key_, CompletionCallback()); | 100 DoomEntry(index_, path_, key_, CompletionCallback()); |
| 101 #else | 101 #else |
| 102 NOTIMPLEMENTED(); | 102 NOTIMPLEMENTED(); |
| 103 #endif | 103 #endif |
| 104 } | 104 } |
| 105 | 105 |
| 106 void SimpleEntryImpl::Close() { | 106 void SimpleEntryImpl::Close() { |
| 107 DCHECK(io_thread_checker_.CalledOnValidThread()); | 107 DCHECK(io_thread_checker_.CalledOnValidThread()); |
| 108 Release(); // Balanced in CreationOperationCompleted(). | 108 // Postpone close operation. |
| | 109 // Push the close operation to the end of the line. This way we run all |
| | 110 // operations before we are able to close. |
| | 111 pending_operations_.push(base::Bind(&SimpleEntryImpl::CloseInternal, this)); |
| | 112 RunNextOperationIfNeeded(); |
| 109 } | 113 } |
| 110 | 114 |
| 111 std::string SimpleEntryImpl::GetKey() const { | 115 std::string SimpleEntryImpl::GetKey() const { |
| 112 DCHECK(io_thread_checker_.CalledOnValidThread()); | 116 DCHECK(io_thread_checker_.CalledOnValidThread()); |
| 113 return key_; | 117 return key_; |
| 114 } | 118 } |
| 115 | 119 |
| 116 Time SimpleEntryImpl::GetLastUsed() const { | 120 Time SimpleEntryImpl::GetLastUsed() const { |
| 117 DCHECK(io_thread_checker_.CalledOnValidThread()); | 121 DCHECK(io_thread_checker_.CalledOnValidThread()); |
| 118 return last_used_; | 122 return last_used_; |
| 119 } | 123 } |
| 120 | 124 |
| 121 Time SimpleEntryImpl::GetLastModified() const { | 125 Time SimpleEntryImpl::GetLastModified() const { |
| 122 DCHECK(io_thread_checker_.CalledOnValidThread()); | 126 DCHECK(io_thread_checker_.CalledOnValidThread()); |
| 123 return last_modified_; | 127 return last_modified_; |
| 124 } | 128 } |
| 125 | 129 |
| 126 int32 SimpleEntryImpl::GetDataSize(int index) const { | 130 int32 SimpleEntryImpl::GetDataSize(int index) const { |
| 127 DCHECK(io_thread_checker_.CalledOnValidThread()); | 131 DCHECK(io_thread_checker_.CalledOnValidThread()); |
| 128 return data_size_[index]; | 132 return data_size_[index]; |
| 129 } | 133 } |
| 130 | 134 |
| 131 int SimpleEntryImpl::ReadData(int index, | 135 int SimpleEntryImpl::ReadData(int index, |
| 132 int offset, | 136 int offset, |
| 133 net::IOBuffer* buf, | 137 net::IOBuffer* buf, |
| 134 int buf_len, | 138 int buf_len, |
| 135 const CompletionCallback& callback) { | 139 const CompletionCallback& callback) { |
| 136 DCHECK(io_thread_checker_.CalledOnValidThread()); | 140 DCHECK(io_thread_checker_.CalledOnValidThread()); |
| 137 // TODO(gavinp): Add support for overlapping reads. The net::HttpCache does | 141 if (index < 0 || index >= kSimpleEntryFileCount || buf_len < 0) |
| 138 // make overlapping read requests when multiple transactions access the same | 142 return net::ERR_INVALID_ARGUMENT; |
| 139 // entry as read only. This might make calling SimpleSynchronousEntry::Close() | 143 if (offset >= data_size_[index] || offset < 0 || !buf_len) |
| 140 // correctly more tricky (see SimpleEntryImpl::EntryOperationComplete). | 144 return 0; |
| 141 if (synchronous_entry_in_use_by_worker_) { | 145 // TODO(felipeg): Optimization: Add support for truly parallel read |
| 142 NOTIMPLEMENTED(); | 146 // operations. |
| 143 CHECK(false); | 147 pending_operations_.push( |
| 144 } | 148 base::Bind(&SimpleEntryImpl::ReadDataInternal, |
| 145 synchronous_entry_in_use_by_worker_ = true; | 149 this, |
| 146 if (index_) | 150 index, |
| 147 index_->UseIfExists(key_); | 151 offset, |
| 148 SynchronousOperationCallback sync_operation_callback = | 152 make_scoped_refptr(buf), |
| 149 base::Bind(&SimpleEntryImpl::EntryOperationComplete, | 153 buf_len, |
| 150 this, callback); | 154 callback)); |
| 151 WorkerPool::PostTask(FROM_HERE, | 155 RunNextOperationIfNeeded(); |
| 152 base::Bind(&SimpleSynchronousEntry::ReadData, | |
| 153 base::Unretained(synchronous_entry_), | |
| 154 index, offset, make_scoped_refptr(buf), | |
| 155 buf_len, sync_operation_callback), | |
| 156 true); | |
| 157 return net::ERR_IO_PENDING; | 156 return net::ERR_IO_PENDING; |
| 158 } | 157 } |
| 159 | 158 |
| 160 int SimpleEntryImpl::WriteData(int index, | 159 int SimpleEntryImpl::WriteData(int index, |
| 161 int offset, | 160 int offset, |
| 162 net::IOBuffer* buf, | 161 net::IOBuffer* buf, |
| 163 int buf_len, | 162 int buf_len, |
| 164 const CompletionCallback& callback, | 163 const CompletionCallback& callback, |
| 165 bool truncate) { | 164 bool truncate) { |
| 166 DCHECK(io_thread_checker_.CalledOnValidThread()); | 165 DCHECK(io_thread_checker_.CalledOnValidThread()); |
| 167 if (synchronous_entry_in_use_by_worker_) { | 166 if (index < 0 || index >= kSimpleEntryFileCount || offset < 0 || buf_len < 0) |
| 168 NOTIMPLEMENTED(); | 167 return net::ERR_INVALID_ARGUMENT; |
| 169 CHECK(false); | 168 pending_operations_.push( |
| 170 } | 169 base::Bind(&SimpleEntryImpl::WriteDataInternal, |
| 171 synchronous_entry_in_use_by_worker_ = true; | 170 this, |
| 172 if (index_) | 171 index, |
| 173 index_->UseIfExists(key_); | 172 offset, |
| 174 SynchronousOperationCallback sync_operation_callback = | 173 make_scoped_refptr(buf), |
| 175 base::Bind(&SimpleEntryImpl::EntryOperationComplete, | 174 buf_len, |
| 176 this, callback); | 175 callback, |
| 177 WorkerPool::PostTask(FROM_HERE, | 176 truncate)); |
| 178 base::Bind(&SimpleSynchronousEntry::WriteData, | 177 RunNextOperationIfNeeded(); |
| 179 base::Unretained(synchronous_entry_), | 178 // TODO(felipeg): Optimization: Add support for optimistic writes, quickly |
| 180 index, offset, make_scoped_refptr(buf), | 179 // returning net::OK here. |
| 181 buf_len, sync_operation_callback, truncate), | |
| 182 true); | |
| 183 return net::ERR_IO_PENDING; | 180 return net::ERR_IO_PENDING; |
| 184 } | 181 } |
| 185 | 182 |
| 186 int SimpleEntryImpl::ReadSparseData(int64 offset, | 183 int SimpleEntryImpl::ReadSparseData(int64 offset, |
| 187 net::IOBuffer* buf, | 184 net::IOBuffer* buf, |
| 188 int buf_len, | 185 int buf_len, |
| 189 const CompletionCallback& callback) { | 186 const CompletionCallback& callback) { |
| 190 DCHECK(io_thread_checker_.CalledOnValidThread()); | 187 DCHECK(io_thread_checker_.CalledOnValidThread()); |
| 191 // TODO(gavinp): Determine if the simple backend should support sparse data. | 188 // TODO(gavinp): Determine if the simple backend should support sparse data. |
| 192 NOTIMPLEMENTED(); | 189 NOTIMPLEMENTED(); |
| (...skipping 35 matching lines...) | |
| 228 int SimpleEntryImpl::ReadyForSparseIO(const CompletionCallback& callback) { | 225 int SimpleEntryImpl::ReadyForSparseIO(const CompletionCallback& callback) { |
| 229 DCHECK(io_thread_checker_.CalledOnValidThread()); | 226 DCHECK(io_thread_checker_.CalledOnValidThread()); |
| 230 // TODO(gavinp): Determine if the simple backend should support sparse data. | 227 // TODO(gavinp): Determine if the simple backend should support sparse data. |
| 231 NOTIMPLEMENTED(); | 228 NOTIMPLEMENTED(); |
| 232 return net::ERR_FAILED; | 229 return net::ERR_FAILED; |
| 233 } | 230 } |
| 234 | 231 |
| 235 SimpleEntryImpl::SimpleEntryImpl(SimpleIndex* index, | 232 SimpleEntryImpl::SimpleEntryImpl(SimpleIndex* index, |
| 236 const FilePath& path, | 233 const FilePath& path, |
| 237 const std::string& key) | 234 const std::string& key) |
| 238 : constructor_thread_(MessageLoopProxy::current()), | 235 : index_(index->AsWeakPtr()), |
| 239 index_(index->AsWeakPtr()), | 236 path_(path), |
| 240 path_(path), | 237 key_(key), |
| 241 key_(key), | 238 synchronous_entry_(NULL), |
| 242 synchronous_entry_(NULL), | 239 operation_running_(false) { |
| 243 synchronous_entry_in_use_by_worker_(false) { | |
| 244 } | 240 } |
| 245 | 241 |
| 246 SimpleEntryImpl::~SimpleEntryImpl() { | 242 SimpleEntryImpl::~SimpleEntryImpl() { |
| 247 if (synchronous_entry_) { | 243 DCHECK_EQ(0U, pending_operations_.size()); |
| 248 base::Closure close_sync_entry = | 244 DCHECK(!operation_running_); |
| 249 base::Bind(&SimpleSynchronousEntry::Close, | 245 } |
| 250 base::Unretained(synchronous_entry_)); | 246 |
| 251 // We aren't guaranteed to be able to run IO on our constructor thread, but | 247 bool SimpleEntryImpl::RunNextOperationIfNeeded() { |
| 252 // we are also not guaranteed to be allowed to run WorkerPool::PostTask on | 248 DCHECK(io_thread_checker_.CalledOnValidThread()); |
| 253 // our other threads. | 249 if (pending_operations_.size() <= 0 || operation_running_) |
| 254 if (constructor_thread_->BelongsToCurrentThread()) | 250 return false; |
| 255 WorkerPool::PostTask(FROM_HERE, close_sync_entry, true); | 251 base::Closure operation = pending_operations_.front(); |
| 256 else | 252 pending_operations_.pop(); |
| 257 close_sync_entry.Run(); | 253 operation.Run(); |
| 258 } | 254 return true; |
| | 255 } |
| | 256 |
| | 257 void SimpleEntryImpl::CloseInternal() { |
| | 258 DCHECK(io_thread_checker_.CalledOnValidThread()); |
| | 259 DCHECK(pending_operations_.size() == 0); |
| | [comment] gavinp 2013/04/19 05:51:39: Oops. |
| | 260 DCHECK(!operation_running_); |
| | 261 DCHECK(synchronous_entry_); |
| | 262 WorkerPool::PostTask(FROM_HERE, |
| | 263 base::Bind(&SimpleSynchronousEntry::Close, |
| | 264 base::Unretained(synchronous_entry_)), |
| | 265 true); |
| | 266 // Entry::Close() is expected to delete this entry. See disk_cache.h for |
| | 267 // details. |
| | 268 Release(); // Balanced in CreationOperationCompleted(). |
| | 269 } |
| | 270 |
| | 271 void SimpleEntryImpl::ReadDataInternal(int index, |
| | 272 int offset, |
| | 273 scoped_refptr<net::IOBuffer> buf, |
| | 274 int buf_len, |
| | 275 const CompletionCallback& callback) { |
| | 276 DCHECK(io_thread_checker_.CalledOnValidThread()); |
| | 277 DCHECK(!operation_running_); |
| | 278 operation_running_ = true; |
| | 279 if (index_) |
| | 280 index_->UseIfExists(key_); |
| | 281 SynchronousOperationCallback sync_operation_callback = |
| | 282 base::Bind(&SimpleEntryImpl::EntryOperationComplete, |
| | 283 this, callback); |
| | 284 WorkerPool::PostTask(FROM_HERE, |
| | 285 base::Bind(&SimpleSynchronousEntry::ReadData, |
| | 286 base::Unretained(synchronous_entry_), |
| | 287 index, offset, buf, |
| | 288 buf_len, sync_operation_callback), |
| | 289 true); |
| | 290 } |
| | 291 |
| | 292 void SimpleEntryImpl::WriteDataInternal(int index, |
| | 293 int offset, |
| | 294 scoped_refptr<net::IOBuffer> buf, |
| | 295 int buf_len, |
| | 296 const CompletionCallback& callback, |
| | 297 bool truncate) { |
| | 298 DCHECK(io_thread_checker_.CalledOnValidThread()); |
| | 299 DCHECK(!operation_running_); |
| | 300 operation_running_ = true; |
| | 301 if (index_) |
| | 302 index_->UseIfExists(key_); |
| | 303 |
| | 304 // TODO(felipeg): When adding support for optimistic writes we need to set |
| | 305 // data_size_[index] = buf_len here. |
| | 306 SynchronousOperationCallback sync_operation_callback = |
| | 307 base::Bind(&SimpleEntryImpl::EntryOperationComplete, |
| | 308 this, callback); |
| | 309 WorkerPool::PostTask(FROM_HERE, |
| | 310 base::Bind(&SimpleSynchronousEntry::WriteData, |
| | 311 base::Unretained(synchronous_entry_), |
| | 312 index, offset, buf, |
| | 313 buf_len, sync_operation_callback, truncate), |
| | 314 true); |
| 259 } | 315 } |
| 260 | 316 |
| 261 void SimpleEntryImpl::CreationOperationComplete( | 317 void SimpleEntryImpl::CreationOperationComplete( |
| 262 Entry** out_entry, | 318 Entry** out_entry, |
| 263 const CompletionCallback& completion_callback, | 319 const CompletionCallback& completion_callback, |
| 264 SimpleSynchronousEntry* sync_entry) { | 320 SimpleSynchronousEntry* sync_entry) { |
| 265 DCHECK(io_thread_checker_.CalledOnValidThread()); | 321 DCHECK(io_thread_checker_.CalledOnValidThread()); |
| 266 if (!sync_entry) { | 322 if (!sync_entry) { |
| 267 completion_callback.Run(net::ERR_FAILED); | 323 completion_callback.Run(net::ERR_FAILED); |
| 268 // If OpenEntry failed, we must remove it from our index. | 324 // If OpenEntry failed, we must remove it from our index. |
| 269 if (index_) | 325 if (index_) |
| 270 index_->Remove(key_); | 326 index_->Remove(key_); |
| 271 // The reference held by the Callback calling us will go out of scope and | 327 // The reference held by the Callback calling us will go out of scope and |
| 272 // delete |this| on leaving this scope. | 328 // delete |this| on leaving this scope. |
| 273 return; | 329 return; |
| 274 } | 330 } |
| 275 // The Backend interface requires us to return |this|, and keep the Entry | 331 // The Backend interface requires us to return |this|, and keep the Entry |
| 276 // alive until Entry::Close(). Adding a reference to self will keep |this| | 332 // alive until Entry::Close(). Adding a reference to self will keep |this| |
| 277 // alive after the scope of the Callback calling us is destroyed. | 333 // alive after the scope of the Callback calling us is destroyed. |
| 278 AddRef(); // Balanced in Close(). | 334 AddRef(); // Balanced in CloseInternal(). |
| 279 synchronous_entry_ = sync_entry; | 335 synchronous_entry_ = sync_entry; |
| 280 SetSynchronousData(); | 336 SetSynchronousData(); |
| 281 if (index_) | 337 if (index_) |
| 282 index_->Insert(key_); | 338 index_->Insert(key_); |
| 283 *out_entry = this; | 339 *out_entry = this; |
| 284 completion_callback.Run(net::OK); | 340 completion_callback.Run(net::OK); |
| 285 } | 341 } |
| 286 | 342 |
| 287 void SimpleEntryImpl::EntryOperationComplete( | 343 void SimpleEntryImpl::EntryOperationComplete( |
| 288 const CompletionCallback& completion_callback, | 344 const CompletionCallback& completion_callback, |
| 289 int result) { | 345 int result) { |
| 290 DCHECK(io_thread_checker_.CalledOnValidThread()); | 346 DCHECK(io_thread_checker_.CalledOnValidThread()); |
| 291 DCHECK(synchronous_entry_); | 347 DCHECK(synchronous_entry_); |
| 292 DCHECK(synchronous_entry_in_use_by_worker_); | 348 DCHECK(operation_running_); |
| 293 synchronous_entry_in_use_by_worker_ = false; | 349 |
| 350 operation_running_ = false; | |
| 294 SetSynchronousData(); | 351 SetSynchronousData(); |
| 295 if (index_) { | 352 if (index_) { |
| 296 if (result >= 0) { | 353 if (result >= 0) { |
| 297 index_->UpdateEntrySize(synchronous_entry_->key(), | 354 index_->UpdateEntrySize(synchronous_entry_->key(), |
| 298 synchronous_entry_->GetFileSize()); | 355 synchronous_entry_->GetFileSize()); |
| 299 } else { | 356 } else { |
| 300 index_->Remove(synchronous_entry_->key()); | 357 index_->Remove(synchronous_entry_->key()); |
| 301 } | 358 } |
| 302 } | 359 } |
| 303 completion_callback.Run(result); | 360 completion_callback.Run(result); |
| 361 RunNextOperationIfNeeded(); | |
| 304 } | 362 } |
| 305 | 363 |
| 306 void SimpleEntryImpl::SetSynchronousData() { | 364 void SimpleEntryImpl::SetSynchronousData() { |
| 307 DCHECK(io_thread_checker_.CalledOnValidThread()); | 365 DCHECK(io_thread_checker_.CalledOnValidThread()); |
| 308 DCHECK(!synchronous_entry_in_use_by_worker_); | 366 DCHECK(!operation_running_); |
| 309 // TODO(felipeg): These copies to avoid data races are not optimal. While | 367 // TODO(felipeg): These copies to avoid data races are not optimal. While |
| 310 // adding an IO thread index (for fast misses etc...), we can store this data | 368 // adding an IO thread index (for fast misses etc...), we can store this data |
| 311 // in that structure. This also solves problems with last_used() on ext4 | 369 // in that structure. This also solves problems with last_used() on ext4 |
| 312 // filesystems not being accurate. | 370 // filesystems not being accurate. |
| 313 last_used_ = synchronous_entry_->last_used(); | 371 last_used_ = synchronous_entry_->last_used(); |
| 314 last_modified_ = synchronous_entry_->last_modified(); | 372 last_modified_ = synchronous_entry_->last_modified(); |
| 315 for (int i = 0; i < kSimpleEntryFileCount; ++i) | 373 for (int i = 0; i < kSimpleEntryFileCount; ++i) |
| 316 data_size_[i] = synchronous_entry_->data_size(i); | 374 data_size_[i] = synchronous_entry_->data_size(i); |
| 317 } | 375 } |
| 318 | 376 |
| 319 } // namespace disk_cache | 377 } // namespace disk_cache |
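The core of this patch is the switch from a single in-flight flag (`synchronous_entry_in_use_by_worker_`, which simply `CHECK`-failed on overlapping calls) to a FIFO of pending operations that `RunNextOperationIfNeeded()` drains one at a time, with `operation_running_` guarding against concurrent dispatch. Below is a minimal, framework-free sketch of that queueing pattern, assuming `std::function` and `std::queue` as stand-ins for Chromium's `base::Closure` and the member queue; the names mirror the patch but this is illustrative, not the actual entry code.

```cpp
// Sketch of the serialized-operation queue introduced by this patch.
// Assumptions: std::function replaces base::Closure; operations signal
// completion by calling OperationComplete(), as EntryOperationComplete()
// does in the real patch.
#include <functional>
#include <iostream>
#include <queue>

class SerializedOps {
 public:
  // Callers enqueue work instead of running it directly; at most one
  // operation is in flight at a time.
  void Post(std::function<void()> op) {
    pending_operations_.push(std::move(op));
    RunNextOperationIfNeeded();
  }

  // Called when the in-flight operation finishes: clear the flag,
  // then pump the queue, mirroring EntryOperationComplete().
  void OperationComplete() {
    operation_running_ = false;
    RunNextOperationIfNeeded();
  }

 private:
  bool RunNextOperationIfNeeded() {
    if (pending_operations_.empty() || operation_running_)
      return false;
    std::function<void()> op = std::move(pending_operations_.front());
    pending_operations_.pop();
    // Note: in the patch the *Internal() methods set operation_running_;
    // setting it here before dispatch is an equivalent simplification.
    operation_running_ = true;
    op();  // Must eventually lead to OperationComplete().
    return true;
  }

  std::queue<std::function<void()>> pending_operations_;
  bool operation_running_ = false;
};

int main() {
  SerializedOps ops;
  ops.Post([] { std::cout << "read #1 started\n"; });  // runs immediately
  ops.Post([] { std::cout << "read #2 started\n"; });  // queued behind #1
  ops.OperationComplete();  // #1 done; #2 is dispatched
  ops.OperationComplete();  // #2 done; queue empty
}
```

One consequence visible in the diff: `Close()` now rides the same queue, so `CloseInternal()` (which posts `SimpleSynchronousEntry::Close` to the worker pool and drops the self-reference taken in `CreationOperationComplete()`) can only run once every earlier read and write has completed, which is why its `DCHECK`s can assert an empty queue and no running operation.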