Chromium Code Reviews

Side by Side Diff: net/disk_cache/v3/entry_operation.cc

Issue 15203004: Disk cache: Reference CL for the implementation of file format version 3. (Closed) Base URL: svn://svn.chromium.org/chrome/trunk/src/
Patch Set: Created 7 years, 6 months ago
Property Changes:
Added: svn:eol-style
+ LF
1 // Copyright (c) 2012 The Chromium Authors. All rights reserved.
2 // Use of this source code is governed by a BSD-style license that can be
3 // found in the LICENSE file.
4
5 #include "net/disk_cache/v3/entry_operation.h"
6
7 #include "base/hash.h"
8 #include "base/message_loop.h"
9 #include "base/metrics/histogram.h"
10 #include "base/string_util.h"
11 #include "net/base/io_buffer.h"
12 #include "net/base/net_errors.h"
13 #include "net/disk_cache/bitmap.h"
14 #include "net/disk_cache/cache_util.h"
15 #include "net/disk_cache/histogram_macros.h"
16 #include "net/disk_cache/net_log_parameters.h"
17 #include "net/disk_cache/sparse_control.h"
18 #include "net/disk_cache/v3/backend_impl_v3.h"
19 #include "net/disk_cache/v3/disk_format_v3.h"
20 #include "net/disk_cache/v3/storage_block-inl.h"
21
22 using base::Time;
23 using base::TimeDelta;
24 using base::TimeTicks;
25
26 namespace {
27
28
29 const int kMaxBufferSize = 1024 * 1024; // 1 MB.
30
31 } // namespace
32
33 namespace disk_cache {
34
35
36 // ------------------------------------------------------------------------
37
38
39 // ------------------------------------------------------------------------
40
41 //int EntryImplV3::InternalReadData(int index, int offset,
42 // IOBuffer* buf, int buf_len,
43 // const CompletionCallback& callback) {
44 // //DCHECK(node_.Data()->dirty || read_only_);
45 // DVLOG(2) << "Read from " << index << " at " << offset << " : " << buf_len;
46 // if (index < 0 || index >= kNumStreams)
47 // return net::ERR_INVALID_ARGUMENT;
48 //
49 // int entry_size = entry_->data_size[index];
50 // if (offset >= entry_size || offset < 0 || !buf_len)
51 // return 0;
52 //
53 // if (buf_len < 0)
54 // return net::ERR_INVALID_ARGUMENT;
55 //
56 // if (!backend_)
57 // return net::ERR_UNEXPECTED;
58 //
59 // TimeTicks start = TimeTicks::Now();
60 //
61 // if (offset + buf_len > entry_size)
62 // buf_len = entry_size - offset;
63 //
64 // UpdateRank(false);
65 //
66 // backend_->OnEvent(Stats::READ_DATA);
67 // backend_->OnRead(buf_len);
68 //
69 // Addr address(entry_->data_addr[index]);
70 // int eof = address.is_initialized() ? entry_size : 0;
71 // if (user_buffers_[index].get() &&
72 // user_buffers_[index]->PreRead(eof, offset, &buf_len)) {
73 // // Complete the operation locally.
74 // buf_len = user_buffers_[index]->Read(offset, buf, buf_len);
75 // ReportIOTime(kRead, start);
76 // return buf_len;
77 // }
78 //
79 // NOTREACHED();
80 // /*
81 // address.set_value(entry_->data_addr[index]);
82 // DCHECK(address.is_initialized());
83 // if (!address.is_initialized()) {
84 // DoomImpl();
85 // return net::ERR_FAILED;
86 // }
87 //
88 // File* file = GetBackingFile(address, index);
89 // if (!file) {
90 // DoomImpl();
91 // return net::ERR_FAILED;
92 // }
93 //
94 // size_t file_offset = offset;
95 // if (address.is_block_file()) {
96 // DCHECK_LE(offset + buf_len, kMaxBlockSize);
97 // file_offset += address.start_block() * address.BlockSize() +
98 // kBlockHeaderSize;
99 // }
100 //
101 // SyncCallback* io_callback = NULL;
102 // if (!callback.is_null()) {
103 // io_callback = new SyncCallback(this, buf, callback,
104 // net::NetLog::TYPE_ENTRY_READ_DATA);
105 // }
106 //
107 // TimeTicks start_async = TimeTicks::Now();
108 //
109 // bool completed;
110 // if (!file->Read(buf->data(), buf_len, file_offset, io_callback, &completed)) {
111 // if (io_callback)
112 // io_callback->Discard();
113 // DoomImpl();
114 // return net::ERR_FAILED;
115 // }
116 //
117 // if (io_callback && completed)
118 // io_callback->Discard();
119 //
120 // if (io_callback)
121 // ReportIOTime(kReadAsync1, start_async);
122 //
123 // ReportIOTime(kRead, start);
124 // return (completed || callback.is_null()) ? buf_len : net::ERR_IO_PENDING;*/
125 // return 0;
126 //}
127 //
128 //int EntryImplV3::InternalWriteData(int index, int offset,
129 // IOBuffer* buf, int buf_len,
130 // const CompletionCallback& callback,
131 // bool truncate) {
132 // //DCHECK(node_.Data()->dirty || read_only_);
133 // DVLOG(2) << "Write to " << index << " at " << offset << " : " << buf_len;
134 // if (index < 0 || index >= kNumStreams)
135 // return net::ERR_INVALID_ARGUMENT;
136 //
137 // if (offset < 0 || buf_len < 0)
138 // return net::ERR_INVALID_ARGUMENT;
139 //
140 // if (!backend_)
141 // return net::ERR_UNEXPECTED;
142 //
143 // int max_file_size = backend_->MaxFileSize();
144 //
145 //  // offset and buf_len are non-negative here, but their sum could overflow.
146 // if (offset > max_file_size || buf_len > max_file_size ||
147 // offset + buf_len > max_file_size) {
148 // int size = offset + buf_len;
149 // if (size <= max_file_size)
150 // size = kint32max;
151 // backend_->TooMuchStorageRequested(size);
152 // return net::ERR_FAILED;
153 // }
154 //
155 // TimeTicks start = TimeTicks::Now();
156 //
157 // // Read the size at this point (it may change inside prepare).
158 // int entry_size = entry_->data_size[index];
159 // bool extending = entry_size < offset + buf_len;
160 // truncate = truncate && entry_size > offset + buf_len;
161 // Trace("To PrepareTarget 0x%x", address_.value());
162 // if (!PrepareTarget(index, offset, buf_len, truncate))
163 // return net::ERR_FAILED;
164 //
165 // Trace("From PrepareTarget 0x%x", address_.value());
166 // if (extending || truncate)
167 // UpdateSize(index, entry_size, offset + buf_len);
168 //
169 // UpdateRank(true);
170 //
171 // backend_->OnEvent(Stats::WRITE_DATA);
172 // backend_->OnWrite(buf_len);
173 //
174 // if (user_buffers_[index].get()) {
175 // // Complete the operation locally.
176 // user_buffers_[index]->Write(offset, buf, buf_len);
177 // ReportIOTime(kWrite, start);
178 // return buf_len;
179 // }
180 //
181 // NOTREACHED();
182 // /*
183 // Addr address(entry_.Data()->data_addr[index]);
184 // if (offset + buf_len == 0) {
185 // if (truncate) {
186 // DCHECK(!address.is_initialized());
187 // }
188 // return 0;
189 // }
190 //
191 // File* file = GetBackingFile(address, index);
192 // if (!file)
193 // return net::ERR_FAILED;
194 //
195 // size_t file_offset = offset;
196 // if (address.is_block_file()) {
197 // DCHECK_LE(offset + buf_len, kMaxBlockSize);
198 // file_offset += address.start_block() * address.BlockSize() +
199 // kBlockHeaderSize;
200 // } else if (truncate || (extending && !buf_len)) {
201 // if (!file->SetLength(offset + buf_len))
202 // return net::ERR_FAILED;
203 // }
204 //
205 // if (!buf_len)
206 // return 0;
207 //
208 // SyncCallback* io_callback = NULL;
209 // if (!callback.is_null()) {
210 // io_callback = new SyncCallback(this, buf, callback,
211 // net::NetLog::TYPE_ENTRY_WRITE_DATA);
212 // }
213 //
214 // TimeTicks start_async = TimeTicks::Now();
215 //
216 // bool completed;
217 // if (!file->Write(buf->data(), buf_len, file_offset, io_callback,
218 // &completed)) {
219 // if (io_callback)
220 // io_callback->Discard();
221 // return net::ERR_FAILED;
222 // }
223 //
224 // if (io_callback && completed)
225 // io_callback->Discard();
226 //
227 // if (io_callback)
228 // ReportIOTime(kWriteAsync1, start_async);
229 //
230 // ReportIOTime(kWrite, start);
231 // return (completed || callback.is_null()) ? buf_len : net::ERR_IO_PENDING;*/
232 // return 0;
233 //}
234 //
235 //// ------------------------------------------------------------------------
236 //
237 //
238 //// Note that this method may end up modifying a block file so upon return the
239 //// involved block will be free, and could be reused for something else. If there
240 //// is a crash after that point (and maybe before returning to the caller), the
241 //// entry will be left dirty... and at some point it will be discarded; it is
242 //// important that the entry doesn't keep a reference to this address, or we'll
243 //// end up deleting the contents of |address| once again.
244 //void EntryImplV3::DeleteData(Addr address, int index) {
245 // DCHECK(backend_);
246 // if (!address.is_initialized())
247 // return;
248 // if (address.is_separate_file()) {
249 // int failure = !DeleteCacheFile(backend_->GetFileName(address));
250 // CACHE_UMA(COUNTS, "DeleteFailed", 0, failure);
251 // if (failure) {
252 // LOG(ERROR) << "Failed to delete " <<
253 // backend_->GetFileName(address).value() << " from the cache.";
254 // }
255 // if (files_[index])
256 // files_[index] = NULL; // Releases the object.
257 // } else {
258 // backend_->DeleteBlock(address, true);
259 // }
260 //}
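
The crash-safety comment above DeleteData boils down to an ordering rule that the callers below (HandleTruncation, MoveToLocalBuffer) follow: the entry must forget the address before the block is released. A minimal sketch of that ordering, reusing the names from this file:

//   Addr old_address(entry_->data_addr[index]);  // remember the doomed block
//   entry_->data_addr[index] = 0;                // the entry forgets it first...
//   DeleteData(old_address, index);              // ...then the block is released
//
// If the entry still referenced the address after the block was freed, a crash
// that leaves the entry dirty would make a later cleanup delete the (possibly
// reused) contents of |old_address| a second time.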
261 //
262 //void EntryImplV3::UpdateRank(bool modified) {
263 // //if (!backend_)
264 // // return;
265 //
266 // //if (!doomed_) {
267 // // // Everything is handled by the backend.
268 // // backend_->UpdateRank(this, modified);
269 // // return;
270 // //}
271 //
272 // //Time current = Time::Now();
273 // //node_.Data()->last_used = current.ToInternalValue();
274 //
275 // //if (modified)
276 // // node_.Data()->last_modified = current.ToInternalValue();
277 //}
278
279 // We keep a memory buffer for everything that ends up stored on a block file
280 // (because we don't yet know the final data size), and for some of the data
281 // that ends up on external files. This function will initialize that memory
282 // buffer and / or the files needed to store the data.
283 //
284 // In general, a buffer may overlap data already stored on disk, and in that
285 // case, the contents of the buffer are the most accurate. It may also extend
286 // the file, but we don't want to read from disk just to keep the buffer up to
287 // date. This means that as soon as there is a chance to get confused about what
288 // is the most recent version of some part of a file, we'll flush the buffer and
289 // reuse it for the new data. Keep in mind that the normal use pattern is quite
290 // simple (write sequentially from the beginning), so we optimize for handling
291 // that case.
292 bool EntryImplV3::PrepareTarget(int index, int offset, int buf_len,
293 bool truncate) {
294 if (truncate)
295 return HandleTruncation(index, offset, buf_len);
296
297 if (!offset && !buf_len)
298 return true;
299
300 Addr address(entry_->data_addr[index]);
301 if (address.is_initialized()) {
302 if (address.is_block_file() && !MoveToLocalBuffer(index))
303 return false;
304
305 if (!user_buffers_[index].get() && offset < kMaxBlockSize) {
306       // We are about to create a buffer for the first 16KB, so make sure that we
307 // preserve existing data.
308 if (!CopyToLocalBuffer(index))
309 return false;
310 }
311 }
312
313 if (!user_buffers_[index].get())
314 user_buffers_[index].reset(new UserBuffer(backend_));
315
316 return PrepareBuffer(index, offset, buf_len);
317 }
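
The sequential-write pattern that the comment above PrepareTarget optimizes for can be traced through the code below. A hypothetical walkthrough of three writes to an empty stream; the UserBuffer limits are an assumption inferred from kMaxBufferSize above:

//   Write 4 KB at offset 0:    no address yet, so PrepareTarget only creates
//                              user_buffers_[index]; PrepareBuffer accepts the
//                              write and the data stays in memory.
//   Write 4 KB at offset 4 KB: the buffer already ends there, so the bytes are
//                              appended; still no disk I/O.
//   Write 4 KB at offset 2 MB: assuming UserBuffer::PreWrite rejects growing
//                              that far, PrepareBuffer calls Flush(), which
//                              creates the data block, hands the buffered bytes
//                              to backend_->WriteData() and drops the buffer;
//                              the new write then goes straight to disk.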
318
319 // We get to this function with some data already stored. If there is a
320 // truncation that results in data stored internally, we'll explicitly
321 // handle the case here.
322 bool EntryImplV3::HandleTruncation(int index, int offset, int buf_len) {
323 Addr address(entry_->data_addr[index]);
324
325 int current_size = entry_->data_size[index];
326 int new_size = offset + buf_len;
327
328 if (!new_size) {
329 // This is by far the most common scenario.
330 backend_->ModifyStorageSize(current_size - unreported_size_[index], 0);
331 entry_->data_addr[index] = 0;
332 entry_->data_size[index] = 0;
333 unreported_size_[index] = 0;
334 //entry_->Store();
335 DeleteData(address, index);
336
337 user_buffers_[index].reset();
338 return true;
339 }
340
341 // We never postpone truncating a file, if there is one, but we may postpone
342 // telling the backend about the size reduction.
343 if (user_buffers_[index].get()) {
344 DCHECK_GE(current_size, user_buffers_[index]->Start());
345 if (!address.is_initialized()) {
346 // There is no overlap between the buffer and disk.
347 if (new_size > user_buffers_[index]->Start()) {
348 // Just truncate our buffer.
349 DCHECK_LT(new_size, user_buffers_[index]->End());
350 user_buffers_[index]->Truncate(new_size);
351 return true;
352 }
353
354 // Just discard our buffer.
355 user_buffers_[index]->Reset();
356 return PrepareBuffer(index, offset, buf_len);
357 }
358
359 // There is some overlap or we need to extend the file before the
360 // truncation.
361 if (offset > user_buffers_[index]->Start())
362 user_buffers_[index]->Truncate(new_size);
363 UpdateSize(index, current_size, new_size);
364 if (!Flush(index, 0))
365 return false;
366 user_buffers_[index].reset();
367 }
368
369 // We have data somewhere, and it is not in a buffer.
370 DCHECK(!user_buffers_[index].get());
371 DCHECK(address.is_initialized());
372
373 if (new_size > kMaxBlockSize)
374 return true; // Let the operation go directly to disk.
375
376 return ImportSeparateFile(index, offset + buf_len);
377 }
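
For readability, the branches of HandleTruncation above map to these outcomes (a restatement of the code, not additional behavior):

//   new_size == 0                    -> release the address, the buffer and the
//                                       unreported size; nothing to flush.
//   data only in the buffer:
//     new_size inside the buffer     -> UserBuffer::Truncate(), no disk I/O.
//     new_size at or before Start()  -> Reset() the buffer and re-prepare it.
//   buffer overlapping data on disk  -> truncate the buffered tail if needed,
//                                       Flush(), drop the buffer, then fall
//                                       through to the on-disk cases below.
//   data (now) only on disk:
//     new_size <= kMaxBlockSize      -> ImportSeparateFile() pulls it back into
//                                       a local buffer.
//     new_size >  kMaxBlockSize      -> let the operation go directly to disk.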
378
379 bool EntryImplV3::CopyToLocalBuffer(int index) {
380 Addr address(entry_->data_addr[index]);
381 DCHECK(!user_buffers_[index].get());
382 DCHECK(address.is_initialized());
383
384 int len = std::min(entry_->data_size[index], kMaxBlockSize);
385 user_buffers_[index].reset(new UserBuffer(backend_));
386 user_buffers_[index]->Write(len, NULL, 0);
387
388   // TODO: connect the original callback and all arguments for this operation
389   // into the operation that is going on.
390
391 File* file = GetBackingFile(address, index);
392 int offset = 0;
393
394 if (address.is_block_file())
395 offset = address.start_block() * address.BlockSize() + kBlockHeaderSize;
396
397 /*if (!file ||
398 !file->Read(user_buffers_[index]->Data(), len, offset, NULL, NULL)) {
399 user_buffers_[index].reset();
400 return false;
401 }*/
402 return true;
403 }
404
405 bool EntryImplV3::MoveToLocalBuffer(int index) {
406 if (!CopyToLocalBuffer(index))
407 return false;
408
409 Addr address(entry_->data_addr[index]);
410 entry_->data_addr[index] = 0;
411 //entry_->Store();
412 DeleteData(address, index);
413
414 // If we lose this entry we'll see it as zero sized.
415 int len = entry_->data_size[index];
416 backend_->ModifyStorageSize(len - unreported_size_[index], 0);
417 unreported_size_[index] = len;
418 return true;
419 }
420
421 bool EntryImplV3::ImportSeparateFile(int index, int new_size) {
422 if (entry_->data_size[index] > new_size)
423 UpdateSize(index, entry_->data_size[index], new_size);
424
425 return MoveToLocalBuffer(index);
426 }
427
428 bool EntryImplV3::PrepareBuffer(int index, int offset, int buf_len) {
429 DCHECK(user_buffers_[index].get());
430 if ((user_buffers_[index]->End() && offset > user_buffers_[index]->End()) ||
431 offset > entry_->data_size[index]) {
432 // We are about to extend the buffer or the file (with zeros), so make sure
433 // that we are not overwriting anything.
434 Addr address(entry_->data_addr[index]);
435 if (address.is_initialized() && address.is_separate_file()) {
436 if (!Flush(index, 0))
437 return false;
438 // There is an actual file already, and we don't want to keep track of
439 // its length so we let this operation go straight to disk.
440 // The only case when a buffer is allowed to extend the file (as in fill
441 // with zeros before the start) is when there is no file yet to extend.
442 user_buffers_[index].reset();
443 return true;
444 }
445 }
446
447 if (!user_buffers_[index]->PreWrite(offset, buf_len)) {
448 if (!Flush(index, offset + buf_len))
449 return false;
450
451     // Let's try again.
452 if (offset > user_buffers_[index]->End() ||
453 !user_buffers_[index]->PreWrite(offset, buf_len)) {
454 // We cannot complete the operation with a buffer.
455 DCHECK(!user_buffers_[index]->Size());
456 DCHECK(!user_buffers_[index]->Start());
457 user_buffers_[index].reset();
458 }
459 }
460 return true;
461 }
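
One concrete case for the zero-extension rule in PrepareBuffer above, with hypothetical sizes:

//   The stream holds 100 bytes in a separate file and a write arrives at
//   offset 50000. Extending through the buffer would fake a zero fill that the
//   file does not contain, so the buffer is flushed and dropped and the write
//   goes straight to disk. Only when there is no backing file yet may the
//   buffer itself represent the zero fill (see the comment above).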
462
463 bool EntryImplV3::Flush(int index, int min_len) {
464 Addr address(entry_->data_addr[index]);
465 DCHECK(user_buffers_[index].get());
466 DCHECK(!address.is_initialized() || address.is_separate_file());
467 DVLOG(3) << "Flush";
468
469 int size = std::max(entry_->data_size[index], min_len);
470 if (size && !address.is_initialized() && !CreateDataBlock(index, size))
471 return false;
472
473 if (!entry_->data_size[index]) {
474 DCHECK(!user_buffers_[index]->Size());
475 return true;
476 }
477
478 address.set_value(entry_->data_addr[index]);
479
480 int len = user_buffers_[index]->Size();
481 int offset = user_buffers_[index]->Start();
482 if (!len && !offset)
483 return true;
484
485 backend_->WriteData(this, address, offset, user_buffers_[index]->Get(),
486 len, CompletionCallback());
487 user_buffers_[index]->Reset();
488 return true;
489 }
490
491 void EntryImplV3::UpdateSize(int index, int old_size, int new_size) {
492 if (entry_->data_size[index] == new_size)
493 return;
494
495 unreported_size_[index] += new_size - old_size;
496 entry_->data_size[index] = new_size;
497 modified_ = true;
498 }
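
UpdateSize only tracks the delta locally; the backend hears about it later through ModifyStorageSize (see HandleTruncation and MoveToLocalBuffer). A worked example with made-up numbers, assuming ModifyStorageSize(old, new) moves the backend's known size from old to new:

//   data_size = 100 on disk, unreported_size_ = 0   (backend knows 100 bytes)
//   UpdateSize(i, 100, 160) -> data_size = 160, unreported_size_ = +60
//   UpdateSize(i, 160, 40)  -> data_size = 40,  unreported_size_ = -60
//   MoveToLocalBuffer() then calls
//     backend_->ModifyStorageSize(40 - (-60), 0);   // i.e. (100, 0)
//   so the backend forgets the 100 bytes it knew about, and unreported_size_
//   becomes 40: the buffered bytes are not reported yet, matching the comment
//   "If we lose this entry we'll see it as zero sized."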
499
500 int EntryImplV3::InitSparseData() {
501 //if (sparse_.get())
502 // return net::OK;
503
504 //// Use a local variable so that sparse_ never goes from 'valid' to NULL.
505 //scoped_ptr<SparseControl> sparse(new SparseControl(this));
506 //int result = sparse->Init();
507 //if (net::OK == result)
508 // sparse_.swap(sparse);
509
510 //return result;
511 return 0;
512 }
513
514 void EntryImplV3::SetEntryFlags(uint32 flags) {
515 entry_->flags |= flags;
516 modified_ = true;
517 }
518
519 uint32 EntryImplV3::GetEntryFlags() {
520 return entry_->flags;
521 }
522
523 void EntryImplV3::GetData(int index, char** buffer, Addr* address) {
524 //DCHECK(backend_);
525 //if (user_buffers_[index].get() && user_buffers_[index]->Size() &&
526 // !user_buffers_[index]->Start()) {
527 // // The data is already in memory, just copy it and we're done.
528 // int data_len = entry_->data_size[index];
529 // if (data_len <= user_buffers_[index]->Size()) {
530 // DCHECK(!user_buffers_[index]->Start());
531 // *buffer = new char[data_len];
532 // memcpy(*buffer, user_buffers_[index]->Data(), data_len);
533 // return;
534 // }
535 //}
536
537 //// Bad news: we'd have to read the info from disk so instead we'll just tell
538 //// the caller where to read from.
539 //*buffer = NULL;
540 //address->set_value(entry_->data_addr[index]);
541 //if (address->is_initialized()) {
542 // // Prevent us from deleting the block from the backing store.
543 // backend_->ModifyStorageSize(entry_->data_size[index] -
544 // unreported_size_[index], 0);
545 // entry_->data_addr[index] = 0;
546 // entry_->data_size[index] = 0;
547 //}
548 }
549
550 void EntryImplV3::Log(const char* msg) {
551 /*int dirty = 0;
552 if (node_.HasData()) {
553 dirty = node_.dirty;
554 }
555
556 Trace("%s 0x%p 0x%x 0x%x", msg, reinterpret_cast<void*>(this),
557 entry_->address().value(), node_.address().value());
558
559 Trace(" data: 0x%x 0x%x 0x%x", entry_->data_addr[0],
560 entry_->data_addr[1], entry_->long_key);
561
562 Trace(" doomed: %d 0x%x", doomed_, dirty);*/
563 }
564
565 } // namespace disk_cache
