Chromium Code Reviews

Unified Diff: src/heap/store-buffer.cc

Issue 1086263002: Make store buffer more robust to OOM. (Closed)
Base URL: https://chromium.googlesource.com/v8/v8.git@master
Patch Set: Remove unrelated change to deoptimizer (created 5 years, 8 months ago)
 // Copyright 2011 the V8 project authors. All rights reserved.
 // Use of this source code is governed by a BSD-style license that can be
 // found in the LICENSE file.

 #include <algorithm>

 #include "src/v8.h"

 #include "src/counters.h"
 #include "src/heap/store-buffer-inl.h"
(...skipping 15 matching lines...)
       store_buffer_rebuilding_enabled_(false),
       callback_(NULL),
       may_move_store_buffer_entries_(true),
       virtual_memory_(NULL),
       hash_set_1_(NULL),
       hash_set_2_(NULL),
       hash_sets_are_empty_(true) {}


 void StoreBuffer::SetUp() {
+  // Allocate 3x the buffer size, so that we can start the new store buffer
+  // aligned to 2x the size. This lets us use a bit test to detect the end of
+  // the area.
   virtual_memory_ = new base::VirtualMemory(kStoreBufferSize * 3);
   uintptr_t start_as_int =
       reinterpret_cast<uintptr_t>(virtual_memory_->address());
   start_ =
       reinterpret_cast<Address*>(RoundUp(start_as_int, kStoreBufferSize * 2));
   limit_ = start_ + (kStoreBufferSize / kPointerSize);

+  // Reserve space for the larger old buffer.
   old_virtual_memory_ =
       new base::VirtualMemory(kOldStoreBufferLength * kPointerSize);
   old_top_ = old_start_ =
       reinterpret_cast<Address*>(old_virtual_memory_->address());
   // Don't know the alignment requirements of the OS, but it is certainly not
   // less than 0xfff.
-  DCHECK((reinterpret_cast<uintptr_t>(old_start_) & 0xfff) == 0);
-  int initial_length =
-      static_cast<int>(base::OS::CommitPageSize() / kPointerSize);
-  DCHECK(initial_length > 0);
-  DCHECK(initial_length <= kOldStoreBufferLength);
+  CHECK((reinterpret_cast<uintptr_t>(old_start_) & 0xfff) == 0);
+  CHECK(kStoreBufferSize >= base::OS::CommitPageSize());
+  // Initial size of the old buffer is as big as the buffer for new pointers.
+  // This means even if we later fail to enlarge the old buffer due to OOM from
+  // the OS, we will still be able to empty the new pointer buffer into the old
+  // buffer.
+  int initial_length = static_cast<int>(kStoreBufferSize / kPointerSize);
+  CHECK(initial_length > 0);
+  CHECK(initial_length <= kOldStoreBufferLength);
   old_limit_ = old_start_ + initial_length;
   old_reserved_limit_ = old_start_ + kOldStoreBufferLength;

   if (!old_virtual_memory_->Commit(reinterpret_cast<void*>(old_start_),
                                    (old_limit_ - old_start_) * kPointerSize,
                                    false)) {
     V8::FatalProcessOutOfMemory("StoreBuffer::SetUp");
   }

   DCHECK(reinterpret_cast<Address>(start_) >= virtual_memory_->address());
(...skipping 45 matching lines...)
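
The new comment at the top of SetUp() explains why 3x the buffer size is reserved: after rounding start_ up to a 2 * kStoreBufferSize boundary, every slot inside the active buffer has the kStoreBufferSize bit of its address clear, while the first address past the end has it set, so a full buffer can be detected with a single bit test. A minimal standalone sketch of that invariant follows; the constant and the fake reservation address are illustrative, not V8's actual values.

// Illustration only: why rounding the start up to 2x the buffer size makes
// the end of the buffer detectable with one bit test. All values are made up.
#include <cassert>
#include <cstdint>

int main() {
  const uintptr_t kBufferSize = 1 << 16;     // Stand-in for kStoreBufferSize.
  const uintptr_t reservation = 0x12345000;  // Pretend base of the 3x reservation.
  // Round up to a 2x boundary, as SetUp() does with RoundUp().
  const uintptr_t start = (reservation + 2 * kBufferSize - 1) & ~(2 * kBufferSize - 1);
  const uintptr_t limit = start + kBufferSize;
  // Every slot inside [start, limit) has the kBufferSize bit clear ...
  for (uintptr_t p = start; p < limit; p += sizeof(void*)) {
    assert((p & kBufferSize) == 0);
  }
  // ... and the first address past the end has it set, so "top & kBufferSize"
  // is a cheap overflow check.
  assert((limit & kBufferSize) != 0);
  return 0;
}

The rounded-up start can sit anywhere up to 2 * kStoreBufferSize past the raw reservation base, and the buffer itself needs another kStoreBufferSize, which is why 3x is reserved.
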

 bool StoreBuffer::SpaceAvailable(intptr_t space_needed) {
   return old_limit_ - old_top_ >= space_needed;
 }


 void StoreBuffer::EnsureSpace(intptr_t space_needed) {
   while (old_limit_ - old_top_ < space_needed &&
          old_limit_ < old_reserved_limit_) {
     size_t grow = old_limit_ - old_start_;  // Double size.
-    if (!old_virtual_memory_->Commit(reinterpret_cast<void*>(old_limit_),
-                                     grow * kPointerSize, false)) {
-      V8::FatalProcessOutOfMemory("StoreBuffer::EnsureSpace");
-    }
-    old_limit_ += grow;
+    if (old_virtual_memory_->Commit(reinterpret_cast<void*>(old_limit_),
+                                    grow * kPointerSize, false)) {
+      old_limit_ += grow;
+    } else {
+      break;
+    }
   }

   if (SpaceAvailable(space_needed)) return;

   if (old_buffer_is_filtered_) return;
   DCHECK(may_move_store_buffer_entries_);
   Compact();

   old_buffer_is_filtered_ = true;
   bool page_has_scan_on_scavenge_flag = false;
(...skipping 432 matching lines...)
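
This hunk is the core of the OOM-robustness change: a failed Commit() while growing the old buffer is no longer fatal. The loop simply stops growing, and EnsureSpace() falls through to Compact() and the page-filtering code below this hunk. A stripped-down sketch of the pattern, with a pluggable commit function standing in for base::VirtualMemory::Commit (the type and member names here are hypothetical, not V8's):

#include <cstddef>
#include <functional>

// Hypothetical model of the grow-or-fall-back logic in EnsureSpace().
struct GrowableBuffer {
  size_t committed;  // Committed entries, i.e. old_limit_ - old_start_.
  size_t used;       // Entries in use, i.e. old_top_ - old_start_.
  size_t reserved;   // Reserved upper bound, i.e. old_reserved_limit_ - old_start_.
  std::function<bool(size_t)> commit;  // Returns false on OOM, like Commit().

  bool SpaceAvailable(size_t needed) const { return committed - used >= needed; }

  void EnsureSpace(size_t needed) {
    // Double the committed region until there is enough space, but treat a
    // commit failure as "stop growing" instead of a fatal error.
    while (committed - used < needed && committed < reserved) {
      const size_t grow = committed;  // Double size.
      if (commit(grow)) {
        committed += grow;
      } else {
        break;  // OOM from the OS: rely on compaction/filtering instead.
      }
    }
    if (SpaceAvailable(needed)) return;
    // The real code runs Compact() and the scan-on-scavenge filtering here.
    // Because SetUp() now commits an old buffer at least as large as the
    // new-pointer buffer, emptying the new buffer into it still succeeds.
  }
};

With this shape, a test can pass a commit function that always returns false and verify that EnsureSpace() returns normally where the old code would have called FatalProcessOutOfMemory().
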
     }
     old_buffer_is_sorted_ = false;
     old_buffer_is_filtered_ = false;
     *old_top_++ = reinterpret_cast<Address>(int_addr << kPointerSizeLog2);
     DCHECK(old_top_ <= old_limit_);
   }
   heap_->isolate()->counters()->store_buffer_compactions()->Increment();
 }
 }
 }  // namespace v8::internal
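
For context on the tail of Compact() shown above: the elided body runs each incoming address through the two hash sets set up in the constructor (hash_set_1_, hash_set_2_) as a lossy duplicate filter before appending it at old_top_. A simplified sketch of such a two-table lossy filter; the hash functions and the power-of-two table size are illustrative, not V8's:

#include <cstddef>
#include <cstdint>
#include <vector>

// Simplified lossy duplicate filter in the spirit of hash_set_1_/hash_set_2_.
// 'length' must be a power of two; zero is reserved as the empty marker.
class LossyDedup {
 public:
  explicit LossyDedup(size_t length)
      : set1_(length, 0), set2_(length, 0), mask_(length - 1) {}

  // Returns true if 'addr' should be kept, false if it is a known duplicate.
  bool Insert(uintptr_t addr) {
    const size_t h1 = (addr ^ (addr >> 7)) & mask_;
    const size_t h2 = (addr + (addr >> 13)) & mask_;
    if (set1_[h1] == addr || set2_[h2] == addr) return false;
    if (set1_[h1] == 0) {
      set1_[h1] = addr;
    } else if (set2_[h2] == 0) {
      set2_[h2] = addr;
    } else {
      // Both slots taken: overwrite one. Later duplicates of the evicted
      // address may slip through, which is the accepted "lossy" trade-off.
      set1_[h1] = addr;
      set2_[h2] = 0;
    }
    return true;
  }

 private:
  std::vector<uintptr_t> set1_, set2_;
  const size_t mask_;
};

Being lossy is acceptable here: a duplicate that slips through only costs a redundant old-buffer entry, while the filter keeps compaction cheap.
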