OLD | NEW |
1 // Copyright 2011 the V8 project authors. All rights reserved. | 1 // Copyright 2011 the V8 project authors. All rights reserved. |
2 // Use of this source code is governed by a BSD-style license that can be | 2 // Use of this source code is governed by a BSD-style license that can be |
3 // found in the LICENSE file. | 3 // found in the LICENSE file. |
4 | 4 |
5 #include <algorithm> | 5 #include <algorithm> |
6 | 6 |
7 #include "src/v8.h" | 7 #include "src/v8.h" |
8 | 8 |
9 #include "src/base/atomicops.h" | 9 #include "src/base/atomicops.h" |
10 #include "src/counters.h" | 10 #include "src/counters.h" |
(...skipping 90 matching lines...)
101 heap_->public_set_store_buffer_top(start_); | 101 heap_->public_set_store_buffer_top(start_); |
102 } | 102 } |
103 | 103 |
104 | 104 |
105 void StoreBuffer::StoreBufferOverflow(Isolate* isolate) { | 105 void StoreBuffer::StoreBufferOverflow(Isolate* isolate) { |
106 isolate->heap()->store_buffer()->Compact(); | 106 isolate->heap()->store_buffer()->Compact(); |
107 isolate->counters()->store_buffer_overflows()->Increment(); | 107 isolate->counters()->store_buffer_overflows()->Increment(); |
108 } | 108 } |
109 | 109 |
110 | 110 |
111 void StoreBuffer::Uniq() { | |
112 // Remove adjacent duplicates and cells that do not point at new space. | |
113 Address previous = NULL; | |
114 Address* write = old_start_; | |
115 DCHECK(may_move_store_buffer_entries_); | |
116 for (Address* read = old_start_; read < old_top_; read++) { | |
117 Address current = *read; | |
118 if (current != previous) { | |
119 Object* object = reinterpret_cast<Object*>( | |
120 base::NoBarrier_Load(reinterpret_cast<base::AtomicWord*>(current))); | |
121 if (heap_->InNewSpace(object)) { | |
122 *write++ = current; | |
123 } | |
124 } | |
125 previous = current; | |
126 } | |
127 old_top_ = write; | |
128 } | |
129 | |
130 | |
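The removed Uniq() above compacts the old-space buffer in place with a read/write pointer pair: only the first entry of each run of equal slot addresses is kept, and entries whose slot no longer refers to a new-space object are dropped. A minimal standalone sketch of that two-pointer pattern, using a plain array and a placeholder predicate instead of the real heap_->InNewSpace() check:

#include <cstdint>

// Placeholder for: load the object stored at slot_address and ask
// heap_->InNewSpace(object). Not a V8 API; an always-true stub so the
// sketch stays self-contained.
static bool SlotPointsIntoNewSpace(uintptr_t /* slot_address */) {
  return true;
}

// In-place filter over [start, end): keeps one copy of each run of equal
// slot addresses, drops entries whose referent left new space, and returns
// the new end of the live region.
uintptr_t* CompactSlots(uintptr_t* start, uintptr_t* end) {
  uintptr_t previous = 0;  // No valid slot has address 0.
  uintptr_t* write = start;
  for (uintptr_t* read = start; read < end; ++read) {
    uintptr_t current = *read;
    if (current != previous && SlotPointsIntoNewSpace(current)) {
      *write++ = current;  // Keep the first entry of each run.
    }
    previous = current;
  }
  return write;  // Surviving entries are in [start, write).
}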
131 bool StoreBuffer::SpaceAvailable(intptr_t space_needed) { | 111 bool StoreBuffer::SpaceAvailable(intptr_t space_needed) { |
132 return old_limit_ - old_top_ >= space_needed; | 112 return old_limit_ - old_top_ >= space_needed; |
133 } | 113 } |
134 | 114 |
135 | 115 |
136 void StoreBuffer::EnsureSpace(intptr_t space_needed) { | 116 void StoreBuffer::EnsureSpace(intptr_t space_needed) { |
137 while (old_limit_ - old_top_ < space_needed && | 117 while (old_limit_ - old_top_ < space_needed && |
138 old_limit_ < old_reserved_limit_) { | 118 old_limit_ < old_reserved_limit_) { |
139 size_t grow = old_limit_ - old_start_; // Double size. | 119 size_t grow = old_limit_ - old_start_; // Double size. |
140 if (!old_virtual_memory_->Commit(reinterpret_cast<void*>(old_limit_), | 120 if (!old_virtual_memory_->Commit(reinterpret_cast<void*>(old_limit_), |
(...skipping 99 matching lines...)
240 } | 220 } |
241 } | 221 } |
242 old_top_ = new_top; | 222 old_top_ = new_top; |
243 | 223 |
244 // Filtering hash sets are inconsistent with the store buffer after this | 224 // Filtering hash sets are inconsistent with the store buffer after this |
245 // operation. | 225 // operation. |
246 ClearFilteringHashSets(); | 226 ClearFilteringHashSets(); |
247 } | 227 } |
248 | 228 |
249 | 229 |
250 void StoreBuffer::SortUniq() { | |
251 Compact(); | |
252 if (old_buffer_is_sorted_) return; | |
253 std::sort(old_start_, old_top_); | |
254 Uniq(); | |
255 | |
256 old_buffer_is_sorted_ = true; | |
257 | |
258 // Filtering hash sets are inconsistent with the store buffer after this | |
259 // operation. | |
260 ClearFilteringHashSets(); | |
261 } | |
262 | |
263 | |
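The removed SortUniq() sorts the old buffer so that duplicate slot addresses become adjacent and then lets Uniq() discard them. The same sort-then-dedup step expressed with standard algorithms over a std::vector, purely as an illustration (the real buffer is a raw committed memory region, not a vector):

#include <algorithm>
#include <cstdint>
#include <vector>

// Sorts recorded slot addresses and removes duplicates, mirroring the
// std::sort + adjacent-dedup pair in the removed SortUniq()/Uniq().
void SortAndDedup(std::vector<uintptr_t>* slots) {
  std::sort(slots->begin(), slots->end());
  slots->erase(std::unique(slots->begin(), slots->end()), slots->end());
}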
264 bool StoreBuffer::PrepareForIteration() { | 230 bool StoreBuffer::PrepareForIteration() { |
265 Compact(); | 231 Compact(); |
266 PointerChunkIterator it(heap_); | 232 PointerChunkIterator it(heap_); |
267 MemoryChunk* chunk; | 233 MemoryChunk* chunk; |
268 bool page_has_scan_on_scavenge_flag = false; | 234 bool page_has_scan_on_scavenge_flag = false; |
269 while ((chunk = it.next()) != NULL) { | 235 while ((chunk = it.next()) != NULL) { |
270 if (chunk->scan_on_scavenge()) { | 236 if (chunk->scan_on_scavenge()) { |
271 page_has_scan_on_scavenge_flag = true; | 237 page_has_scan_on_scavenge_flag = true; |
272 break; | 238 break; |
273 } | 239 } |
274 } | 240 } |
275 | 241 |
276 if (page_has_scan_on_scavenge_flag) { | 242 if (page_has_scan_on_scavenge_flag) { |
277 Filter(MemoryChunk::SCAN_ON_SCAVENGE); | 243 Filter(MemoryChunk::SCAN_ON_SCAVENGE); |
278 } | 244 } |
279 | 245 |
280 // Filtering hash sets are inconsistent with the store buffer after | 246 // Filtering hash sets are inconsistent with the store buffer after |
281 // iteration. | 247 // iteration. |
282 ClearFilteringHashSets(); | 248 ClearFilteringHashSets(); |
283 | 249 |
284 return page_has_scan_on_scavenge_flag; | 250 return page_has_scan_on_scavenge_flag; |
285 } | 251 } |
286 | 252 |
287 | 253 |
288 #ifdef DEBUG | |
289 void StoreBuffer::Clean() { | |
290 ClearFilteringHashSets(); | |
291 Uniq(); // Also removes things that no longer point to new space. | |
292 EnsureSpace(kStoreBufferSize / 2); | |
293 } | |
294 | |
295 | |
296 static Address* in_store_buffer_1_element_cache = NULL; | |
297 | |
298 | |
299 bool StoreBuffer::CellIsInStoreBuffer(Address cell_address) { | |
300 if (!FLAG_enable_slow_asserts) return true; | |
301 if (in_store_buffer_1_element_cache != NULL && | |
302 *in_store_buffer_1_element_cache == cell_address) { | |
303 return true; | |
304 } | |
305 Address* top = reinterpret_cast<Address*>(heap_->store_buffer_top()); | |
306 for (Address* current = top - 1; current >= start_; current--) { | |
307 if (*current == cell_address) { | |
308 in_store_buffer_1_element_cache = current; | |
309 return true; | |
310 } | |
311 } | |
312 for (Address* current = old_top_ - 1; current >= old_start_; current--) { | |
313 if (*current == cell_address) { | |
314 in_store_buffer_1_element_cache = current; | |
315 return true; | |
316 } | |
317 } | |
318 return false; | |
319 } | |
320 #endif | |
321 | |
322 | |
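The debug-only CellIsInStoreBuffer() removed above answers membership queries with a backwards linear scan of both buffers, but remembers the last matching location so repeated slow asserts on the same slot stay cheap. A sketch of that one-element-cache lookup over a plain array (names are illustrative, not V8 API):

#include <cstdint>

// Location of the most recent successful lookup; probed before scanning.
static const uintptr_t* last_hit = nullptr;

// Returns true if value occurs in [begin, end), scanning backwards and
// consulting the one-element cache first.
bool ContainsWithCache(const uintptr_t* begin, const uintptr_t* end,
                       uintptr_t value) {
  if (last_hit != nullptr && last_hit >= begin && last_hit < end &&
      *last_hit == value) {
    return true;
  }
  for (const uintptr_t* p = end; p != begin;) {
    --p;
    if (*p == value) {
      last_hit = p;
      return true;
    }
  }
  return false;
}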
323 void StoreBuffer::ClearFilteringHashSets() { | 254 void StoreBuffer::ClearFilteringHashSets() { |
324 if (!hash_sets_are_empty_) { | 255 if (!hash_sets_are_empty_) { |
325 memset(reinterpret_cast<void*>(hash_set_1_), 0, | 256 memset(reinterpret_cast<void*>(hash_set_1_), 0, |
326 sizeof(uintptr_t) * kHashSetLength); | 257 sizeof(uintptr_t) * kHashSetLength); |
327 memset(reinterpret_cast<void*>(hash_set_2_), 0, | 258 memset(reinterpret_cast<void*>(hash_set_2_), 0, |
328 sizeof(uintptr_t) * kHashSetLength); | 259 sizeof(uintptr_t) * kHashSetLength); |
329 hash_sets_are_empty_ = true; | 260 hash_sets_are_empty_ = true; |
330 } | 261 } |
331 } | 262 } |
332 | 263 |
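ClearFilteringHashSets() guards its two memsets with hash_sets_are_empty_, so the invalidation calls sprinkled through the functions above only pay for one actual clear until the sets are written again. The same guard pattern in isolation (length and names are illustrative, not V8's values):

#include <cstdint>
#include <cstring>

const int kSetLength = 1 << 10;  // Illustrative size only.
static uintptr_t hash_set[kSetLength];
static bool hash_set_is_empty = true;

// Clears the set only if it may have been written since the last clear.
void ClearIfDirty() {
  if (!hash_set_is_empty) {
    memset(hash_set, 0, sizeof(hash_set));
    hash_set_is_empty = true;
  }
}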
(...skipping 302 matching lines...)
635 } | 566 } |
636 old_buffer_is_sorted_ = false; | 567 old_buffer_is_sorted_ = false; |
637 old_buffer_is_filtered_ = false; | 568 old_buffer_is_filtered_ = false; |
638 *old_top_++ = reinterpret_cast<Address>(int_addr << kPointerSizeLog2); | 569 *old_top_++ = reinterpret_cast<Address>(int_addr << kPointerSizeLog2); |
639 DCHECK(old_top_ <= old_limit_); | 570 DCHECK(old_top_ <= old_limit_); |
640 } | 571 } |
641 heap_->isolate()->counters()->store_buffer_compactions()->Increment(); | 572 heap_->isolate()->counters()->store_buffer_compactions()->Increment(); |
642 } | 573 } |
643 } | 574 } |
644 } // namespace v8::internal | 575 } // namespace v8::internal |
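The tail of Compact() above rebuilds each slot address by shifting int_addr left by kPointerSizeLog2, which implies the elided part of the loop keeps addresses in a right-shifted form while they pass through the hash sets. The shift pair is lossless only because slot addresses are pointer-aligned, so the discarded low bits are zero. A tiny round-trip sketch (constants assumed for a 64-bit build, not taken from V8's headers):

#include <cassert>
#include <cstdint>

const int kPointerSize = 8;      // Assumed 64-bit target.
const int kPointerSizeLog2 = 3;  // log2(kPointerSize).

int main() {
  uintptr_t slot = 0x7f1234567890;   // Some pointer-aligned slot address.
  assert(slot % kPointerSize == 0);  // Alignment is what makes this lossless.
  uintptr_t stored = slot >> kPointerSizeLog2;      // Form kept while hashed.
  uintptr_t restored = stored << kPointerSizeLog2;  // What Compact() writes back.
  assert(restored == slot);
  return 0;
}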