OLD | NEW |
1 // Copyright 2011 the V8 project authors. All rights reserved. | 1 // Copyright 2011 the V8 project authors. All rights reserved. |
2 // Redistribution and use in source and binary forms, with or without | 2 // Redistribution and use in source and binary forms, with or without |
3 // modification, are permitted provided that the following conditions are | 3 // modification, are permitted provided that the following conditions are |
4 // met: | 4 // met: |
5 // | 5 // |
6 // * Redistributions of source code must retain the above copyright | 6 // * Redistributions of source code must retain the above copyright |
7 // notice, this list of conditions and the following disclaimer. | 7 // notice, this list of conditions and the following disclaimer. |
8 // * Redistributions in binary form must reproduce the above | 8 // * Redistributions in binary form must reproduce the above |
9 // copyright notice, this list of conditions and the following | 9 // copyright notice, this list of conditions and the following |
10 // disclaimer in the documentation and/or other materials provided | 10 // disclaimer in the documentation and/or other materials provided |
(...skipping 31 matching lines...)
42 old_limit_(NULL), | 42 old_limit_(NULL), |
43 old_top_(NULL), | 43 old_top_(NULL), |
44 old_reserved_limit_(NULL), | 44 old_reserved_limit_(NULL), |
45 old_buffer_is_sorted_(false), | 45 old_buffer_is_sorted_(false), |
46 old_buffer_is_filtered_(false), | 46 old_buffer_is_filtered_(false), |
47 during_gc_(false), | 47 during_gc_(false), |
48 store_buffer_rebuilding_enabled_(false), | 48 store_buffer_rebuilding_enabled_(false), |
49 callback_(NULL), | 49 callback_(NULL), |
50 may_move_store_buffer_entries_(true), | 50 may_move_store_buffer_entries_(true), |
51 virtual_memory_(NULL), | 51 virtual_memory_(NULL), |
52 hash_map_1_(NULL), | 52 hash_set_1_(NULL), |
53 hash_map_2_(NULL) { | 53 hash_set_2_(NULL), |
| 54 hash_sets_are_empty_(true) { |
54 } | 55 } |
55 | 56 |
56 | 57 |
57 void StoreBuffer::Setup() { | 58 void StoreBuffer::Setup() { |
58 virtual_memory_ = new VirtualMemory(kStoreBufferSize * 3); | 59 virtual_memory_ = new VirtualMemory(kStoreBufferSize * 3); |
59 uintptr_t start_as_int = | 60 uintptr_t start_as_int = |
60 reinterpret_cast<uintptr_t>(virtual_memory_->address()); | 61 reinterpret_cast<uintptr_t>(virtual_memory_->address()); |
61 start_ = | 62 start_ = |
62 reinterpret_cast<Address*>(RoundUp(start_as_int, kStoreBufferSize * 2)); | 63 reinterpret_cast<Address*>(RoundUp(start_as_int, kStoreBufferSize * 2)); |
63 limit_ = start_ + (kStoreBufferSize / kPointerSize); | 64 limit_ = start_ + (kStoreBufferSize / kPointerSize); |
(...skipping 26 matching lines...)
90 USE(vm_limit); | 91 USE(vm_limit); |
91 ASSERT((reinterpret_cast<uintptr_t>(limit_) & kStoreBufferOverflowBit) != 0); | 92 ASSERT((reinterpret_cast<uintptr_t>(limit_) & kStoreBufferOverflowBit) != 0); |
92 ASSERT((reinterpret_cast<uintptr_t>(limit_ - 1) & kStoreBufferOverflowBit) == | 93 ASSERT((reinterpret_cast<uintptr_t>(limit_ - 1) & kStoreBufferOverflowBit) == |
93 0); | 94 0); |
94 | 95 |
95 CHECK(virtual_memory_->Commit(reinterpret_cast<Address>(start_), | 96 CHECK(virtual_memory_->Commit(reinterpret_cast<Address>(start_), |
96 kStoreBufferSize, | 97 kStoreBufferSize, |
97 false)); // Not executable. | 98 false)); // Not executable. |
98 heap_->public_set_store_buffer_top(start_); | 99 heap_->public_set_store_buffer_top(start_); |
99 | 100 |
100 hash_map_1_ = new uintptr_t[kHashMapLength]; | 101 hash_set_1_ = new uintptr_t[kHashSetLength]; |
101 hash_map_2_ = new uintptr_t[kHashMapLength]; | 102 hash_set_2_ = new uintptr_t[kHashSetLength]; |
| 103 hash_sets_are_empty_ = false; |
102 | 104 |
103 ZapHashTables(); | 105 ClearFilteringHashSets(); |
104 } | 106 } |
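Setup() reserves three times the store buffer size and rounds the start up to a 2x-buffer-size boundary, so the buffer always fits inside the reservation and limit_ is the only address in range with the kStoreBufferOverflowBit set (the ASSERTs on limit_ above check exactly that); an overflow can then be detected with a single bit test on the top pointer. A minimal stand-alone sketch of the same trick, using made-up constants and plain malloc in place of VirtualMemory:

#include <cassert>
#include <cstdint>
#include <cstdlib>

int main() {
  const uintptr_t kBufferSize = 1 << 16;       // assumed power-of-two size
  const uintptr_t kOverflowBit = kBufferSize;  // the bit at log2(kBufferSize)

  // Reserve 3x the needed space so a 2x-aligned start always fits inside.
  void* reservation = malloc(kBufferSize * 3);
  uintptr_t raw = reinterpret_cast<uintptr_t>(reservation);

  // Round the start up to a multiple of 2 * kBufferSize.
  uintptr_t start = (raw + kBufferSize * 2 - 1) & ~(kBufferSize * 2 - 1);
  uintptr_t limit = start + kBufferSize;

  // All in-buffer addresses have the overflow bit clear; the limit has it
  // set, so crossing the limit flips one predictable bit of the pointer.
  assert((limit & kOverflowBit) != 0);
  assert(((limit - 1) & kOverflowBit) == 0);

  free(reservation);
  return 0;
}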
105 | 107 |
106 | 108 |
107 void StoreBuffer::TearDown() { | 109 void StoreBuffer::TearDown() { |
108 delete virtual_memory_; | 110 delete virtual_memory_; |
109 delete old_virtual_memory_; | 111 delete old_virtual_memory_; |
110 delete[] hash_map_1_; | 112 delete[] hash_set_1_; |
111 delete[] hash_map_2_; | 113 delete[] hash_set_2_; |
112 old_start_ = old_top_ = old_limit_ = old_reserved_limit_ = NULL; | 114 old_start_ = old_top_ = old_limit_ = old_reserved_limit_ = NULL; |
113 start_ = limit_ = NULL; | 115 start_ = limit_ = NULL; |
114 heap_->public_set_store_buffer_top(start_); | 116 heap_->public_set_store_buffer_top(start_); |
115 } | 117 } |
116 | 118 |
117 | 119 |
118 void StoreBuffer::StoreBufferOverflow(Isolate* isolate) { | 120 void StoreBuffer::StoreBufferOverflow(Isolate* isolate) { |
119 isolate->heap()->store_buffer()->Compact(); | 121 isolate->heap()->store_buffer()->Compact(); |
120 } | 122 } |
121 | 123 |
(...skipping 19 matching lines...)
141 intptr_t b = | 143 intptr_t b = |
142 reinterpret_cast<intptr_t>(*reinterpret_cast<const Address*>(void_b)); | 144 reinterpret_cast<intptr_t>(*reinterpret_cast<const Address*>(void_b)); |
143 ASSERT(sizeof(1) == sizeof(a)); | 145 ASSERT(sizeof(1) == sizeof(a)); |
144 // Shift down to avoid wraparound. | 146 // Shift down to avoid wraparound. |
145 return (a >> kPointerSizeLog2) - (b >> kPointerSizeLog2); | 147 return (a >> kPointerSizeLog2) - (b >> kPointerSizeLog2); |
146 } | 148 } |
147 #endif | 149 #endif |
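In the comparator above, ASSERT(sizeof(1) == sizeof(a)) records that int (the type of the literal 1) and intptr_t have the same width on this path, and shifting both operands down by kPointerSizeLog2 before subtracting keeps the difference from wrapping around when the two addresses sit on opposite sides of the signed range. An illustrative comparator, not V8's, with an assumed kLog2 of 2 (a 32-bit pointer size):

#include <cstdint>
#include <cstdio>
#include <cstdlib>

static const int kLog2 = 2;  // assumed log2 of the pointer size

static int CompareShifted(const void* void_a, const void* void_b) {
  intptr_t a = *reinterpret_cast<const intptr_t*>(void_a);
  intptr_t b = *reinterpret_cast<const intptr_t*>(void_b);
  // a - b can overflow when a and b have opposite signs; the shifted values
  // are small enough that their difference always fits (and on the intended
  // 32-bit target the cast to int is a no-op).
  return static_cast<int>((a >> kLog2) - (b >> kLog2));
}

int main() {
  intptr_t values[] = {400, 16, 256, 16};
  qsort(values, 4, sizeof(values[0]), &CompareShifted);
  for (int i = 0; i < 4; i++) printf("%ld\n", static_cast<long>(values[i]));
  return 0;
}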
148 | 150 |
149 | 151 |
150 void StoreBuffer::Uniq() { | 152 void StoreBuffer::Uniq() { |
151 ASSERT(HashTablesAreZapped()); | |
152 // Remove adjacent duplicates and cells that do not point at new space. | 153 // Remove adjacent duplicates and cells that do not point at new space. |
153 Address previous = NULL; | 154 Address previous = NULL; |
154 Address* write = old_start_; | 155 Address* write = old_start_; |
155 ASSERT(may_move_store_buffer_entries_); | 156 ASSERT(may_move_store_buffer_entries_); |
156 for (Address* read = old_start_; read < old_top_; read++) { | 157 for (Address* read = old_start_; read < old_top_; read++) { |
157 Address current = *read; | 158 Address current = *read; |
158 if (current != previous) { | 159 if (current != previous) { |
159 if (heap_->InNewSpace(*reinterpret_cast<Object**>(current))) { | 160 if (heap_->InNewSpace(*reinterpret_cast<Object**>(current))) { |
160 *write++ = current; | 161 *write++ = current; |
161 } | 162 } |
(...skipping 103 matching lines...)
265 containing_chunk = previous_chunk; | 266 containing_chunk = previous_chunk; |
266 } else { | 267 } else { |
267 containing_chunk = MemoryChunk::FromAnyPointerAddress(addr); | 268 containing_chunk = MemoryChunk::FromAnyPointerAddress(addr); |
268 previous_chunk = containing_chunk; | 269 previous_chunk = containing_chunk; |
269 } | 270 } |
270 if (!containing_chunk->IsFlagSet(flag)) { | 271 if (!containing_chunk->IsFlagSet(flag)) { |
271 *new_top++ = addr; | 272 *new_top++ = addr; |
272 } | 273 } |
273 } | 274 } |
274 old_top_ = new_top; | 275 old_top_ = new_top; |
| 276 |
| 277 // Hash sets are inconsistent with the store buffer after this operation. |
| 278 ClearFilteringHashSets(); |
275 } | 279 } |
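Filter() makes one pass over the old buffer, looking up the memory chunk for each address but reusing the previously found chunk when consecutive addresses fall on the same one (the exact guard is in the elided lines above), and keeps only addresses whose chunk does not have the given flag set. A compacted sketch of that loop shape, with an invented Page type and FindPage lookup standing in for MemoryChunk and MemoryChunk::FromAnyPointerAddress:

#include <cstddef>
#include <cstdint>
#include <vector>

struct Page {
  uintptr_t start;
  uintptr_t size;
  bool skip_flag;  // stands in for MemoryChunk::IsFlagSet(flag)
  bool Contains(uintptr_t addr) const {
    return addr >= start && addr < start + size;
  }
};

// Hypothetical slow lookup; the cached page below avoids calling it for
// runs of addresses on the same page.
static Page* FindPage(std::vector<Page>& pages, uintptr_t addr) {
  for (Page& p : pages) {
    if (p.Contains(addr)) return &p;
  }
  return nullptr;
}

// Compacts 'slots' in place, dropping entries on pages with skip_flag set.
static void FilterSlots(std::vector<uintptr_t>& slots,
                        std::vector<Page>& pages) {
  Page* previous = nullptr;
  size_t write = 0;
  for (uintptr_t addr : slots) {
    Page* page = (previous != nullptr && previous->Contains(addr))
                     ? previous
                     : FindPage(pages, addr);
    previous = page;
    if (page != nullptr && !page->skip_flag) slots[write++] = addr;
  }
  slots.resize(write);
}

int main() {
  std::vector<Page> pages = {{0x1000, 0x1000, false}, {0x2000, 0x1000, true}};
  std::vector<uintptr_t> slots = {0x1010, 0x1018, 0x2020, 0x1020};
  FilterSlots(slots, pages);  // keeps 0x1010, 0x1018 and 0x1020
  return 0;
}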
276 | 280 |
277 | 281 |
278 void StoreBuffer::SortUniq() { | 282 void StoreBuffer::SortUniq() { |
279 Compact(); | 283 Compact(); |
280 if (old_buffer_is_sorted_) return; | 284 if (old_buffer_is_sorted_) return; |
281 ZapHashTables(); | |
282 qsort(reinterpret_cast<void*>(old_start_), | 285 qsort(reinterpret_cast<void*>(old_start_), |
283 old_top_ - old_start_, | 286 old_top_ - old_start_, |
284 sizeof(*old_top_), | 287 sizeof(*old_top_), |
285 &CompareAddresses); | 288 &CompareAddresses); |
286 Uniq(); | 289 Uniq(); |
287 | 290 |
288 old_buffer_is_sorted_ = true; | 291 old_buffer_is_sorted_ = true; |
| 292 |
| 293 // Hash sets are inconsistent with the store buffer after this operation. |
| 294 ClearFilteringHashSets(); |
289 } | 295 } |
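SortUniq() first Compact()s any pending entries, qsorts the old buffer, and then relies on Uniq() above to squeeze out adjacent duplicates, plus slots that no longer point into new space, in one in-place pass. A stand-alone sketch of that sort-then-dedup shape over plain integers, with the new-space test omitted:

#include <algorithm>
#include <cassert>
#include <cstdint>
#include <vector>

static void SortAndDedup(std::vector<uintptr_t>& entries) {
  std::sort(entries.begin(), entries.end());
  // Like Uniq(): keep only the first of each run of equal values, writing
  // the survivors back over the front of the buffer.
  size_t write = 0;
  for (size_t read = 0; read < entries.size(); read++) {
    if (read == 0 || entries[read] != entries[read - 1]) {
      entries[write++] = entries[read];
    }
  }
  entries.resize(write);
}

int main() {
  std::vector<uintptr_t> entries = {48, 16, 16, 32, 48, 48};
  SortAndDedup(entries);
  assert((entries == std::vector<uintptr_t>{16, 32, 48}));
  return 0;
}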
290 | 296 |
291 | 297 |
292 bool StoreBuffer::PrepareForIteration() { | 298 bool StoreBuffer::PrepareForIteration() { |
293 Compact(); | 299 Compact(); |
294 PointerChunkIterator it(heap_); | 300 PointerChunkIterator it(heap_); |
295 MemoryChunk* chunk; | 301 MemoryChunk* chunk; |
296 bool page_has_scan_on_scavenge_flag = false; | 302 bool page_has_scan_on_scavenge_flag = false; |
297 while ((chunk = it.next()) != NULL) { | 303 while ((chunk = it.next()) != NULL) { |
298 if (chunk->scan_on_scavenge()) page_has_scan_on_scavenge_flag = true; | 304 if (chunk->scan_on_scavenge()) page_has_scan_on_scavenge_flag = true; |
299 } | 305 } |
300 | 306 |
301 if (page_has_scan_on_scavenge_flag) { | 307 if (page_has_scan_on_scavenge_flag) { |
302 Filter(MemoryChunk::SCAN_ON_SCAVENGE); | 308 Filter(MemoryChunk::SCAN_ON_SCAVENGE); |
303 } | 309 } |
304 ZapHashTables(); | 310 |
| 311 // Hash sets are inconsistent with the store buffer after iteration. |
| 312 ClearFilteringHashSets(); |
| 313 |
305 return page_has_scan_on_scavenge_flag; | 314 return page_has_scan_on_scavenge_flag; |
306 } | 315 } |
307 | 316 |
308 | 317 |
309 #ifdef DEBUG | 318 #ifdef DEBUG |
310 void StoreBuffer::Clean() { | 319 void StoreBuffer::Clean() { |
311 ZapHashTables(); | 320 ClearFilteringHashSets(); |
312 Uniq(); // Also removes things that no longer point to new space. | 321 Uniq(); // Also removes things that no longer point to new space. |
313 CheckForFullBuffer(); | 322 CheckForFullBuffer(); |
314 } | 323 } |
315 | 324 |
316 | 325 |
317 static bool Zapped(char* start, int size) { | |
318 for (int i = 0; i < size; i++) { | |
319 if (start[i] != 0) return false; | |
320 } | |
321 return true; | |
322 } | |
323 | |
324 | |
325 bool StoreBuffer::HashTablesAreZapped() { | |
326 return Zapped(reinterpret_cast<char*>(hash_map_1_), | |
327 sizeof(uintptr_t) * kHashMapLength) && | |
328 Zapped(reinterpret_cast<char*>(hash_map_2_), | |
329 sizeof(uintptr_t) * kHashMapLength); | |
330 } | |
331 | |
332 | |
333 static Address* in_store_buffer_1_element_cache = NULL; | 326 static Address* in_store_buffer_1_element_cache = NULL; |
334 | 327 |
335 | 328 |
336 bool StoreBuffer::CellIsInStoreBuffer(Address cell_address) { | 329 bool StoreBuffer::CellIsInStoreBuffer(Address cell_address) { |
337 if (!FLAG_enable_slow_asserts) return true; | 330 if (!FLAG_enable_slow_asserts) return true; |
338 if (in_store_buffer_1_element_cache != NULL && | 331 if (in_store_buffer_1_element_cache != NULL && |
339 *in_store_buffer_1_element_cache == cell_address) { | 332 *in_store_buffer_1_element_cache == cell_address) { |
340 return true; | 333 return true; |
341 } | 334 } |
342 Address* top = reinterpret_cast<Address*>(heap_->store_buffer_top()); | 335 Address* top = reinterpret_cast<Address*>(heap_->store_buffer_top()); |
343 for (Address* current = top - 1; current >= start_; current--) { | 336 for (Address* current = top - 1; current >= start_; current--) { |
344 if (*current == cell_address) { | 337 if (*current == cell_address) { |
345 in_store_buffer_1_element_cache = current; | 338 in_store_buffer_1_element_cache = current; |
346 return true; | 339 return true; |
347 } | 340 } |
348 } | 341 } |
349 for (Address* current = old_top_ - 1; current >= old_start_; current--) { | 342 for (Address* current = old_top_ - 1; current >= old_start_; current--) { |
350 if (*current == cell_address) { | 343 if (*current == cell_address) { |
351 in_store_buffer_1_element_cache = current; | 344 in_store_buffer_1_element_cache = current; |
352 return true; | 345 return true; |
353 } | 346 } |
354 } | 347 } |
355 return false; | 348 return false; |
356 } | 349 } |
357 #endif | 350 #endif |
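CellIsInStoreBuffer() is a slow, debug-only membership scan over both buffers, sped up by a one-element cache that remembers where the previous query was found, so repeated slow asserts on the same slot return immediately. A small illustration of that caching pattern, with invented names and a forward scan instead of the backward one used above:

#include <cstdint>

static const uintptr_t* last_hit_cache = nullptr;

static bool Contains(const uintptr_t* begin, const uintptr_t* end,
                     uintptr_t value) {
  // Fast path: the last successful query asked for the same value.
  if (last_hit_cache != nullptr && *last_hit_cache == value) return true;
  for (const uintptr_t* p = begin; p < end; p++) {
    if (*p == value) {
      last_hit_cache = p;  // remember the hit for the next query
      return true;
    }
  }
  return false;
}

int main() {
  uintptr_t buffer[] = {8, 24, 40};
  bool hit = Contains(buffer, buffer + 3, 24);    // linear scan, caches the hit
  hit = Contains(buffer, buffer + 3, 24) && hit;  // answered from the cache
  return hit ? 0 : 1;
}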
358 | 351 |
359 | 352 |
360 void StoreBuffer::ZapHashTables() { | 353 void StoreBuffer::ClearFilteringHashSets() { |
361 memset(reinterpret_cast<void*>(hash_map_1_), | 354 if (!hash_sets_are_empty_) { |
362 0, | 355 memset(reinterpret_cast<void*>(hash_set_1_), |
363 sizeof(uintptr_t) * kHashMapLength); | 356 0, |
364 memset(reinterpret_cast<void*>(hash_map_2_), | 357 sizeof(uintptr_t) * kHashSetLength); |
365 0, | 358 memset(reinterpret_cast<void*>(hash_set_2_), |
366 sizeof(uintptr_t) * kHashMapLength); | 359 0, |
| 360 sizeof(uintptr_t) * kHashSetLength); |
| 361 hash_sets_are_empty_ = true; |
| 362 } |
367 } | 363 } |
368 | 364 |
369 | 365 |
370 void StoreBuffer::GCPrologue() { | 366 void StoreBuffer::GCPrologue() { |
371 ZapHashTables(); | 367 ClearFilteringHashSets(); |
372 during_gc_ = true; | 368 during_gc_ = true; |
373 } | 369 } |
374 | 370 |
375 | 371 |
376 #ifdef DEBUG | 372 #ifdef DEBUG |
377 static void DummyScavengePointer(HeapObject** p, HeapObject* o) { | 373 static void DummyScavengePointer(HeapObject** p, HeapObject* o) { |
378 // Do nothing. | 374 // Do nothing. |
379 } | 375 } |
380 | 376 |
381 | 377 |
(...skipping 287 matching lines...)
669 if (top == start_) return; | 665 if (top == start_) return; |
670 | 666 |
671 // There's no check of the limit in the loop below so we check here for | 667 // There's no check of the limit in the loop below so we check here for |
672 // the worst case (compaction doesn't eliminate any pointers). | 668 // the worst case (compaction doesn't eliminate any pointers). |
673 ASSERT(top <= limit_); | 669 ASSERT(top <= limit_); |
674 heap_->public_set_store_buffer_top(start_); | 670 heap_->public_set_store_buffer_top(start_); |
675 EnsureSpace(top - start_); | 671 EnsureSpace(top - start_); |
676 ASSERT(may_move_store_buffer_entries_); | 672 ASSERT(may_move_store_buffer_entries_); |
677 // Goes through the addresses in the store buffer attempting to remove | 673 // Goes through the addresses in the store buffer attempting to remove |
678 // duplicates. In the interest of speed this is a lossy operation. Some | 674 // duplicates. In the interest of speed this is a lossy operation. Some |
679 // duplicates will remain. We have two hash tables with different hash | 675 // duplicates will remain. We have two hash sets with different hash |
680 // functions to reduce the number of unnecessary clashes. | 676 // functions to reduce the number of unnecessary clashes. |
| 677 hash_sets_are_empty_ = false; // Hash sets are in use. |
681 for (Address* current = start_; current < top; current++) { | 678 for (Address* current = start_; current < top; current++) { |
682 ASSERT(!heap_->cell_space()->Contains(*current)); | 679 ASSERT(!heap_->cell_space()->Contains(*current)); |
683 ASSERT(!heap_->code_space()->Contains(*current)); | 680 ASSERT(!heap_->code_space()->Contains(*current)); |
684 ASSERT(!heap_->old_data_space()->Contains(*current)); | 681 ASSERT(!heap_->old_data_space()->Contains(*current)); |
685 uintptr_t int_addr = reinterpret_cast<uintptr_t>(*current); | 682 uintptr_t int_addr = reinterpret_cast<uintptr_t>(*current); |
686 // Shift out the last bits including any tags. | 683 // Shift out the last bits including any tags. |
687 int_addr >>= kPointerSizeLog2; | 684 int_addr >>= kPointerSizeLog2; |
688 int hash1 = | 685 int hash1 = |
689 ((int_addr ^ (int_addr >> kHashMapLengthLog2)) & (kHashMapLength - 1)); | 686 ((int_addr ^ (int_addr >> kHashSetLengthLog2)) & (kHashSetLength - 1)); |
690 if (hash_map_1_[hash1] == int_addr) continue; | 687 if (hash_set_1_[hash1] == int_addr) continue; |
691 int hash2 = | 688 int hash2 = |
692 ((int_addr - (int_addr >> kHashMapLengthLog2)) & (kHashMapLength - 1)); | 689 ((int_addr - (int_addr >> kHashSetLengthLog2)) & (kHashSetLength - 1)); |
693 hash2 ^= hash2 >> (kHashMapLengthLog2 * 2); | 690 hash2 ^= hash2 >> (kHashSetLengthLog2 * 2); |
694 if (hash_map_2_[hash2] == int_addr) continue; | 691 if (hash_set_2_[hash2] == int_addr) continue; |
695 if (hash_map_1_[hash1] == 0) { | 692 if (hash_set_1_[hash1] == 0) { |
696 hash_map_1_[hash1] = int_addr; | 693 hash_set_1_[hash1] = int_addr; |
697 } else if (hash_map_2_[hash2] == 0) { | 694 } else if (hash_set_2_[hash2] == 0) { |
698 hash_map_2_[hash2] = int_addr; | 695 hash_set_2_[hash2] = int_addr; |
699 } else { | 696 } else { |
700 // Rather than slowing down we just throw away some entries. This will | 697 // Rather than slowing down we just throw away some entries. This will |
701 // cause some duplicates to remain undetected. | 698 // cause some duplicates to remain undetected. |
702 hash_map_1_[hash1] = int_addr; | 699 hash_set_1_[hash1] = int_addr; |
703 hash_map_2_[hash2] = 0; | 700 hash_set_2_[hash2] = 0; |
704 } | 701 } |
705 old_buffer_is_sorted_ = false; | 702 old_buffer_is_sorted_ = false; |
706 old_buffer_is_filtered_ = false; | 703 old_buffer_is_filtered_ = false; |
707 *old_top_++ = reinterpret_cast<Address>(int_addr << kPointerSizeLog2); | 704 *old_top_++ = reinterpret_cast<Address>(int_addr << kPointerSizeLog2); |
708 ASSERT(old_top_ <= old_limit_); | 705 ASSERT(old_top_ <= old_limit_); |
709 } | 706 } |
710 heap_->isolate()->counters()->store_buffer_compactions()->Increment(); | 707 heap_->isolate()->counters()->store_buffer_compactions()->Increment(); |
711 CheckForFullBuffer(); | 708 CheckForFullBuffer(); |
712 } | 709 } |
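Compact() drains the write-barrier entries accumulated since the last compaction into the old buffer, using the two fixed-size hash sets as a lossy duplicate filter: a hit in either set means the address was inserted recently and can be skipped, a miss records it in whichever slot is free, and a double collision simply evicts, so some duplicates survive (which the consumers tolerate). A stand-alone sketch of that filter under assumed sizes and simplified hash mixing; kSetLengthLog2 and ProbablySeen are made-up names, not V8's:

#include <cstdint>
#include <vector>

static const int kSetLengthLog2 = 8;
static const int kSetLength = 1 << kSetLengthLog2;  // 256 slots per set
static uintptr_t set1[kSetLength];  // zero means "slot empty"
static uintptr_t set2[kSetLength];

// Returns true if 'key' (a shifted, nonzero address) was probably seen
// recently; false means it is definitely new and has now been recorded.
static bool ProbablySeen(uintptr_t key) {
  int hash1 =
      static_cast<int>((key ^ (key >> kSetLengthLog2)) & (kSetLength - 1));
  if (set1[hash1] == key) return true;
  int hash2 =
      static_cast<int>((key - (key >> kSetLengthLog2)) & (kSetLength - 1));
  if (set2[hash2] == key) return true;
  if (set1[hash1] == 0) {
    set1[hash1] = key;
  } else if (set2[hash2] == 0) {
    set2[hash2] = key;
  } else {
    // Both slots taken: overwrite instead of probing further. Duplicates of
    // the evicted key may now go undetected, which is acceptable because
    // the old buffer tolerates duplicates.
    set1[hash1] = key;
    set2[hash2] = 0;
  }
  return false;
}

int main() {
  uintptr_t input[] = {12, 34, 12, 56, 34, 12};
  std::vector<uintptr_t> deduped;
  for (uintptr_t key : input) {
    if (!ProbablySeen(key)) deduped.push_back(key);
  }
  // deduped holds {12, 34, 56}: exact here, lossy only under collisions.
  return 0;
}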
713 | 710 |
714 | 711 |
715 void StoreBuffer::CheckForFullBuffer() { | 712 void StoreBuffer::CheckForFullBuffer() { |
716 EnsureSpace(kStoreBufferSize * 2); | 713 EnsureSpace(kStoreBufferSize * 2); |
717 } | 714 } |
718 | 715 |
719 } } // namespace v8::internal | 716 } } // namespace v8::internal |