| OLD | NEW |
| 1 // Copyright 2011 the V8 project authors. All rights reserved. | 1 // Copyright 2011 the V8 project authors. All rights reserved. |
| 2 // Redistribution and use in source and binary forms, with or without | 2 // Redistribution and use in source and binary forms, with or without |
| 3 // modification, are permitted provided that the following conditions are | 3 // modification, are permitted provided that the following conditions are |
| 4 // met: | 4 // met: |
| 5 // | 5 // |
| 6 // * Redistributions of source code must retain the above copyright | 6 // * Redistributions of source code must retain the above copyright |
| 7 // notice, this list of conditions and the following disclaimer. | 7 // notice, this list of conditions and the following disclaimer. |
| 8 // * Redistributions in binary form must reproduce the above | 8 // * Redistributions in binary form must reproduce the above |
| 9 // copyright notice, this list of conditions and the following | 9 // copyright notice, this list of conditions and the following |
| 10 // disclaimer in the documentation and/or other materials provided | 10 // disclaimer in the documentation and/or other materials provided |
| (...skipping 30 matching lines...) |
| 41 old_start_(NULL), | 41 old_start_(NULL), |
| 42 old_limit_(NULL), | 42 old_limit_(NULL), |
| 43 old_top_(NULL), | 43 old_top_(NULL), |
| 44 old_buffer_is_sorted_(false), | 44 old_buffer_is_sorted_(false), |
| 45 old_buffer_is_filtered_(false), | 45 old_buffer_is_filtered_(false), |
| 46 during_gc_(false), | 46 during_gc_(false), |
| 47 store_buffer_rebuilding_enabled_(false), | 47 store_buffer_rebuilding_enabled_(false), |
| 48 callback_(NULL), | 48 callback_(NULL), |
| 49 may_move_store_buffer_entries_(true), | 49 may_move_store_buffer_entries_(true), |
| 50 virtual_memory_(NULL), | 50 virtual_memory_(NULL), |
| 51 hash_map_1_(NULL), | 51 hash_set_1_(NULL), |
| 52 hash_map_2_(NULL) { | 52 hash_set_2_(NULL), |
| | 53 hash_sets_are_empty_(true) { |
| 53 } | 54 } |
| 54 | 55 |
| 55 | 56 |
| 56 void StoreBuffer::Setup() { | 57 void StoreBuffer::Setup() { |
| 57 virtual_memory_ = new VirtualMemory(kStoreBufferSize * 3); | 58 virtual_memory_ = new VirtualMemory(kStoreBufferSize * 3); |
| 58 uintptr_t start_as_int = | 59 uintptr_t start_as_int = |
| 59 reinterpret_cast<uintptr_t>(virtual_memory_->address()); | 60 reinterpret_cast<uintptr_t>(virtual_memory_->address()); |
| 60 start_ = | 61 start_ = |
| 61 reinterpret_cast<Address*>(RoundUp(start_as_int, kStoreBufferSize * 2)); | 62 reinterpret_cast<Address*>(RoundUp(start_as_int, kStoreBufferSize * 2)); |
| 62 limit_ = start_ + (kStoreBufferSize / sizeof(*start_)); | 63 limit_ = start_ + (kStoreBufferSize / sizeof(*start_)); |
| (...skipping 11 matching lines...) |
| 74 USE(vm_limit); | 75 USE(vm_limit); |
| 75 ASSERT((reinterpret_cast<uintptr_t>(limit_) & kStoreBufferOverflowBit) != 0); | 76 ASSERT((reinterpret_cast<uintptr_t>(limit_) & kStoreBufferOverflowBit) != 0); |
| 76 ASSERT((reinterpret_cast<uintptr_t>(limit_ - 1) & kStoreBufferOverflowBit) == | 77 ASSERT((reinterpret_cast<uintptr_t>(limit_ - 1) & kStoreBufferOverflowBit) == |
| 77 0); | 78 0); |
| 78 | 79 |
| 79 virtual_memory_->Commit(reinterpret_cast<Address>(start_), | 80 virtual_memory_->Commit(reinterpret_cast<Address>(start_), |
| 80 kStoreBufferSize, | 81 kStoreBufferSize, |
| 81 false); // Not executable. | 82 false); // Not executable. |
| 82 heap_->public_set_store_buffer_top(start_); | 83 heap_->public_set_store_buffer_top(start_); |
| 83 | 84 |
| 84 hash_map_1_ = new uintptr_t[kHashMapLength]; | 85 hash_set_1_ = new uintptr_t[kHashSetLength]; |
| 85 hash_map_2_ = new uintptr_t[kHashMapLength]; | 86 hash_set_2_ = new uintptr_t[kHashSetLength]; |
| | 87 hash_sets_are_empty_ = false; |
| 86 | 88 |
| 87 ZapHashTables(); | 89 ClearFilteringHashSets(); |
| 88 } | 90 } |
| 89 | 91 |
| 90 | 92 |
| 91 void StoreBuffer::TearDown() { | 93 void StoreBuffer::TearDown() { |
| 92 delete virtual_memory_; | 94 delete virtual_memory_; |
| 93 delete[] hash_map_1_; | 95 delete[] hash_set_1_; |
| 94 delete[] hash_map_2_; | 96 delete[] hash_set_2_; |
| 95 delete[] old_start_; | 97 delete[] old_start_; |
| 96 old_start_ = old_top_ = old_limit_ = NULL; | 98 old_start_ = old_top_ = old_limit_ = NULL; |
| 97 start_ = limit_ = NULL; | 99 start_ = limit_ = NULL; |
| 98 heap_->public_set_store_buffer_top(start_); | 100 heap_->public_set_store_buffer_top(start_); |
| 99 } | 101 } |
| 100 | 102 |
| 101 | 103 |
| 102 void StoreBuffer::StoreBufferOverflow(Isolate* isolate) { | 104 void StoreBuffer::StoreBufferOverflow(Isolate* isolate) { |
| 103 isolate->heap()->store_buffer()->Compact(); | 105 isolate->heap()->store_buffer()->Compact(); |
| 104 } | 106 } |
| (...skipping 20 matching lines...) |
| 125 intptr_t b = | 127 intptr_t b = |
| 126 reinterpret_cast<intptr_t>(*reinterpret_cast<const Address*>(void_b)); | 128 reinterpret_cast<intptr_t>(*reinterpret_cast<const Address*>(void_b)); |
| 127 ASSERT(sizeof(1) == sizeof(a)); | 129 ASSERT(sizeof(1) == sizeof(a)); |
| 128 // Shift down to avoid wraparound. | 130 // Shift down to avoid wraparound. |
| 129 return (a >> kPointerSizeLog2) - (b >> kPointerSizeLog2); | 131 return (a >> kPointerSizeLog2) - (b >> kPointerSizeLog2); |
| 130 } | 132 } |
| 131 #endif | 133 #endif |
| 132 | 134 |
| 133 | 135 |
| 134 void StoreBuffer::Uniq() { | 136 void StoreBuffer::Uniq() { |
| 135 ASSERT(HashTablesAreZapped()); | |
| 136 // Remove adjacent duplicates and cells that do not point at new space. | 137 // Remove adjacent duplicates and cells that do not point at new space. |
| 137 Address previous = NULL; | 138 Address previous = NULL; |
| 138 Address* write = old_start_; | 139 Address* write = old_start_; |
| 139 ASSERT(may_move_store_buffer_entries_); | 140 ASSERT(may_move_store_buffer_entries_); |
| 140 for (Address* read = old_start_; read < old_top_; read++) { | 141 for (Address* read = old_start_; read < old_top_; read++) { |
| 141 Address current = *read; | 142 Address current = *read; |
| 142 if (current != previous) { | 143 if (current != previous) { |
| 143 if (heap_->InNewSpace(*reinterpret_cast<Object**>(current))) { | 144 if (heap_->InNewSpace(*reinterpret_cast<Object**>(current))) { |
| 144 *write++ = current; | 145 *write++ = current; |
| 145 } | 146 } |
| (...skipping 92 matching lines...) |
| 238 containing_chunk = previous_chunk; | 239 containing_chunk = previous_chunk; |
| 239 } else { | 240 } else { |
| 240 containing_chunk = MemoryChunk::FromAnyPointerAddress(addr); | 241 containing_chunk = MemoryChunk::FromAnyPointerAddress(addr); |
| 241 previous_chunk = containing_chunk; | 242 previous_chunk = containing_chunk; |
| 242 } | 243 } |
| 243 if (!containing_chunk->IsFlagSet(flag)) { | 244 if (!containing_chunk->IsFlagSet(flag)) { |
| 244 *new_top++ = addr; | 245 *new_top++ = addr; |
| 245 } | 246 } |
| 246 } | 247 } |
| 247 old_top_ = new_top; | 248 old_top_ = new_top; |
| | 249 |
| | 250 // Filtering hash sets are inconsistent with the store buffer after this |
| | 251 // operation. |
| | 252 ClearFilteringHashSets(); |
| 248 } | 253 } |
| 249 | 254 |
| 250 | 255 |
| 251 void StoreBuffer::SortUniq() { | 256 void StoreBuffer::SortUniq() { |
| 252 Compact(); | 257 Compact(); |
| 253 if (old_buffer_is_sorted_) return; | 258 if (old_buffer_is_sorted_) return; |
| 254 ZapHashTables(); | |
| 255 qsort(reinterpret_cast<void*>(old_start_), | 259 qsort(reinterpret_cast<void*>(old_start_), |
| 256 old_top_ - old_start_, | 260 old_top_ - old_start_, |
| 257 sizeof(*old_top_), | 261 sizeof(*old_top_), |
| 258 &CompareAddresses); | 262 &CompareAddresses); |
| 259 Uniq(); | 263 Uniq(); |
| 260 | 264 |
| 261 old_buffer_is_sorted_ = true; | 265 old_buffer_is_sorted_ = true; |
| | 266 |
| | 267 // Filtering hash sets are inconsistent with the store buffer after this |
| | 268 // operation. |
| | 269 ClearFilteringHashSets(); |
| 262 } | 270 } |
| 263 | 271 |
| 264 | 272 |
| 265 bool StoreBuffer::PrepareForIteration() { | 273 bool StoreBuffer::PrepareForIteration() { |
| 266 Compact(); | 274 Compact(); |
| 267 PointerChunkIterator it(heap_); | 275 PointerChunkIterator it(heap_); |
| 268 MemoryChunk* chunk; | 276 MemoryChunk* chunk; |
| 269 bool page_has_scan_on_scavenge_flag = false; | 277 bool page_has_scan_on_scavenge_flag = false; |
| 270 while ((chunk = it.next()) != NULL) { | 278 while ((chunk = it.next()) != NULL) { |
| 271 if (chunk->scan_on_scavenge()) page_has_scan_on_scavenge_flag = true; | 279 if (chunk->scan_on_scavenge()) page_has_scan_on_scavenge_flag = true; |
| 272 } | 280 } |
| 273 | 281 |
| 274 if (page_has_scan_on_scavenge_flag) { | 282 if (page_has_scan_on_scavenge_flag) { |
| 275 Filter(MemoryChunk::SCAN_ON_SCAVENGE); | 283 Filter(MemoryChunk::SCAN_ON_SCAVENGE); |
| 276 } | 284 } |
| 277 ZapHashTables(); | 285 |
| | 286 // Filtering hash sets are inconsistent with the store buffer after |
| | 287 // iteration. |
| | 288 ClearFilteringHashSets(); |
| | 289 |
| 278 return page_has_scan_on_scavenge_flag; | 290 return page_has_scan_on_scavenge_flag; |
| 279 } | 291 } |
| 280 | 292 |
| 281 | 293 |
| 282 #ifdef DEBUG | 294 #ifdef DEBUG |
| 283 void StoreBuffer::Clean() { | 295 void StoreBuffer::Clean() { |
| 284 ZapHashTables(); | 296 ClearFilteringHashSets(); |
| 285 Uniq(); // Also removes things that no longer point to new space. | 297 Uniq(); // Also removes things that no longer point to new space. |
| 286 CheckForFullBuffer(); | 298 CheckForFullBuffer(); |
| 287 } | 299 } |
| 288 | 300 |
| 289 | 301 |
| 290 static bool Zapped(char* start, int size) { | |
| 291 for (int i = 0; i < size; i++) { | |
| 292 if (start[i] != 0) return false; | |
| 293 } | |
| 294 return true; | |
| 295 } | |
| 296 | |
| 297 | |
| 298 bool StoreBuffer::HashTablesAreZapped() { | |
| 299 return Zapped(reinterpret_cast<char*>(hash_map_1_), | |
| 300 sizeof(uintptr_t) * kHashMapLength) && | |
| 301 Zapped(reinterpret_cast<char*>(hash_map_2_), | |
| 302 sizeof(uintptr_t) * kHashMapLength); | |
| 303 } | |
| 304 | |
| 305 | |
| 306 static Address* in_store_buffer_1_element_cache = NULL; | 302 static Address* in_store_buffer_1_element_cache = NULL; |
| 307 | 303 |
| 308 | 304 |
| 309 bool StoreBuffer::CellIsInStoreBuffer(Address cell_address) { | 305 bool StoreBuffer::CellIsInStoreBuffer(Address cell_address) { |
| 310 if (!FLAG_enable_slow_asserts) return true; | 306 if (!FLAG_enable_slow_asserts) return true; |
| 311 if (in_store_buffer_1_element_cache != NULL && | 307 if (in_store_buffer_1_element_cache != NULL && |
| 312 *in_store_buffer_1_element_cache == cell_address) { | 308 *in_store_buffer_1_element_cache == cell_address) { |
| 313 return true; | 309 return true; |
| 314 } | 310 } |
| 315 Address* top = reinterpret_cast<Address*>(heap_->store_buffer_top()); | 311 Address* top = reinterpret_cast<Address*>(heap_->store_buffer_top()); |
| 316 for (Address* current = top - 1; current >= start_; current--) { | 312 for (Address* current = top - 1; current >= start_; current--) { |
| 317 if (*current == cell_address) { | 313 if (*current == cell_address) { |
| 318 in_store_buffer_1_element_cache = current; | 314 in_store_buffer_1_element_cache = current; |
| 319 return true; | 315 return true; |
| 320 } | 316 } |
| 321 } | 317 } |
| 322 for (Address* current = old_top_ - 1; current >= old_start_; current--) { | 318 for (Address* current = old_top_ - 1; current >= old_start_; current--) { |
| 323 if (*current == cell_address) { | 319 if (*current == cell_address) { |
| 324 in_store_buffer_1_element_cache = current; | 320 in_store_buffer_1_element_cache = current; |
| 325 return true; | 321 return true; |
| 326 } | 322 } |
| 327 } | 323 } |
| 328 return false; | 324 return false; |
| 329 } | 325 } |
| 330 #endif | 326 #endif |
| 331 | 327 |
| 332 | 328 |
| 333 void StoreBuffer::ZapHashTables() { | 329 void StoreBuffer::ClearFilteringHashSets() { |
| 334 memset(reinterpret_cast<void*>(hash_map_1_), | 330 if (!hash_sets_are_empty_) { |
| 335 0, | 331 memset(reinterpret_cast<void*>(hash_set_1_), |
| 336 sizeof(uintptr_t) * kHashMapLength); | 332 0, |
| 337 memset(reinterpret_cast<void*>(hash_map_2_), | 333 sizeof(uintptr_t) * kHashSetLength); |
| 338 0, | 334 memset(reinterpret_cast<void*>(hash_set_2_), |
| 339 sizeof(uintptr_t) * kHashMapLength); | 335 0, |
| | 336 sizeof(uintptr_t) * kHashSetLength); |
| | 337 hash_sets_are_empty_ = true; |
| | 338 } |
| 340 } | 339 } |
| 341 | 340 |
| 342 | 341 |
| 343 void StoreBuffer::GCPrologue() { | 342 void StoreBuffer::GCPrologue() { |
| 344 ZapHashTables(); | 343 ClearFilteringHashSets(); |
| 345 during_gc_ = true; | 344 during_gc_ = true; |
| 346 } | 345 } |
| 347 | 346 |
| 348 | 347 |
| 349 #ifdef DEBUG | 348 #ifdef DEBUG |
| 350 static void DummyScavengePointer(HeapObject** p, HeapObject* o) { | 349 static void DummyScavengePointer(HeapObject** p, HeapObject* o) { |
| 351 // Do nothing. | 350 // Do nothing. |
| 352 } | 351 } |
| 353 | 352 |
| 354 | 353 |
| (...skipping 289 matching lines...) |
| 644 // There's no check of the limit in the loop below so we check here for | 643 // There's no check of the limit in the loop below so we check here for |
| 645 // the worst case (compaction doesn't eliminate any pointers). | 644 // the worst case (compaction doesn't eliminate any pointers). |
| 646 ASSERT(top <= limit_); | 645 ASSERT(top <= limit_); |
| 647 heap_->public_set_store_buffer_top(start_); | 646 heap_->public_set_store_buffer_top(start_); |
| 648 if (top - start_ > old_limit_ - old_top_) { | 647 if (top - start_ > old_limit_ - old_top_) { |
| 649 HandleFullness(); | 648 HandleFullness(); |
| 650 } | 649 } |
| 651 ASSERT(may_move_store_buffer_entries_); | 650 ASSERT(may_move_store_buffer_entries_); |
| 652 // Goes through the addresses in the store buffer attempting to remove | 651 // Goes through the addresses in the store buffer attempting to remove |
| 653 // duplicates. In the interest of speed this is a lossy operation. Some | 652 // duplicates. In the interest of speed this is a lossy operation. Some |
| 654 // duplicates will remain. We have two hash tables with different hash | 653 // duplicates will remain. We have two hash sets with different hash |
| 655 // functions to reduce the number of unnecessary clashes. | 654 // functions to reduce the number of unnecessary clashes. |
| | 655 hash_sets_are_empty_ = false; // Hash sets are in use. |
| 656 for (Address* current = start_; current < top; current++) { | 656 for (Address* current = start_; current < top; current++) { |
| 657 ASSERT(!heap_->cell_space()->Contains(*current)); | 657 ASSERT(!heap_->cell_space()->Contains(*current)); |
| 658 ASSERT(!heap_->code_space()->Contains(*current)); | 658 ASSERT(!heap_->code_space()->Contains(*current)); |
| 659 ASSERT(!heap_->old_data_space()->Contains(*current)); | 659 ASSERT(!heap_->old_data_space()->Contains(*current)); |
| 660 uintptr_t int_addr = reinterpret_cast<uintptr_t>(*current); | 660 uintptr_t int_addr = reinterpret_cast<uintptr_t>(*current); |
| 661 // Shift out the last bits including any tags. | 661 // Shift out the last bits including any tags. |
| 662 int_addr >>= kPointerSizeLog2; | 662 int_addr >>= kPointerSizeLog2; |
| 663 int hash1 = | 663 int hash1 = |
| 664 ((int_addr ^ (int_addr >> kHashMapLengthLog2)) & (kHashMapLength - 1)); | 664 ((int_addr ^ (int_addr >> kHashSetLengthLog2)) & (kHashSetLength - 1)); |
| 665 if (hash_map_1_[hash1] == int_addr) continue; | 665 if (hash_set_1_[hash1] == int_addr) continue; |
| 666 int hash2 = | 666 int hash2 = |
| 667 ((int_addr - (int_addr >> kHashMapLengthLog2)) & (kHashMapLength - 1)); | 667 ((int_addr - (int_addr >> kHashSetLengthLog2)) & (kHashSetLength - 1)); |
| 668 hash2 ^= hash2 >> (kHashMapLengthLog2 * 2); | 668 hash2 ^= hash2 >> (kHashSetLengthLog2 * 2); |
| 669 if (hash_map_2_[hash2] == int_addr) continue; | 669 if (hash_set_2_[hash2] == int_addr) continue; |
| 670 if (hash_map_1_[hash1] == 0) { | 670 if (hash_set_1_[hash1] == 0) { |
| 671 hash_map_1_[hash1] = int_addr; | 671 hash_set_1_[hash1] = int_addr; |
| 672 } else if (hash_map_2_[hash2] == 0) { | 672 } else if (hash_set_2_[hash2] == 0) { |
| 673 hash_map_2_[hash2] = int_addr; | 673 hash_set_2_[hash2] = int_addr; |
| 674 } else { | 674 } else { |
| 675 // Rather than slowing down we just throw away some entries. This will | 675 // Rather than slowing down we just throw away some entries. This will |
| 676 // cause some duplicates to remain undetected. | 676 // cause some duplicates to remain undetected. |
| 677 hash_map_1_[hash1] = int_addr; | 677 hash_set_1_[hash1] = int_addr; |
| 678 hash_map_2_[hash2] = 0; | 678 hash_set_2_[hash2] = 0; |
| 679 } | 679 } |
| 680 old_buffer_is_sorted_ = false; | 680 old_buffer_is_sorted_ = false; |
| 681 old_buffer_is_filtered_ = false; | 681 old_buffer_is_filtered_ = false; |
| 682 *old_top_++ = reinterpret_cast<Address>(int_addr << kPointerSizeLog2); | 682 *old_top_++ = reinterpret_cast<Address>(int_addr << kPointerSizeLog2); |
| 683 ASSERT(old_top_ <= old_limit_); | 683 ASSERT(old_top_ <= old_limit_); |
| 684 } | 684 } |
| 685 heap_->isolate()->counters()->store_buffer_compactions()->Increment(); | 685 heap_->isolate()->counters()->store_buffer_compactions()->Increment(); |
| 686 CheckForFullBuffer(); | 686 CheckForFullBuffer(); |
| 687 } | 687 } |
| 688 | 688 |
| 689 | 689 |
| 690 void StoreBuffer::CheckForFullBuffer() { | 690 void StoreBuffer::CheckForFullBuffer() { |
| 691 if (old_limit_ - old_top_ < kStoreBufferSize * 2) { | 691 if (old_limit_ - old_top_ < kStoreBufferSize * 2) { |
| 692 HandleFullness(); | 692 HandleFullness(); |
| 693 } | 693 } |
| 694 } | 694 } |
| 695 | 695 |
| 696 } } // namespace v8::internal | 696 } } // namespace v8::internal |
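
A note on the NEW side of Setup(): the reservation of `kStoreBufferSize * 3` bytes and the `RoundUp` to a `kStoreBufferSize * 2` boundary pin down the invariant that the two ASSERTs check, namely that every address inside the buffer has the overflow bit clear while the first address past `limit_` has it set, which lets an overflow be caught with a single bit test on the current top pointer. The toy program below is not V8 code; the buffer size, the overflow bit, and the aligned start address are illustrative stand-ins chosen to satisfy the same invariants.

```cpp
// Toy demonstration (not V8 code) of the alignment trick in Setup(): with a
// power-of-two buffer size and a start aligned to twice that size, the
// overflow bit is clear for every address inside the buffer and set at the
// first address past the end.
#include <cassert>
#include <cstdint>

int main() {
  const std::uintptr_t kBufferSize  = 1 << 16;      // Assumed power of two.
  const std::uintptr_t kOverflowBit = kBufferSize;  // The bit that flips at the limit.

  // Stand-in for the RoundUp()-aligned start chosen in Setup(); any multiple
  // of 2 * kBufferSize behaves the same way.
  const std::uintptr_t start = 32 * 2 * kBufferSize;
  const std::uintptr_t limit = start + kBufferSize;

  // Mirrors the two ASSERTs in Setup().
  assert((limit & kOverflowBit) != 0);
  assert(((limit - 1) & kOverflowBit) == 0);

  // Every slot inside [start, limit) passes the cheap bit test; the first
  // write past the end would trip it and trigger StoreBufferOverflow().
  for (std::uintptr_t top = start; top < limit; top += sizeof(void*)) {
    assert((top & kOverflowBit) == 0);
  }
  return 0;
}
```

Reserving three times the buffer size simply guarantees that a region with this alignment fits inside the reservation wherever the OS happens to place it.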
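
The comment above Compact()'s main loop describes a deliberately lossy de-duplication scheme: each incoming slot address is probed in two small direct-mapped hash sets with different hash functions, and on a double collision an older entry is evicted rather than probed further. The standalone sketch below shows that technique in isolation; the set size, the pointer-size constant, and the slightly simplified second hash are assumptions for illustration, not the code under review.

```cpp
// Standalone sketch of the two-hash-set filter used by Compact() above.
// Set size and pointer-size constants are assumptions, and the second hash
// function is slightly simplified relative to the code under review.
#include <cstdint>

namespace store_buffer_sketch {

const int kSetLengthLog2 = 10;                  // 1024 slots per set (assumed).
const int kSetLength = 1 << kSetLengthLog2;
const int kPointerSizeLog2 = 3;                 // 64-bit pointers assumed.

std::uintptr_t set1[kSetLength];                // Zero means "empty slot".
std::uintptr_t set2[kSetLength];

// Returns true if addr is still present in one of the sets, i.e. the caller
// may drop it as a recent duplicate.  Otherwise records it, possibly
// evicting an older entry -- which only means a later duplicate might go
// undetected, never that a live entry is lost.
bool ProbablySeen(std::uintptr_t addr) {
  std::uintptr_t key = addr >> kPointerSizeLog2;   // Shift out tag/alignment bits.
  int hash1 = static_cast<int>(
      (key ^ (key >> kSetLengthLog2)) & (kSetLength - 1));
  if (set1[hash1] == key) return true;
  int hash2 = static_cast<int>(
      (key - (key >> kSetLengthLog2)) & (kSetLength - 1));
  if (set2[hash2] == key) return true;
  if (set1[hash1] == 0) {
    set1[hash1] = key;
  } else if (set2[hash2] == 0) {
    set2[hash2] = key;
  } else {
    // Both slots occupied: overwrite instead of probing further (lossy).
    set1[hash1] = key;
    set2[hash2] = 0;
  }
  return false;
}

}  // namespace store_buffer_sketch
```

Since a lookup is only a couple of shifts, masks, and array accesses, the filter adds almost no cost per store-buffer entry; the price, as the comment notes, is that some duplicates survive and are only removed later when SortUniq() sorts the old buffer and Uniq() drops adjacent repeats.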