Chromium Code Reviews

Unified Diff: src/store-buffer.cc

Issue 9113010: Merge r10334 from bleeding_edge to trunk. (Closed) Base URL: http://v8.googlecode.com/svn/trunk/
Patch Set: Created 8 years, 11 months ago
 // Copyright 2011 the V8 project authors. All rights reserved.
 // Redistribution and use in source and binary forms, with or without
 // modification, are permitted provided that the following conditions are
 // met:
 //
 //     * Redistributions of source code must retain the above copyright
 //       notice, this list of conditions and the following disclaimer.
 //     * Redistributions in binary form must reproduce the above
 //       copyright notice, this list of conditions and the following
 //       disclaimer in the documentation and/or other materials provided
(... skipping 31 unchanged lines ...)
       old_limit_(NULL),
       old_top_(NULL),
       old_reserved_limit_(NULL),
       old_buffer_is_sorted_(false),
       old_buffer_is_filtered_(false),
       during_gc_(false),
       store_buffer_rebuilding_enabled_(false),
       callback_(NULL),
       may_move_store_buffer_entries_(true),
       virtual_memory_(NULL),
-      hash_map_1_(NULL),
-      hash_map_2_(NULL) {
+      hash_set_1_(NULL),
+      hash_set_2_(NULL),
+      hash_sets_are_empty_(true) {
 }


 void StoreBuffer::Setup() {
   virtual_memory_ = new VirtualMemory(kStoreBufferSize * 3);
   uintptr_t start_as_int =
       reinterpret_cast<uintptr_t>(virtual_memory_->address());
   start_ =
       reinterpret_cast<Address*>(RoundUp(start_as_int, kStoreBufferSize * 2));
   limit_ = start_ + (kStoreBufferSize / kPointerSize);
(... skipping 26 unchanged lines ...)
   USE(vm_limit);
   ASSERT((reinterpret_cast<uintptr_t>(limit_) & kStoreBufferOverflowBit) != 0);
   ASSERT((reinterpret_cast<uintptr_t>(limit_ - 1) & kStoreBufferOverflowBit) ==
          0);

   CHECK(virtual_memory_->Commit(reinterpret_cast<Address>(start_),
                                 kStoreBufferSize,
                                 false));  // Not executable.
   heap_->public_set_store_buffer_top(start_);

-  hash_map_1_ = new uintptr_t[kHashMapLength];
-  hash_map_2_ = new uintptr_t[kHashMapLength];
+  hash_set_1_ = new uintptr_t[kHashSetLength];
+  hash_set_2_ = new uintptr_t[kHashSetLength];
+  hash_sets_are_empty_ = false;

-  ZapHashTables();
+  ClearFilteringHashSets();
 }


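A note on the two ASSERTs above: because start_ is rounded up to a kStoreBufferSize * 2 boundary, limit_ (one past the end of the buffer) is the only address in the buffer range whose kStoreBufferOverflowBit is set, so a full buffer can be detected with a single bit test on the insertion pointer. A minimal standalone sketch of the same alignment trick, using made-up constants rather than the real kStoreBufferSize / kStoreBufferOverflowBit:

    #include <cassert>
    #include <cstdint>
    #include <cstdio>

    // Illustrative constants only; they mirror the shape of the asserts above.
    const uintptr_t kBufferSize = 1 << 16;       // assumed power-of-two size
    const uintptr_t kOverflowBit = kBufferSize;  // the bit that flips at the limit

    int main() {
      // Reserve three buffer-sizes so a 2*size-aligned window always fits,
      // as Setup() does with VirtualMemory(kStoreBufferSize * 3).
      static char backing[3 * kBufferSize];
      uintptr_t start_as_int = reinterpret_cast<uintptr_t>(backing);
      // Round up to a 2*size boundary: the buffer spans [start, start + size),
      // and only the one-past-the-end address has kOverflowBit set.
      uintptr_t start =
          (start_as_int + 2 * kBufferSize - 1) & ~(2 * kBufferSize - 1);
      uintptr_t limit = start + kBufferSize;

      // These mirror the ASSERTs in Setup(): one bit test detects overflow.
      assert((limit & kOverflowBit) != 0);
      assert(((limit - 1) & kOverflowBit) == 0);
      printf("limit bit set: %d, limit-1 bit set: %d\n",
             (limit & kOverflowBit) != 0, ((limit - 1) & kOverflowBit) != 0);
      return 0;
    }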
 void StoreBuffer::TearDown() {
   delete virtual_memory_;
   delete old_virtual_memory_;
-  delete[] hash_map_1_;
-  delete[] hash_map_2_;
+  delete[] hash_set_1_;
+  delete[] hash_set_2_;
   old_start_ = old_top_ = old_limit_ = old_reserved_limit_ = NULL;
   start_ = limit_ = NULL;
   heap_->public_set_store_buffer_top(start_);
 }


 void StoreBuffer::StoreBufferOverflow(Isolate* isolate) {
   isolate->heap()->store_buffer()->Compact();
 }

(... skipping 19 unchanged lines ...)
   intptr_t b =
       reinterpret_cast<intptr_t>(*reinterpret_cast<const Address*>(void_b));
   ASSERT(sizeof(1) == sizeof(a));
   // Shift down to avoid wraparound.
   return (a >> kPointerSizeLog2) - (b >> kPointerSizeLog2);
 }
 #endif
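The "shift down to avoid wraparound" comment is about keeping the comparator's subtraction inside the representable range: qsort only needs the sign of the result, and shifting both word-aligned addresses right by kPointerSizeLog2 before subtracting cannot overflow. A simplified sketch of such a comparator, with integer stand-ins for addresses and an assumed pointer size:

    #include <cstdint>
    #include <cstdio>
    #include <cstdlib>

    // Assumed pointer-size log (3 on 64-bit targets, 2 on 32-bit targets).
    static const int kPtrSizeLog2 = sizeof(void*) == 8 ? 3 : 2;

    // The array holds integer "addresses" directly rather than pointers into
    // a store buffer; only the ordering logic is illustrated here.
    static int CompareAddressesSketch(const void* void_a, const void* void_b) {
      intptr_t a = *static_cast<const intptr_t*>(void_a);
      intptr_t b = *static_cast<const intptr_t*>(void_b);
      // Shift down before subtracting so the difference cannot wrap around.
      intptr_t diff = (a >> kPtrSizeLog2) - (b >> kPtrSizeLog2);
      return diff < 0 ? -1 : (diff > 0 ? 1 : 0);
    }

    int main() {
      intptr_t slots[] = {4096, 64, 1024, 64};  // word-aligned sample values
      qsort(slots, 4, sizeof(slots[0]), CompareAddressesSketch);
      for (int i = 0; i < 4; i++) printf("%ld\n", static_cast<long>(slots[i]));
      return 0;
    }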


 void StoreBuffer::Uniq() {
-  ASSERT(HashTablesAreZapped());
   // Remove adjacent duplicates and cells that do not point at new space.
   Address previous = NULL;
   Address* write = old_start_;
   ASSERT(may_move_store_buffer_entries_);
   for (Address* read = old_start_; read < old_top_; read++) {
     Address current = *read;
     if (current != previous) {
       if (heap_->InNewSpace(*reinterpret_cast<Object**>(current))) {
         *write++ = current;
       }
(... skipping 103 unchanged lines ...)
       containing_chunk = previous_chunk;
     } else {
       containing_chunk = MemoryChunk::FromAnyPointerAddress(addr);
       previous_chunk = containing_chunk;
     }
     if (!containing_chunk->IsFlagSet(flag)) {
       *new_top++ = addr;
     }
   }
   old_top_ = new_top;
+
+  // Filtering hash sets are inconsistent with the store buffer after this
+  // operation.
+  ClearFilteringHashSets();
 }


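The new "filtering hash sets are inconsistent" comments capture an invariant: the hash sets only record which addresses have already been pushed into the old buffer, so any pass that rewrites that buffer (filtering, sorting, uniquing) leaves them stale, and a stale set could make a later Compact() skip an address the buffer no longer holds. A small sketch of that failure mode with STL stand-ins, not V8 types:

    #include <cstdint>
    #include <cstdio>
    #include <unordered_set>
    #include <vector>

    // `buffer` plays the old store buffer and `filter` plays the filtering
    // hash sets that suppress duplicate inserts.
    int main() {
      std::vector<uintptr_t> buffer = {0x1000, 0x2000, 0x3000};
      std::unordered_set<uintptr_t> filter(buffer.begin(), buffer.end());

      // A Filter()-like pass drops 0x2000 from the buffer in place...
      buffer.erase(buffer.begin() + 1);

      // ...but the stale filter would still reject a fresh insert of 0x2000.
      uintptr_t addr = 0x2000;
      if (filter.count(addr)) {
        printf("stale filter would skip 0x%lx even though the buffer lost it\n",
               static_cast<unsigned long>(addr));
      }
      filter.clear();  // the role ClearFilteringHashSets() plays above
      if (!filter.count(addr)) {
        buffer.push_back(addr);  // the insert goes through again
      }
      printf("buffer holds %zu entries\n", buffer.size());
      return 0;
    }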
 void StoreBuffer::SortUniq() {
   Compact();
   if (old_buffer_is_sorted_) return;
-  ZapHashTables();
   qsort(reinterpret_cast<void*>(old_start_),
         old_top_ - old_start_,
         sizeof(*old_top_),
         &CompareAddresses);
   Uniq();

   old_buffer_is_sorted_ = true;
+
+  // Filtering hash sets are inconsistent with the store buffer after this
+  // operation.
+  ClearFilteringHashSets();
 }


 bool StoreBuffer::PrepareForIteration() {
   Compact();
   PointerChunkIterator it(heap_);
   MemoryChunk* chunk;
   bool page_has_scan_on_scavenge_flag = false;
   while ((chunk = it.next()) != NULL) {
     if (chunk->scan_on_scavenge()) page_has_scan_on_scavenge_flag = true;
   }

   if (page_has_scan_on_scavenge_flag) {
     Filter(MemoryChunk::SCAN_ON_SCAVENGE);
   }
-  ZapHashTables();
+
+  // Filtering hash sets are inconsistent with the store buffer after
+  // iteration.
+  ClearFilteringHashSets();
+
   return page_has_scan_on_scavenge_flag;
 }


 #ifdef DEBUG
 void StoreBuffer::Clean() {
-  ZapHashTables();
+  ClearFilteringHashSets();
   Uniq();  // Also removes things that no longer point to new space.
   CheckForFullBuffer();
 }


-static bool Zapped(char* start, int size) {
-  for (int i = 0; i < size; i++) {
-    if (start[i] != 0) return false;
-  }
-  return true;
-}
-
-
-bool StoreBuffer::HashTablesAreZapped() {
-  return Zapped(reinterpret_cast<char*>(hash_map_1_),
-                sizeof(uintptr_t) * kHashMapLength) &&
-      Zapped(reinterpret_cast<char*>(hash_map_2_),
-             sizeof(uintptr_t) * kHashMapLength);
-}
-
-
 static Address* in_store_buffer_1_element_cache = NULL;


 bool StoreBuffer::CellIsInStoreBuffer(Address cell_address) {
   if (!FLAG_enable_slow_asserts) return true;
   if (in_store_buffer_1_element_cache != NULL &&
       *in_store_buffer_1_element_cache == cell_address) {
     return true;
   }
   Address* top = reinterpret_cast<Address*>(heap_->store_buffer_top());
   for (Address* current = top - 1; current >= start_; current--) {
     if (*current == cell_address) {
       in_store_buffer_1_element_cache = current;
       return true;
     }
   }
   for (Address* current = old_top_ - 1; current >= old_start_; current--) {
     if (*current == cell_address) {
       in_store_buffer_1_element_cache = current;
       return true;
     }
   }
   return false;
 }
 #endif


-void StoreBuffer::ZapHashTables() {
-  memset(reinterpret_cast<void*>(hash_map_1_),
-         0,
-         sizeof(uintptr_t) * kHashMapLength);
-  memset(reinterpret_cast<void*>(hash_map_2_),
-         0,
-         sizeof(uintptr_t) * kHashMapLength);
+void StoreBuffer::ClearFilteringHashSets() {
+  if (!hash_sets_are_empty_) {
+    memset(reinterpret_cast<void*>(hash_set_1_),
+           0,
+           sizeof(uintptr_t) * kHashSetLength);
+    memset(reinterpret_cast<void*>(hash_set_2_),
+           0,
+           sizeof(uintptr_t) * kHashSetLength);
+    hash_sets_are_empty_ = true;
+  }
 }


 void StoreBuffer::GCPrologue() {
-  ZapHashTables();
+  ClearFilteringHashSets();
   during_gc_ = true;
 }


 #ifdef DEBUG
 static void DummyScavengePointer(HeapObject** p, HeapObject* o) {
   // Do nothing.
 }


(... skipping 287 unchanged lines ...)
   if (top == start_) return;

   // There's no check of the limit in the loop below so we check here for
   // the worst case (compaction doesn't eliminate any pointers).
   ASSERT(top <= limit_);
   heap_->public_set_store_buffer_top(start_);
   EnsureSpace(top - start_);
   ASSERT(may_move_store_buffer_entries_);
   // Goes through the addresses in the store buffer attempting to remove
   // duplicates. In the interest of speed this is a lossy operation. Some
-  // duplicates will remain. We have two hash tables with different hash
+  // duplicates will remain. We have two hash sets with different hash
   // functions to reduce the number of unnecessary clashes.
+  hash_sets_are_empty_ = false;  // Hash sets are in use.
   for (Address* current = start_; current < top; current++) {
     ASSERT(!heap_->cell_space()->Contains(*current));
     ASSERT(!heap_->code_space()->Contains(*current));
     ASSERT(!heap_->old_data_space()->Contains(*current));
     uintptr_t int_addr = reinterpret_cast<uintptr_t>(*current);
     // Shift out the last bits including any tags.
     int_addr >>= kPointerSizeLog2;
     int hash1 =
-        ((int_addr ^ (int_addr >> kHashMapLengthLog2)) & (kHashMapLength - 1));
-    if (hash_map_1_[hash1] == int_addr) continue;
+        ((int_addr ^ (int_addr >> kHashSetLengthLog2)) & (kHashSetLength - 1));
+    if (hash_set_1_[hash1] == int_addr) continue;
     int hash2 =
-        ((int_addr - (int_addr >> kHashMapLengthLog2)) & (kHashMapLength - 1));
-    hash2 ^= hash2 >> (kHashMapLengthLog2 * 2);
-    if (hash_map_2_[hash2] == int_addr) continue;
-    if (hash_map_1_[hash1] == 0) {
-      hash_map_1_[hash1] = int_addr;
-    } else if (hash_map_2_[hash2] == 0) {
-      hash_map_2_[hash2] = int_addr;
+        ((int_addr - (int_addr >> kHashSetLengthLog2)) & (kHashSetLength - 1));
+    hash2 ^= hash2 >> (kHashSetLengthLog2 * 2);
+    if (hash_set_2_[hash2] == int_addr) continue;
+    if (hash_set_1_[hash1] == 0) {
+      hash_set_1_[hash1] = int_addr;
+    } else if (hash_set_2_[hash2] == 0) {
+      hash_set_2_[hash2] = int_addr;
     } else {
       // Rather than slowing down we just throw away some entries. This will
       // cause some duplicates to remain undetected.
-      hash_map_1_[hash1] = int_addr;
-      hash_map_2_[hash2] = 0;
+      hash_set_1_[hash1] = int_addr;
+      hash_set_2_[hash2] = 0;
     }
     old_buffer_is_sorted_ = false;
     old_buffer_is_filtered_ = false;
     *old_top_++ = reinterpret_cast<Address>(int_addr << kPointerSizeLog2);
     ASSERT(old_top_ <= old_limit_);
   }
   heap_->isolate()->counters()->store_buffer_compactions()->Increment();
   CheckForFullBuffer();
 }
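The comment above describes the duplicate filter used while compacting: two small direct-mapped hash sets with different hash functions, where a double collision simply evicts an old entry instead of probing, so the filter is lossy but cheap. A standalone sketch of the idea; the set size, hash mixing, and example addresses are illustrative, not V8's:

    #include <cstdint>
    #include <cstdio>

    // Two direct-mapped "hash sets" with different hash functions. A value is
    // treated as already seen if either set holds it; on a double collision we
    // evict rather than probe, so some duplicates slip through.
    const int kSetLengthLog2 = 8;            // illustrative size (256 slots)
    const int kSetLength = 1 << kSetLengthLog2;

    static uintptr_t set1[kSetLength];
    static uintptr_t set2[kSetLength];

    // Returns true when `addr` should be kept (not known to be a duplicate).
    static bool InsertIfNew(uintptr_t addr) {
      uintptr_t key = addr >> 3;  // drop alignment/tag bits, as Compact() does
      int hash1 = (key ^ (key >> kSetLengthLog2)) & (kSetLength - 1);
      if (set1[hash1] == key) return false;  // duplicate filtered
      int hash2 = (key - (key >> kSetLengthLog2)) & (kSetLength - 1);
      if (set2[hash2] == key) return false;  // duplicate filtered
      if (set1[hash1] == 0) {
        set1[hash1] = key;
      } else if (set2[hash2] == 0) {
        set2[hash2] = key;
      } else {
        // Both slots taken: throw an old entry away instead of slowing down.
        set1[hash1] = key;
        set2[hash2] = 0;
      }
      return true;
    }

    int main() {
      uintptr_t slots[] = {0x1000, 0x2000, 0x1000, 0x3000, 0x2000};
      int kept = 0;
      for (uintptr_t a : slots) kept += InsertIfNew(a) ? 1 : 0;
      printf("kept %d of 5 entries\n", kept);  // 3 here: duplicates filtered
      return 0;
    }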


 void StoreBuffer::CheckForFullBuffer() {
   EnsureSpace(kStoreBufferSize * 2);
 }

 } }  // namespace v8::internal