Chromium Code Reviews

Unified Diff: src/store-buffer.cc

Issue 8776032: Let store buffer start out small for a 1Mbyte saving in boot (Closed)
Base URL: http://v8.googlecode.com/svn/branches/bleeding_edge/
Patch Set: '' Created 9 years ago
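
The heart of this patch is visible in Setup() below: instead of eagerly allocating the full old store buffer with `new Address[kOldStoreBufferLength]`, the new code reserves the entire address range up front but commits physical pages only for a small initial slice, doubling the committed region on demand. A minimal standalone sketch of that reserve-then-commit pattern, using raw POSIX mmap/mprotect in place of V8's VirtualMemory wrapper (the class name ReservedBuffer and all sizes are illustrative, and byte counts are assumed to be non-zero multiples of the OS page size):

```cpp
#include <sys/mman.h>
#include <cassert>
#include <cstddef>

// Illustrative sketch only; V8 goes through its VirtualMemory class.
class ReservedBuffer {
 public:
  // Both sizes must be non-zero multiples of the OS page size.
  ReservedBuffer(size_t reserved_bytes, size_t initial_bytes)
      : reserved_(reserved_bytes), committed_(0) {
    // Reserve address space only: PROT_NONE + MAP_NORESERVE costs no
    // physical memory until pages are committed below.
    base_ = mmap(NULL, reserved_, PROT_NONE,
                 MAP_PRIVATE | MAP_ANONYMOUS | MAP_NORESERVE, -1, 0);
    assert(base_ != MAP_FAILED);
    Commit(initial_bytes);
  }

  void* base() const { return base_; }

  // Double the committed region until `needed` bytes fit, capped at the
  // reservation; mirrors the growth loop in EnsureSpace() below.
  void EnsureCommitted(size_t needed) {
    assert(needed <= reserved_);
    size_t target = committed_;
    while (target < needed) target *= 2;
    if (target > reserved_) target = reserved_;
    Commit(target);
  }

 private:
  void Commit(size_t bytes) {
    if (bytes <= committed_) return;
    // Flip the next slice of the reservation to read/write.
    int rc = mprotect(static_cast<char*>(base_) + committed_,
                      bytes - committed_, PROT_READ | PROT_WRITE);
    assert(rc == 0);
    committed_ = bytes;
  }

  void* base_;
  size_t reserved_;
  size_t committed_;
};
```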
 // Copyright 2011 the V8 project authors. All rights reserved.
 // Redistribution and use in source and binary forms, with or without
 // modification, are permitted provided that the following conditions are
 // met:
 //
 //     * Redistributions of source code must retain the above copyright
 //       notice, this list of conditions and the following disclaimer.
 //     * Redistributions in binary form must reproduce the above
 //       copyright notice, this list of conditions and the following
 //       disclaimer in the documentation and/or other materials provided
(... skipping 23 matching lines ...)
 namespace v8 {
 namespace internal {

 StoreBuffer::StoreBuffer(Heap* heap)
     : heap_(heap),
       start_(NULL),
       limit_(NULL),
       old_start_(NULL),
       old_limit_(NULL),
       old_top_(NULL),
+      old_reserved_limit_(NULL),
       old_buffer_is_sorted_(false),
       old_buffer_is_filtered_(false),
       during_gc_(false),
       store_buffer_rebuilding_enabled_(false),
       callback_(NULL),
       may_move_store_buffer_entries_(true),
       virtual_memory_(NULL),
       hash_map_1_(NULL),
       hash_map_2_(NULL) {
 }

 void StoreBuffer::Setup() {
   virtual_memory_ = new VirtualMemory(kStoreBufferSize * 3);
   uintptr_t start_as_int =
       reinterpret_cast<uintptr_t>(virtual_memory_->address());
   start_ =
       reinterpret_cast<Address*>(RoundUp(start_as_int, kStoreBufferSize * 2));
-  limit_ = start_ + (kStoreBufferSize / sizeof(*start_));
+  limit_ = start_ + (kStoreBufferSize / kPointerSize);

-  old_top_ = old_start_ = new Address[kOldStoreBufferLength];
-  old_limit_ = old_start_ + kOldStoreBufferLength;
+  old_virtual_memory_ =
+      new VirtualMemory(kOldStoreBufferLength * kPointerSize);
+  old_top_ = old_start_ =
+      reinterpret_cast<Address*>(old_virtual_memory_->address());
+  // Don't know the alignment requirements of the OS, but it is certainly not
+  // less than 0xfff.
+  ASSERT((reinterpret_cast<uintptr_t>(old_start_) & 0xfff) == 0);
+  int initial_length = kInitialOldStoreBufferLength;
+  if (initial_length == 0) initial_length = 1;

  Michael Starzinger 2011/12/02 13:38:52:
    Can we change this into ASSERT(initial_length != 0)?
  Erik Corry 2011/12/02 14:07:27:
    Refactored in a slightly different way.

+  while (initial_length * kPointerSize < OS::CommitPageSize()) {
+    initial_length *= 2;
+  }
+  old_limit_ = old_start_ + initial_length;
+  old_reserved_limit_ = old_start_ + kOldStoreBufferLength;
+
+  CHECK(old_virtual_memory_->Commit(
+      reinterpret_cast<void*>(old_start_),
+      (old_limit_ - old_start_) * kPointerSize,
+      false));

   ASSERT(reinterpret_cast<Address>(start_) >= virtual_memory_->address());
   ASSERT(reinterpret_cast<Address>(limit_) >= virtual_memory_->address());
   Address* vm_limit = reinterpret_cast<Address*>(
       reinterpret_cast<char*>(virtual_memory_->address()) +
       virtual_memory_->size());
   ASSERT(start_ <= vm_limit);
   ASSERT(limit_ <= vm_limit);
   USE(vm_limit);
   ASSERT((reinterpret_cast<uintptr_t>(limit_) & kStoreBufferOverflowBit) != 0);
   ASSERT((reinterpret_cast<uintptr_t>(limit_ - 1) & kStoreBufferOverflowBit) ==
          0);

-  virtual_memory_->Commit(reinterpret_cast<Address>(start_),
-                          kStoreBufferSize,
-                          false);  // Not executable.
+  CHECK(virtual_memory_->Commit(reinterpret_cast<Address>(start_),
+                                kStoreBufferSize,
+                                false));  // Not executable.
   heap_->public_set_store_buffer_top(start_);

   hash_map_1_ = new uintptr_t[kHashMapLength];
   hash_map_2_ = new uintptr_t[kHashMapLength];

   ZapHashTables();
 }
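
The block of ASSERTs in Setup() above encodes why start_ is rounded up to kStoreBufferSize * 2: the buffer size is a power of two, so with double-size alignment every slot inside [start_, limit_) has kStoreBufferOverflowBit clear while limit_ is the first address with that bit set. Overflow can therefore be detected by testing a single bit of the top pointer rather than comparing against limit_. A self-contained illustration of that invariant (the constants are stand-ins, not V8's actual values):

```cpp
#include <cassert>
#include <cstdint>

// Stand-ins for kStoreBufferSize / kStoreBufferOverflowBit; the essential
// property is that the buffer size is a power of two.
const uintptr_t kSize = 1 << 16;
const uintptr_t kOverflowBit = kSize;

// With start aligned to 2 * kSize, addresses in [start, start + kSize) have
// kOverflowBit clear and start + kSize is the first address with it set.
bool BufferOverflowed(uintptr_t top) { return (top & kOverflowBit) != 0; }

int main() {
  uintptr_t start = 2 * kSize;  // aligned as in Setup()
  uintptr_t limit = start + kSize;
  assert(!BufferOverflowed(start));
  assert(!BufferOverflowed(limit - sizeof(void*)));  // last slot: bit clear
  assert(BufferOverflowed(limit));                   // one past: bit set
  return 0;
}
```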


 void StoreBuffer::TearDown() {
   delete virtual_memory_;
+  delete old_virtual_memory_;
   delete[] hash_map_1_;
   delete[] hash_map_2_;
-  delete[] old_start_;
-  old_start_ = old_top_ = old_limit_ = NULL;
+  old_start_ = old_top_ = old_limit_ = old_reserved_limit_ = NULL;
   start_ = limit_ = NULL;
   heap_->public_set_store_buffer_top(start_);
 }


 void StoreBuffer::StoreBufferOverflow(Isolate* isolate) {
   isolate->heap()->store_buffer()->Compact();
 }

(... skipping 36 matching lines ...)
       if (heap_->InNewSpace(*reinterpret_cast<Object**>(current))) {
         *write++ = current;
       }
     }
     previous = current;
   }
   old_top_ = write;
 }

-void StoreBuffer::HandleFullness() {
+void StoreBuffer::EnsureSpace(intptr_t space_needed) {
+  while (old_limit_ - old_top_ < space_needed &&
+         old_limit_ < old_reserved_limit_) {
+    size_t grow = old_limit_ - old_start_;  // Double size.
+    CHECK(old_virtual_memory_->Commit(
+        reinterpret_cast<void*>(old_limit_), grow * kPointerSize, false));

  Michael Starzinger 2011/12/02 13:38:52:
    Can we put each argument on a separate line like we do above?
  Erik Corry 2011/12/02 14:07:27:
    Done.

+    old_limit_ += grow;
+  }
+
+  if (old_limit_ - old_top_ >= space_needed) return;
+
   if (old_buffer_is_filtered_) return;
   ASSERT(may_move_store_buffer_entries_);
   Compact();

   old_buffer_is_filtered_ = true;
   bool page_has_scan_on_scavenge_flag = false;

   PointerChunkIterator it(heap_);
   MemoryChunk* chunk;
   while ((chunk = it.next()) != NULL) {
(... skipping 474 matching lines ...)
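
The loop at the top of the new EnsureSpace() grows the committed region geometrically: `grow` equals the current committed length, so each Commit doubles it and the cost of commit calls is amortized across buffered pointers. Only when the whole reservation is committed and space is still short does the function fall back to Compact() and page filtering. A pure-logic sketch of that policy (all names and sizes are illustrative):

```cpp
#include <cassert>

// Doubles `committed` until `needed` free slots exist or the reservation is
// exhausted; mirrors the while-loop in EnsureSpace().
long Grow(long committed, long used, long needed, long reserved) {
  while (committed - used < needed && committed < reserved) {
    committed += committed;  // grow = old_limit_ - old_start_, i.e. double
  }
  return committed;
}

int main() {
  // A request that fits after two doublings: 1024 -> 2048 -> 4096 slots.
  assert(Grow(1024, 100, 3000, 1 << 17) == 4096);
  // A request that outgrows the reservation stops at the reserved limit;
  // EnsureSpace() must then make room via Compact() and filtering instead.
  assert(Grow(1024, 0, 1 << 20, 1 << 17) == 1 << 17);
  return 0;
}
```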

 void StoreBuffer::Compact() {
   Address* top = reinterpret_cast<Address*>(heap_->store_buffer_top());

   if (top == start_) return;

   // There's no check of the limit in the loop below so we check here for
   // the worst case (compaction doesn't eliminate any pointers).
   ASSERT(top <= limit_);
   heap_->public_set_store_buffer_top(start_);
-  if (top - start_ > old_limit_ - old_top_) {
-    HandleFullness();
-  }
+  EnsureSpace(top - start_);
   ASSERT(may_move_store_buffer_entries_);
   // Goes through the addresses in the store buffer attempting to remove
   // duplicates. In the interest of speed this is a lossy operation. Some
   // duplicates will remain. We have two hash tables with different hash
   // functions to reduce the number of unnecessary clashes.
   for (Address* current = start_; current < top; current++) {
     ASSERT(!heap_->cell_space()->Contains(*current));
     ASSERT(!heap_->code_space()->Contains(*current));
     ASSERT(!heap_->old_data_space()->Contains(*current));
     uintptr_t int_addr = reinterpret_cast<uintptr_t>(*current);
(... skipping 20 matching lines ...)
     old_buffer_is_filtered_ = false;
     *old_top_++ = reinterpret_cast<Address>(int_addr << kPointerSizeLog2);
     ASSERT(old_top_ <= old_limit_);
   }
   heap_->isolate()->counters()->store_buffer_compactions()->Increment();
   CheckForFullBuffer();
 }


 void StoreBuffer::CheckForFullBuffer() {
-  if (old_limit_ - old_top_ < kStoreBufferSize * 2) {
-    HandleFullness();
-  }
+  EnsureSpace(kStoreBufferSize * 2);
 }

 } }  // namespace v8::internal
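
The comment inside Compact() above describes the deduplication strategy: two small direct-mapped hash tables with different hash functions filter out most repeated addresses, and because a missed duplicate only means redundant work for the scavenger rather than a correctness problem, the filter is allowed to be lossy. A hedged sketch of such a two-table filter (table length, shift amounts, and the insertion policy here are illustrative, not V8's exact hashing; the analogous tables in the patch are hash_map_1_ and hash_map_2_, initialized via ZapHashTables() in Setup()):

```cpp
#include <cstdint>
#include <cstring>

const int kTableLength = 1 << 12;  // illustrative; must be a power of two

// Lossy duplicate filter: a hit in either direct-mapped table means "seen
// recently", so the address is skipped. Collisions evict silently, which
// merely lets the occasional duplicate through.
class LossyDedup {
 public:
  LossyDedup() {
    memset(table1_, 0, sizeof(table1_));
    memset(table2_, 0, sizeof(table2_));
  }

  // Returns true if `addr` should be kept (i.e. not a recent duplicate).
  bool Insert(uintptr_t addr) {
    uintptr_t hash1 = (addr >> 2) & (kTableLength - 1);
    uintptr_t hash2 = (addr >> 7) & (kTableLength - 1);  // different mixing
    if (table1_[hash1] == addr || table2_[hash2] == addr) return false;
    table1_[hash1] = addr;  // remember, evicting any previous occupant
    table2_[hash2] = addr;
    return true;
  }

 private:
  uintptr_t table1_[kTableLength];
  uintptr_t table2_[kTableLength];
};
```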