Chromium Code Reviews

Unified Diff: src/store-buffer.cc

Issue 6250076: Start using store buffers. Handle store buffer overflow situation.... (Closed)
Base URL: http://v8.googlecode.com/svn/branches/experimental/gc/
Patch Set: Created 9 years, 10 months ago
 // Copyright 2010 the V8 project authors. All rights reserved.
 // Redistribution and use in source and binary forms, with or without
 // modification, are permitted provided that the following conditions are
 // met:
 //
 //     * Redistributions of source code must retain the above copyright
 //       notice, this list of conditions and the following disclaimer.
 //     * Redistributions in binary form must reproduce the above
 //       copyright notice, this list of conditions and the following
 //       disclaimer in the documentation and/or other materials provided
(...skipping 22 matching lines...)
 namespace internal {

 Address* StoreBuffer::start_ = NULL;
 Address* StoreBuffer::limit_ = NULL;
 Address* StoreBuffer::old_start_ = NULL;
 Address* StoreBuffer::old_limit_ = NULL;
 Address* StoreBuffer::old_top_ = NULL;
 uintptr_t* StoreBuffer::hash_map_1_ = NULL;
 uintptr_t* StoreBuffer::hash_map_2_ = NULL;
 VirtualMemory* StoreBuffer::virtual_memory_ = NULL;
-bool StoreBuffer::must_scan_entire_memory_ = false;
+StoreBuffer::StoreBufferMode StoreBuffer::store_buffer_mode_ =
+    kStoreBufferFunctional;
 bool StoreBuffer::old_buffer_is_sorted_ = false;
 bool StoreBuffer::during_gc_ = false;
+bool StoreBuffer::store_buffer_rebuilding_enabled_ = false;
+bool StoreBuffer::may_move_store_buffer_entries_ = true;

 void StoreBuffer::Setup() {
   virtual_memory_ = new VirtualMemory(kStoreBufferSize * 3);
   uintptr_t start_as_int =
       reinterpret_cast<uintptr_t>(virtual_memory_->address());
   start_ =
       reinterpret_cast<Address*>(RoundUp(start_as_int, kStoreBufferSize * 2));
   limit_ = start_ + (kStoreBufferSize / sizeof(*start_));

   old_top_ = old_start_ = new Address[kOldStoreBufferLength];
(...skipping 63 matching lines...)
   return (a >> kPointerSizeLog2) - (b >> kPointerSizeLog2);
 }
 #endif


 void StoreBuffer::Uniq() {
   ASSERT(HashTablesAreZapped());
   // Remove adjacent duplicates and cells that do not point at new space.
   Address previous = NULL;
   Address* write = old_start_;
+  ASSERT(may_move_store_buffer_entries_);
   for (Address* read = old_start_; read < old_top_; read++) {
     Address current = *read;
     if (current != previous) {
-      if (Heap::InNewSpace(*reinterpret_cast<Address*>(current))) {
+      if (Heap::InNewSpace(*reinterpret_cast<Object**>(current))) {
         *write++ = current;
       }
     }
     previous = current;
   }
   old_top_ = write;
 }


 void StoreBuffer::SortUniq() {
   Compact();
   if (old_buffer_is_sorted_) return;
+  if (store_buffer_mode_ == kStoreBufferDisabled) {
+    old_top_ = old_start_;
+    return;
+  }
   ZapHashTables();
   qsort(reinterpret_cast<void*>(old_start_),
         old_top_ - old_start_,
         sizeof(*old_top_),
         &CompareAddresses);
   Uniq();

   old_buffer_is_sorted_ = true;
 }


 #ifdef DEBUG
 void StoreBuffer::Clean() {
-  if (must_scan_entire_memory_) {
-    // We don't currently have a way to go back to using the store buffer.
-    // TODO(gc): We should rebuild the store buffer during GC.
+  if (store_buffer_mode_ == kStoreBufferDisabled) {
     old_top_ = old_start_; // Just clear the cache.
     return;
   }
   ZapHashTables();
   Uniq(); // Also removes things that no longer point to new space.
   CheckForFullBuffer();
 }


 static bool Zapped(char* start, int size) {
(...skipping 21 matching lines...)
          sizeof(uintptr_t) * kHashMapLength);
   memset(reinterpret_cast<void*>(hash_map_2_),
          0,
          sizeof(uintptr_t) * kHashMapLength);
 }


 void StoreBuffer::GCPrologue(GCType type, GCCallbackFlags flags) {
   ZapHashTables();
   during_gc_ = true;
-  if (type != kGCTypeScavenge) {
-    old_top_ = old_start_;
-    Heap::public_set_store_buffer_top(start_);
-  }
 }


 void StoreBuffer::Verify() {
 #ifdef DEBUG
-  if (FLAG_verify_heap && !StoreBuffer::must_scan_entire_memory()) {
+  if (FLAG_verify_heap &&
+      StoreBuffer::store_buffer_mode_ == kStoreBufferFunctional) {
     Heap::OldPointerSpaceCheckStoreBuffer(Heap::WATERMARK_SHOULD_BE_VALID);
     Heap::MapSpaceCheckStoreBuffer(Heap::WATERMARK_SHOULD_BE_VALID);
     Heap::LargeObjectSpaceCheckStoreBuffer();
   }
 #endif
 }


 void StoreBuffer::GCEpilogue(GCType type, GCCallbackFlags flags) {
   during_gc_ = false;
+  if (store_buffer_mode_ == kStoreBufferBeingRebuilt) {
+    store_buffer_mode_ = kStoreBufferFunctional;
+  }
   Verify();
 }


+void StoreBuffer::IteratePointersToNewSpace(ObjectSlotCallback callback) {
+  if (store_buffer_mode_ != kStoreBufferFunctional) {
+    old_top_ = old_start_;
+    ZapHashTables();
+    Heap::public_set_store_buffer_top(start_);
+    store_buffer_mode_ = kStoreBufferBeingRebuilt;
+    Heap::IteratePointers(Heap::old_pointer_space(),
+                          &Heap::IteratePointersToNewSpace,
+                          callback,
+                          Heap::WATERMARK_SHOULD_BE_VALID);
+
+    Heap::IteratePointers(Heap::map_space(),
+                          &Heap::IteratePointersFromMapsToNewSpace,
+                          callback,
+                          Heap::WATERMARK_SHOULD_BE_VALID);
+
+    Heap::lo_space()->IteratePointersToNewSpace(callback);
+  } else {
+    SortUniq();
+    Address* limit = old_top_;
+    old_top_ = old_start_;
+    {
+      DontMoveStoreBufferEntriesScope scope;
+      for (Address* current = old_start_; current < limit; current++) {
+#ifdef DEBUG
+        Address* saved_top = old_top_;
+#endif
+        Object** cell = reinterpret_cast<Object**>(*current);
+        Object* object = *cell;
+        // May be invalid if object is not in new space.
+        HeapObject* heap_object = reinterpret_cast<HeapObject*>(object);
+        if (Heap::InNewSpace(object)) {
+          callback(reinterpret_cast<HeapObject**>(cell), heap_object);
+        }
+        ASSERT(old_top_ == saved_top + 1 || old_top_ == saved_top);
+        ASSERT((old_top_ == saved_top + 1) ==
+               (Heap::InNewSpace(*cell) &&
+                !Heap::InNewSpace(reinterpret_cast<Address>(cell)) &&
+                Memory::Address_at(heap_object->address()) != NULL));
+      }
+    }
+  }
+}
+
+
 void StoreBuffer::Compact() {
   Address* top = reinterpret_cast<Address*>(Heap::store_buffer_top());
+
   if (top == start_) return;
+
+  // There's no check of the limit in the loop below so we check here for
+  // the worst case (compaction doesn't eliminate any pointers).
   ASSERT(top <= limit_);
   Heap::public_set_store_buffer_top(start_);
-  if (must_scan_entire_memory_) return;
+  if (top - start_ > old_limit_ - old_top_) {
+    CheckForFullBuffer();
+  }
+  if (store_buffer_mode_ == kStoreBufferDisabled) return;
+  ASSERT(may_move_store_buffer_entries_);
   // Goes through the addresses in the store buffer attempting to remove
   // duplicates. In the interest of speed this is a lossy operation. Some
   // duplicates will remain. We have two hash tables with different hash
   // functions to reduce the number of unnecessary clashes.
   for (Address* current = start_; current < top; current++) {
     uintptr_t int_addr = reinterpret_cast<uintptr_t>(*current);
     // Shift out the last bits including any tags.
     int_addr >>= kPointerSizeLog2;
     int hash1 =
         ((int_addr ^ (int_addr >> kHashMapLengthLog2)) & (kHashMapLength - 1));
(...skipping 32 matching lines...)
     return;
   }
   // TODO(gc): Set an interrupt to do a GC on the next back edge.
   // TODO(gc): Allocate the rest of new space to force a GC on the next
   // allocation.
   if (old_limit_ - old_top_ < kStoreBufferSize) {
     // After compression we don't even have enough space for the next
     // compression to be guaranteed to succeed.
     // TODO(gc): Set a flag to scan all of memory.
     Counters::store_buffer_overflows.Increment();
-    must_scan_entire_memory_ = true;
+    store_buffer_mode_ = kStoreBufferDisabled;
   }
 }

 } } // namespace v8::internal
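
Note on the Compact() change: the comments in the hunk above describe a lossy de-duplication pass that filters recorded slot addresses through two small hash tables with different hash functions, so most duplicates are dropped cheaply while no unique address is ever lost. Below is a minimal standalone sketch of that idea, assuming 64-bit pointers; it is not the V8 implementation, and the table size, the second hash function, and the ProbablySeen/Compact helpers are hypothetical names used only for illustration.

#include <cstddef>
#include <cstdint>
#include <cstring>
#include <vector>

namespace {

const int kPointerSizeLog2 = 3;     // assumed 64-bit pointers
const int kHashMapLengthLog2 = 12;  // hypothetical table size (4096 slots)
const int kHashMapLength = 1 << kHashMapLengthLog2;

uintptr_t hash_map_1[kHashMapLength];
uintptr_t hash_map_2[kHashMapLength];

// Returns true if the address was seen recently and can be dropped.  Hash
// collisions evict older entries, so the filter is lossy: some duplicates
// survive, but a unique address is never discarded.
bool ProbablySeen(uintptr_t addr) {
  uintptr_t key = addr >> kPointerSizeLog2;  // shift out tag bits
  int hash1 = static_cast<int>(
      (key ^ (key >> kHashMapLengthLog2)) & (kHashMapLength - 1));
  if (hash_map_1[hash1] == addr) return true;
  // A second, different hash function reduces unnecessary clashes.
  int hash2 = static_cast<int>(
      (((key >> kHashMapLengthLog2) * 2654435761u) ^ key) &
      (kHashMapLength - 1));
  if (hash_map_2[hash2] == addr) return true;
  // Not found: remember it, demoting whatever occupied the first-level slot.
  hash_map_2[hash2] = hash_map_1[hash1];
  hash_map_1[hash1] = addr;
  return false;
}

// Compacts the buffer in place, dropping addresses the filter has seen.
size_t Compact(std::vector<uintptr_t>* buffer) {
  std::memset(hash_map_1, 0, sizeof(hash_map_1));
  std::memset(hash_map_2, 0, sizeof(hash_map_2));
  size_t write = 0;
  for (size_t read = 0; read < buffer->size(); read++) {
    uintptr_t addr = (*buffer)[read];
    if (!ProbablySeen(addr)) (*buffer)[write++] = addr;
  }
  buffer->resize(write);
  return write;
}

}  // namespace

int main() {
  std::vector<uintptr_t> slots = {0x1000, 0x2000, 0x1000, 0x3000, 0x2000};
  Compact(&slots);  // leaves {0x1000, 0x2000, 0x3000} for these inputs
  return slots.size() == 3 ? 0 : 1;
}

Because the filter only ever misses duplicates, a later exact pass (SortUniq() in the patch) can still remove whatever slips through.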

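Note on the new store buffer modes: the patch replaces the single must_scan_entire_memory_ flag with a three-valued store_buffer_mode_. On overflow the buffer is disabled, the next IteratePointersToNewSpace() falls back to scanning old space, map space and large objects while rebuilding the buffer, and GCEpilogue() switches it back to functional. A minimal sketch of those transitions, assuming this reading of the diff, is below; the enum values come from the patch, but the StoreBufferState wrapper and its OnOverflow/BeginScavenge/EndGC methods are hypothetical names used only for illustration.

#include <cassert>

enum StoreBufferMode {
  kStoreBufferFunctional,    // normal operation: the buffer records old->new slots
  kStoreBufferDisabled,      // overflowed: entries dropped, old space must be rescanned
  kStoreBufferBeingRebuilt   // a scavenge is repopulating the buffer from a full scan
};

struct StoreBufferState {
  StoreBufferMode mode = kStoreBufferFunctional;

  // Compact()/CheckForFullBuffer(): if even the compacted buffer may not fit,
  // give up and fall back to a full old-space scan at the next scavenge.
  void OnOverflow() { mode = kStoreBufferDisabled; }

  // IteratePointersToNewSpace(): when the buffer is not functional, clear it
  // and rebuild it while walking old space, map space and large objects.
  // Returns true if the caller must do the full scan.
  bool BeginScavenge() {
    if (mode != kStoreBufferFunctional) {
      mode = kStoreBufferBeingRebuilt;
      return true;
    }
    return false;  // the recorded slots are trustworthy; just iterate them
  }

  // GCEpilogue(): a completed rebuild makes the buffer usable again.
  void EndGC() {
    if (mode == kStoreBufferBeingRebuilt) mode = kStoreBufferFunctional;
  }
};

int main() {
  StoreBufferState sb;
  sb.OnOverflow();             // the buffer overflowed while the mutator ran
  assert(sb.BeginScavenge());  // the next scavenge rescans old space...
  sb.EndGC();                  // ...after which the buffer is functional again
  assert(!sb.BeginScavenge());
  return 0;
}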