| OLD | NEW |
| 1 // Copyright 2010 the V8 project authors. All rights reserved. | 1 // Copyright 2010 the V8 project authors. All rights reserved. |
| 2 // Redistribution and use in source and binary forms, with or without | 2 // Redistribution and use in source and binary forms, with or without |
| 3 // modification, are permitted provided that the following conditions are | 3 // modification, are permitted provided that the following conditions are |
| 4 // met: | 4 // met: |
| 5 // | 5 // |
| 6 // * Redistributions of source code must retain the above copyright | 6 // * Redistributions of source code must retain the above copyright |
| 7 // notice, this list of conditions and the following disclaimer. | 7 // notice, this list of conditions and the following disclaimer. |
| 8 // * Redistributions in binary form must reproduce the above | 8 // * Redistributions in binary form must reproduce the above |
| 9 // copyright notice, this list of conditions and the following | 9 // copyright notice, this list of conditions and the following |
| 10 // disclaimer in the documentation and/or other materials provided | 10 // disclaimer in the documentation and/or other materials provided |
| (...skipping 63 matching lines...) |
| 74 kStoreBufferSize, | 74 kStoreBufferSize, |
| 75 false); // Not executable. | 75 false); // Not executable. |
| 76 Heap::public_set_store_buffer_top(start_); | 76 Heap::public_set_store_buffer_top(start_); |
| 77 | 77 |
| 78 hash_map_1_ = new uintptr_t[kHashMapLength]; | 78 hash_map_1_ = new uintptr_t[kHashMapLength]; |
| 79 hash_map_2_ = new uintptr_t[kHashMapLength]; | 79 hash_map_2_ = new uintptr_t[kHashMapLength]; |
| 80 | 80 |
| 81 Heap::AddGCPrologueCallback(&GCPrologue, kGCTypeAll); | 81 Heap::AddGCPrologueCallback(&GCPrologue, kGCTypeAll); |
| 82 Heap::AddGCEpilogueCallback(&GCEpilogue, kGCTypeAll); | 82 Heap::AddGCEpilogueCallback(&GCEpilogue, kGCTypeAll); |
| 83 | 83 |
| 84 GCPrologue(kGCTypeMarkSweepCompact, kNoGCCallbackFlags); | 84 ZapHashTables(); |
| 85 } | 85 } |
| 86 | 86 |
| 87 | 87 |
| 88 void StoreBuffer::TearDown() { | 88 void StoreBuffer::TearDown() { |
| 89 delete virtual_memory_; | 89 delete virtual_memory_; |
| 90 delete[] hash_map_1_; | 90 delete[] hash_map_1_; |
| 91 delete[] hash_map_2_; | 91 delete[] hash_map_2_; |
| 92 delete[] old_start_; | 92 delete[] old_start_; |
| 93 old_start_ = old_top_ = old_limit_ = NULL; | 93 old_start_ = old_top_ = old_limit_ = NULL; |
| 94 start_ = limit_ = NULL; | 94 start_ = limit_ = NULL; |
| (...skipping 44 matching lines...) |
| 139 } | 139 } |
| 140 previous = current; | 140 previous = current; |
| 141 } | 141 } |
| 142 old_top_ = write; | 142 old_top_ = write; |
| 143 } | 143 } |
| 144 | 144 |
| 145 | 145 |
| 146 void StoreBuffer::SortUniq() { | 146 void StoreBuffer::SortUniq() { |
| 147 Compact(); | 147 Compact(); |
| 148 if (old_buffer_is_sorted_) return; | 148 if (old_buffer_is_sorted_) return; |
| 149 if (store_buffer_mode_ == kStoreBufferDisabled) { | 149 if (store_buffer_mode() == kStoreBufferDisabled) { |
| 150 old_top_ = old_start_; | 150 old_top_ = old_start_; |
| 151 return; | 151 return; |
| 152 } | 152 } |
| 153 ZapHashTables(); | 153 ZapHashTables(); |
| 154 qsort(reinterpret_cast<void*>(old_start_), | 154 qsort(reinterpret_cast<void*>(old_start_), |
| 155 old_top_ - old_start_, | 155 old_top_ - old_start_, |
| 156 sizeof(*old_top_), | 156 sizeof(*old_top_), |
| 157 &CompareAddresses); | 157 &CompareAddresses); |
| 158 Uniq(); | 158 Uniq(); |
| 159 | 159 |
| 160 old_buffer_is_sorted_ = true; | 160 old_buffer_is_sorted_ = true; |
| 161 } | 161 } |
| 162 | 162 |
| 163 | 163 |
| 164 #ifdef DEBUG | 164 #ifdef DEBUG |
| 165 void StoreBuffer::Clean() { | 165 void StoreBuffer::Clean() { |
| 166 if (store_buffer_mode_ == kStoreBufferDisabled) { | 166 if (store_buffer_mode() == kStoreBufferDisabled) { |
| 167 old_top_ = old_start_; // Just clear the cache. | 167 old_top_ = old_start_; // Just clear the cache. |
| 168 return; | 168 return; |
| 169 } | 169 } |
| 170 ZapHashTables(); | 170 ZapHashTables(); |
| 171 Uniq(); // Also removes things that no longer point to new space. | 171 Uniq(); // Also removes things that no longer point to new space. |
| 172 CheckForFullBuffer(); | 172 CheckForFullBuffer(); |
| 173 } | 173 } |
| 174 | 174 |
| 175 | 175 |
| 176 static bool Zapped(char* start, int size) { | 176 static bool Zapped(char* start, int size) { |
| (...skipping 10 matching lines...) |
| 187 Zapped(reinterpret_cast<char*>(hash_map_2_), | 187 Zapped(reinterpret_cast<char*>(hash_map_2_), |
| 188 sizeof(uintptr_t) * kHashMapLength); | 188 sizeof(uintptr_t) * kHashMapLength); |
| 189 } | 189 } |
| 190 | 190 |
| 191 | 191 |
| 192 static Address* in_store_buffer_1_element_cache = NULL; | 192 static Address* in_store_buffer_1_element_cache = NULL; |
| 193 | 193 |
| 194 | 194 |
| 195 bool StoreBuffer::CellIsInStoreBuffer(Address cell_address) { | 195 bool StoreBuffer::CellIsInStoreBuffer(Address cell_address) { |
| 196 if (!FLAG_enable_slow_asserts) return true; | 196 if (!FLAG_enable_slow_asserts) return true; |
| 197 if (store_buffer_mode_ != kStoreBufferFunctional) return true; | 197 if (store_buffer_mode() != kStoreBufferFunctional) return true; |
| 198 if (in_store_buffer_1_element_cache != NULL && | 198 if (in_store_buffer_1_element_cache != NULL && |
| 199 *in_store_buffer_1_element_cache == cell_address) { | 199 *in_store_buffer_1_element_cache == cell_address) { |
| 200 return true; | 200 return true; |
| 201 } | 201 } |
| 202 Address* top = reinterpret_cast<Address*>(Heap::store_buffer_top()); | 202 Address* top = reinterpret_cast<Address*>(Heap::store_buffer_top()); |
| 203 for (Address* current = top - 1; current >= start_; current--) { | 203 for (Address* current = top - 1; current >= start_; current--) { |
| 204 if (*current == cell_address) { | 204 if (*current == cell_address) { |
| 205 in_store_buffer_1_element_cache = current; | 205 in_store_buffer_1_element_cache = current; |
| 206 return true; | 206 return true; |
| 207 } | 207 } |
| (...skipping 21 matching lines...) |
| 229 | 229 |
| 230 void StoreBuffer::GCPrologue(GCType type, GCCallbackFlags flags) { | 230 void StoreBuffer::GCPrologue(GCType type, GCCallbackFlags flags) { |
| 231 ZapHashTables(); | 231 ZapHashTables(); |
| 232 during_gc_ = true; | 232 during_gc_ = true; |
| 233 } | 233 } |
| 234 | 234 |
| 235 | 235 |
| 236 void StoreBuffer::Verify() { | 236 void StoreBuffer::Verify() { |
| 237 #ifdef DEBUG | 237 #ifdef DEBUG |
| 238 if (FLAG_verify_heap && | 238 if (FLAG_verify_heap && |
| 239 StoreBuffer::store_buffer_mode_ == kStoreBufferFunctional) { | 239 StoreBuffer::store_buffer_mode() == kStoreBufferFunctional) { |
| 240 Heap::OldPointerSpaceCheckStoreBuffer(Heap::WATERMARK_SHOULD_BE_VALID); | 240 Heap::OldPointerSpaceCheckStoreBuffer(Heap::WATERMARK_SHOULD_BE_VALID); |
| 241 Heap::MapSpaceCheckStoreBuffer(Heap::WATERMARK_SHOULD_BE_VALID); | 241 Heap::MapSpaceCheckStoreBuffer(Heap::WATERMARK_SHOULD_BE_VALID); |
| 242 Heap::LargeObjectSpaceCheckStoreBuffer(); | 242 Heap::LargeObjectSpaceCheckStoreBuffer(); |
| 243 } | 243 } |
| 244 #endif | 244 #endif |
| 245 } | 245 } |
| 246 | 246 |
| 247 | 247 |
| 248 void StoreBuffer::GCEpilogue(GCType type, GCCallbackFlags flags) { | 248 void StoreBuffer::GCEpilogue(GCType type, GCCallbackFlags flags) { |
| 249 during_gc_ = false; | 249 during_gc_ = false; |
| 250 if (store_buffer_mode_ == kStoreBufferBeingRebuilt) { | 250 if (store_buffer_mode() == kStoreBufferBeingRebuilt) { |
| 251 store_buffer_mode_ = kStoreBufferFunctional; | 251 set_store_buffer_mode(kStoreBufferFunctional); |
| 252 } | 252 } |
| 253 Verify(); | 253 Verify(); |
| 254 } | 254 } |
| 255 | 255 |
| 256 | 256 |
| 257 void StoreBuffer::IteratePointersToNewSpace(ObjectSlotCallback callback) { | 257 void StoreBuffer::IteratePointersToNewSpace(ObjectSlotCallback callback) { |
| 258 if (store_buffer_mode_ != kStoreBufferFunctional) { | 258 if (store_buffer_mode() == kStoreBufferFunctional) { |
| | 259 SortUniq(); |
| | 260 } |
| | 261 if (store_buffer_mode() != kStoreBufferFunctional) { |
| 259 old_top_ = old_start_; | 262 old_top_ = old_start_; |
| 260 ZapHashTables(); | 263 ZapHashTables(); |
| 261 Heap::public_set_store_buffer_top(start_); | 264 Heap::public_set_store_buffer_top(start_); |
| 262 store_buffer_mode_ = kStoreBufferBeingRebuilt; | 265 set_store_buffer_mode(kStoreBufferBeingRebuilt); |
| 263 Heap::IteratePointers(Heap::old_pointer_space(), | 266 Heap::IteratePointers(Heap::old_pointer_space(), |
| 264 &Heap::IteratePointersToNewSpace, | 267 &Heap::IteratePointersToNewSpace, |
| 265 callback, | 268 callback, |
| 266 Heap::WATERMARK_SHOULD_BE_VALID); | 269 Heap::WATERMARK_SHOULD_BE_VALID); |
| 267 | 270 |
| 268 Heap::IteratePointers(Heap::map_space(), | 271 Heap::IteratePointers(Heap::map_space(), |
| 269 &Heap::IteratePointersFromMapsToNewSpace, | 272 &Heap::IteratePointersFromMapsToNewSpace, |
| 270 callback, | 273 callback, |
| 271 Heap::WATERMARK_SHOULD_BE_VALID); | 274 Heap::WATERMARK_SHOULD_BE_VALID); |
| 272 | 275 |
| 273 Heap::lo_space()->IteratePointersToNewSpace(callback); | 276 Heap::lo_space()->IteratePointersToNewSpace(callback); |
| 274 } else { | 277 } else { |
| 275 SortUniq(); | |
| 276 Address* limit = old_top_; | 278 Address* limit = old_top_; |
| 277 old_top_ = old_start_; | 279 old_top_ = old_start_; |
| 278 { | 280 { |
| 279 DontMoveStoreBufferEntriesScope scope; | 281 DontMoveStoreBufferEntriesScope scope; |
| 280 for (Address* current = old_start_; current < limit; current++) { | 282 for (Address* current = old_start_; current < limit; current++) { |
| 281 #ifdef DEBUG | 283 #ifdef DEBUG |
| 282 Address* saved_top = old_top_; | 284 Address* saved_top = old_top_; |
| 283 #endif | 285 #endif |
| 284 Object** cell = reinterpret_cast<Object**>(*current); | 286 Object** cell = reinterpret_cast<Object**>(*current); |
| 285 Object* object = *cell; | 287 Object* object = *cell; |
| (...skipping 18 matching lines...) |
| 304 | 306 |
| 305 if (top == start_) return; | 307 if (top == start_) return; |
| 306 | 308 |
| 307 // There's no check of the limit in the loop below so we check here for | 309 // There's no check of the limit in the loop below so we check here for |
| 308 // the worst case (compaction doesn't eliminate any pointers). | 310 // the worst case (compaction doesn't eliminate any pointers). |
| 309 ASSERT(top <= limit_); | 311 ASSERT(top <= limit_); |
| 310 Heap::public_set_store_buffer_top(start_); | 312 Heap::public_set_store_buffer_top(start_); |
| 311 if (top - start_ > old_limit_ - old_top_) { | 313 if (top - start_ > old_limit_ - old_top_) { |
| 312 CheckForFullBuffer(); | 314 CheckForFullBuffer(); |
| 313 } | 315 } |
| 314 if (store_buffer_mode_ == kStoreBufferDisabled) return; | 316 if (store_buffer_mode() == kStoreBufferDisabled) return; |
| 315 ASSERT(may_move_store_buffer_entries_); | 317 ASSERT(may_move_store_buffer_entries_); |
| 316 // Goes through the addresses in the store buffer attempting to remove | 318 // Goes through the addresses in the store buffer attempting to remove |
| 317 // duplicates. In the interest of speed this is a lossy operation. Some | 319 // duplicates. In the interest of speed this is a lossy operation. Some |
| 318 // duplicates will remain. We have two hash tables with different hash | 320 // duplicates will remain. We have two hash tables with different hash |
| 319 // functions to reduce the number of unnecessary clashes. | 321 // functions to reduce the number of unnecessary clashes. |
| 320 for (Address* current = start_; current < top; current++) { | 322 for (Address* current = start_; current < top; current++) { |
| 321 uintptr_t int_addr = reinterpret_cast<uintptr_t>(*current); | 323 uintptr_t int_addr = reinterpret_cast<uintptr_t>(*current); |
| 322 // Shift out the last bits including any tags. | 324 // Shift out the last bits including any tags. |
| 323 int_addr >>= kPointerSizeLog2; | 325 int_addr >>= kPointerSizeLog2; |
| 324 int hash1 = | 326 int hash1 = |
| (...skipping 21 matching lines...) |
| 346 CheckForFullBuffer(); | 348 CheckForFullBuffer(); |
| 347 } | 349 } |
| 348 | 350 |
| 349 | 351 |
| 350 void StoreBuffer::CheckForFullBuffer() { | 352 void StoreBuffer::CheckForFullBuffer() { |
| 351 if (old_limit_ - old_top_ < kStoreBufferSize * 2) { | 353 if (old_limit_ - old_top_ < kStoreBufferSize * 2) { |
| 352 // After compression we can't be sure that the next two compressions | 354 // After compression we can't be sure that the next two compressions |
| 353 // will have enough space in the buffer. We start by trying a more | 355 // will have enough space in the buffer. We start by trying a more |
| 354 // aggressive compression. If this frees up at least half the space | 356 // aggressive compression. If this frees up at least half the space |
| 355 // then we can keep going, otherwise it is time to brake. | 357 // then we can keep going, otherwise it is time to brake. |
| 356 SortUniq(); | 358 if (!during_gc_) { |
| | 359 SortUniq(); |
| | 360 } |
| 357 if (old_limit_ - old_top_ > (old_limit_ - old_start_) / 2) { | 361 if (old_limit_ - old_top_ > (old_limit_ - old_start_) / 2) { |
| 358 return; | 362 return; |
| 359 } | 363 } |
| 360 // TODO(gc): Set an interrupt to do a GC on the next back edge. | 364 // TODO(gc): Set an interrupt to do a GC on the next back edge. |
| 361 // TODO(gc): Allocate the rest of new space to force a GC on the next | 365 // TODO(gc): Allocate the rest of new space to force a GC on the next |
| 362 // allocation. | 366 // allocation. |
| 363 if (old_limit_ - old_top_ < kStoreBufferSize) { | 367 if (old_limit_ - old_top_ < kStoreBufferSize) { |
| 364 // After compression we don't even have enough space for the next | 368 // After compression we don't even have enough space for the next |
| 365 // compression to be guaranteed to succeed. | 369 // compression to be guaranteed to succeed. |
| 366 // TODO(gc): Set a flag to scan all of memory. | 370 // TODO(gc): Set a flag to scan all of memory. |
| 367 Counters::store_buffer_overflows.Increment(); | 371 Counters::store_buffer_overflows.Increment(); |
| 368 store_buffer_mode_ = kStoreBufferDisabled; | 372 set_store_buffer_mode(kStoreBufferDisabled); |
| 369 } | 373 } |
| 370 } | 374 } |
| 371 } | 375 } |
| 372 | 376 |
| 373 } } // namespace v8::internal | 377 } } // namespace v8::internal |
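
The comment in Compact() describes a lossy duplicate filter built on two hash tables with different hash functions, but the filter body itself falls inside the skipped lines above. The following is a minimal self-contained sketch of how such a filter can work; the table length, the shift amount, and the ProbablySeen name are illustrative assumptions, not the elided V8 code.

#include <cstdint>

static const int kHashMapLength = 1 << 12;  // Assumed to be a power of two.
static uintptr_t hash_map_1[kHashMapLength];
static uintptr_t hash_map_2[kHashMapLength];

// Returns true if int_addr was probably seen before. False positives cannot
// occur because the stored value itself is compared; false negatives can,
// when a colliding entry evicted an earlier one, so some duplicates survive.
// That is what makes the compaction lossy but fast.
static bool ProbablySeen(uintptr_t int_addr) {
  int hash1 = static_cast<int>(int_addr & (kHashMapLength - 1));
  int hash2 = static_cast<int>((int_addr >> 5) & (kHashMapLength - 1));
  if (hash_map_1[hash1] == int_addr || hash_map_2[hash2] == int_addr) {
    return true;
  }
  hash_map_1[hash1] = int_addr;  // Evict whatever hashed here before.
  hash_map_2[hash2] = int_addr;
  return false;
}

In Compact() the caller would keep *current in the old buffer only when ProbablySeen() returns false, which matches the comment that "some duplicates will remain".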
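The three store buffer modes are threaded through several functions in this file. The transitions below are read off this diff rather than taken from V8 documentation, so treat them as a sketch of the apparent lifecycle.

// Apparent state machine for store_buffer_mode(), as used in this file.
enum StoreBufferMode {
  kStoreBufferFunctional,    // Normal operation: buffer contents are trusted.
  kStoreBufferDisabled,      // CheckForFullBuffer() overflowed and gave up;
                             // callers must rescan the old spaces instead.
  kStoreBufferBeingRebuilt   // IteratePointersToNewSpace() is repopulating
                             // the buffer from a full old-space iteration.
};

// Functional   -> Disabled      in CheckForFullBuffer(), on overflow.
// Disabled     -> BeingRebuilt  in IteratePointersToNewSpace(), before the
//                               full iteration of the old spaces.
// BeingRebuilt -> Functional    in GCEpilogue(), once the rebuild completes.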