Chromium Code Reviews

Unified Diff: src/store-buffer.cc

Issue 6880010: Merge (7265, 7271] from bleeding_edge to experimental/gc branch.... (Closed) Base URL: http://v8.googlecode.com/svn/branches/experimental/gc/
Patch Set: '' Created 9 years, 8 months ago
 // Copyright 2011 the V8 project authors. All rights reserved.
 // Redistribution and use in source and binary forms, with or without
 // modification, are permitted provided that the following conditions are
 // met:
 //
 //     * Redistributions of source code must retain the above copyright
 //       notice, this list of conditions and the following disclaimer.
 //     * Redistributions in binary form must reproduce the above
 //       copyright notice, this list of conditions and the following
 //       disclaimer in the documentation and/or other materials provided
(...skipping 16 matching lines...)

 #include "v8.h"

 #include "store-buffer.h"
 #include "store-buffer-inl.h"
 #include "v8-counters.h"

 namespace v8 {
 namespace internal {

-Address* StoreBuffer::start_ = NULL;
-Address* StoreBuffer::limit_ = NULL;
-Address* StoreBuffer::old_start_ = NULL;
-Address* StoreBuffer::old_limit_ = NULL;
-Address* StoreBuffer::old_top_ = NULL;
-uintptr_t* StoreBuffer::hash_map_1_ = NULL;
-uintptr_t* StoreBuffer::hash_map_2_ = NULL;
-VirtualMemory* StoreBuffer::virtual_memory_ = NULL;
-bool StoreBuffer::old_buffer_is_sorted_ = false;
-bool StoreBuffer::old_buffer_is_filtered_ = false;
-bool StoreBuffer::during_gc_ = false;
-bool StoreBuffer::may_move_store_buffer_entries_ = true;
-bool StoreBuffer::store_buffer_rebuilding_enabled_ = false;
-StoreBufferCallback StoreBuffer::callback_ = NULL;
+StoreBuffer::StoreBuffer(Heap* heap)
+    : heap_(heap),
+      start_(NULL),
+      limit_(NULL),
+      old_start_(NULL),
+      old_limit_(NULL),
+      old_top_(NULL),
+      old_buffer_is_sorted_(false),
+      old_buffer_is_filtered_(false),
+      during_gc_(false),
+      store_buffer_rebuilding_enabled_(false),
+      callback_(NULL),
+      may_move_store_buffer_entries_(true),
+      virtual_memory_(NULL),
+      hash_map_1_(NULL),
+      hash_map_2_(NULL) {
+}
+

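Note: the hunk above is the heart of this change — every piece of StoreBuffer state moves from class-level statics to instance fields owned by a Heap, which is what lets each isolate carry its own independent store buffer (StoreBufferOverflow below reaches it via isolate->heap()->store_buffer()). A minimal sketch of the ownership chain, with illustrative definitions rather than V8's real class layouts:

#include <cstddef>

typedef unsigned char* Address;

class Heap;  // forward declaration; defined below

class StoreBuffer {
 public:
  // State that used to live in statics now travels with the owning heap,
  // so two isolates get two independent store buffers.
  explicit StoreBuffer(Heap* heap)
      : heap_(heap), start_(NULL), limit_(NULL) {}

 private:
  Heap* heap_;      // back pointer replacing implicit access to Heap::
  Address* start_;  // previously: static Address* StoreBuffer::start_
  Address* limit_;  // previously: static Address* StoreBuffer::limit_
};

class Heap {
 public:
  Heap() : store_buffer_(this) {}
  StoreBuffer* store_buffer() { return &store_buffer_; }

 private:
  StoreBuffer store_buffer_;  // one store buffer per heap (per isolate)
};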
 void StoreBuffer::Setup() {
   virtual_memory_ = new VirtualMemory(kStoreBufferSize * 3);
   uintptr_t start_as_int =
       reinterpret_cast<uintptr_t>(virtual_memory_->address());
   start_ =
       reinterpret_cast<Address*>(RoundUp(start_as_int, kStoreBufferSize * 2));
   limit_ = start_ + (kStoreBufferSize / sizeof(*start_));

   old_top_ = old_start_ = new Address[kOldStoreBufferLength];
   old_limit_ = old_start_ + kOldStoreBufferLength;

   ASSERT(reinterpret_cast<Address>(start_) >= virtual_memory_->address());
   ASSERT(reinterpret_cast<Address>(limit_) >= virtual_memory_->address());
   Address* vm_limit = reinterpret_cast<Address*>(
       reinterpret_cast<char*>(virtual_memory_->address()) +
       virtual_memory_->size());
   ASSERT(start_ <= vm_limit);
   ASSERT(limit_ <= vm_limit);
   USE(vm_limit);
   ASSERT((reinterpret_cast<uintptr_t>(limit_) & kStoreBufferOverflowBit) != 0);
   ASSERT((reinterpret_cast<uintptr_t>(limit_ - 1) & kStoreBufferOverflowBit) ==
          0);

   virtual_memory_->Commit(reinterpret_cast<Address>(start_),
                           kStoreBufferSize,
                           false);  // Not executable.
-  Heap::public_set_store_buffer_top(start_);
+  heap_->public_set_store_buffer_top(start_);

   hash_map_1_ = new uintptr_t[kHashMapLength];
   hash_map_2_ = new uintptr_t[kHashMapLength];

-  Heap::AddGCPrologueCallback(&GCPrologue, kGCTypeAll);
-  Heap::AddGCEpilogueCallback(&GCEpilogue, kGCTypeAll);
+  heap_->AddGCPrologueCallback(&GCPrologue, kGCTypeAll);
+  heap_->AddGCEpilogueCallback(&GCEpilogue, kGCTypeAll);

   ZapHashTables();
 }
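Note: Setup() reserves three times the buffer size and rounds the start up to a 2 * kStoreBufferSize boundary. The point of that alignment, verified by the two overflow-bit ASSERTs above, is that limit_ = start_ + kStoreBufferSize then has the kStoreBufferSize address bit set while every interior slot does not, so generated code can detect overflow by testing a single bit of the top pointer instead of comparing against a limit. A small worked example, assuming kStoreBufferSize = 0x10000 purely for illustration (the real constant is defined elsewhere in V8):

#include <cassert>
#include <cstdint>

int main() {
  const uintptr_t kStoreBufferSize = 0x10000;          // assumed value
  const uintptr_t kStoreBufferOverflowBit = kStoreBufferSize;  // one bit

  // Any reservation start, rounded up to a 2 * kStoreBufferSize boundary.
  uintptr_t raw = 0x12345678;
  uintptr_t start =
      (raw + 2 * kStoreBufferSize - 1) & ~(2 * kStoreBufferSize - 1);
  uintptr_t limit = start + kStoreBufferSize;

  // start is aligned to 2N, so bit N is clear from start through
  // start + N - 1, and becomes set exactly at limit = start + N.
  assert((start & kStoreBufferOverflowBit) == 0);
  assert((limit & kStoreBufferOverflowBit) != 0);
  assert(((limit - 1) & kStoreBufferOverflowBit) == 0);
  return 0;
}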


 void StoreBuffer::TearDown() {
   delete virtual_memory_;
   delete[] hash_map_1_;
   delete[] hash_map_2_;
   delete[] old_start_;
   old_start_ = old_top_ = old_limit_ = NULL;
   start_ = limit_ = NULL;
-  Heap::public_set_store_buffer_top(start_);
+  heap_->public_set_store_buffer_top(start_);
+}
+
+
+void StoreBuffer::StoreBufferOverflow(Isolate* isolate) {
+  isolate->heap()->store_buffer()->Compact();
 }


 #if V8_TARGET_ARCH_X64
 static int CompareAddresses(const void* void_a, const void* void_b) {
   intptr_t a =
       reinterpret_cast<intptr_t>(*reinterpret_cast<const Address*>(void_a));
   intptr_t b =
       reinterpret_cast<intptr_t>(*reinterpret_cast<const Address*>(void_b));
   // Unfortunately if int is smaller than intptr_t there is no branch-free
(...skipping 19 matching lines...)

 void StoreBuffer::Uniq() {
   ASSERT(HashTablesAreZapped());
   // Remove adjacent duplicates and cells that do not point at new space.
   Address previous = NULL;
   Address* write = old_start_;
   ASSERT(may_move_store_buffer_entries_);
   for (Address* read = old_start_; read < old_top_; read++) {
     Address current = *read;
     if (current != previous) {
-      if (Heap::InNewSpace(*reinterpret_cast<Object**>(current))) {
+      if (heap_->InNewSpace(*reinterpret_cast<Object**>(current))) {
         *write++ = current;
       }
     }
     previous = current;
   }
   old_top_ = write;
 }
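Note: Uniq() is the standard in-place read/write compaction idiom — a read cursor scans every slot while a write cursor lags behind, advancing only for entries worth keeping (different from the previous entry and still pointing at new space; only adjacent duplicates are removed, since the buffer may be unsorted). The same idiom in isolation, with the new-space test stubbed out as an assumption:

#include <cassert>

// Stand-in for heap_->InNewSpace(...); an assumption for this sketch.
static bool KeepEntry(int value) { return value != 0; }

// Removes adjacent duplicates and rejected entries in place; returns the
// new end, exactly like old_top_ = write in Uniq().
static int* Compacted(int* start, int* top) {
  int previous = -1;  // sentinel, like Address previous = NULL
  int* write = start;
  for (int* read = start; read < top; read++) {
    int current = *read;
    if (current != previous && KeepEntry(current)) {
      *write++ = current;
    }
    previous = current;
  }
  return write;
}

int main() {
  int buffer[] = {5, 5, 0, 7, 7, 7, 5};
  int* top = Compacted(buffer, buffer + 7);
  assert(top - buffer == 3);  // {5, 7, 5}: adjacent dups and zeros gone
  assert(buffer[0] == 5 && buffer[1] == 7 && buffer[2] == 5);
  return 0;
}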


 void StoreBuffer::HandleFullness() {
(...skipping 151 matching lines...)

 static Address* in_store_buffer_1_element_cache = NULL;


 bool StoreBuffer::CellIsInStoreBuffer(Address cell_address) {
   if (!FLAG_enable_slow_asserts) return true;
   if (in_store_buffer_1_element_cache != NULL &&
       *in_store_buffer_1_element_cache == cell_address) {
     return true;
   }
-  Address* top = reinterpret_cast<Address*>(Heap::store_buffer_top());
+  Address* top = reinterpret_cast<Address*>(HEAP->store_buffer_top());

Erik Corry 2011/04/20 20:07:40
Use heap_ here and below.

Vyacheslav Egorov (Chromium) 2011/04/24 11:24:08
Here and below done where possible. GCPrologue/GCEpilogue are static callbacks, so they still have to go through HEAP.

   for (Address* current = top - 1; current >= start_; current--) {
     if (*current == cell_address) {
       in_store_buffer_1_element_cache = current;
       return true;
     }
   }
   for (Address* current = old_top_ - 1; current >= old_start_; current--) {
     if (*current == cell_address) {
       in_store_buffer_1_element_cache = current;
       return true;
     }
   }
   return false;
 }
 #endif
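Note: CellIsInStoreBuffer() is a slow debug helper, so it keeps a one-element cache — the slot that answered the last query is checked first, making repeated assertions against the same cell O(1) instead of a full linear scan of both buffers. The pattern in isolation (illustrative names, not V8 API):

// One-element query cache in front of a linear membership scan.
static const int* last_hit = 0;  // file-scope cache, like the diff's static

static bool Contains(const int* begin, const int* end, int value) {
  // Fast path: same value as the previous successful lookup.
  if (last_hit != 0 && *last_hit == value) return true;
  for (const int* p = begin; p != end; ++p) {
    if (*p == value) {
      last_hit = p;  // remember the winning slot for next time
      return true;
    }
  }
  return false;
}

As in the diff, the cache is only sound while the buffer is not mutated between queries, which is why it is confined to slow-assert builds.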


 void StoreBuffer::ZapHashTables() {
   memset(reinterpret_cast<void*>(hash_map_1_),
          0,
          sizeof(uintptr_t) * kHashMapLength);
   memset(reinterpret_cast<void*>(hash_map_2_),
          0,
          sizeof(uintptr_t) * kHashMapLength);
 }


 void StoreBuffer::GCPrologue(GCType type, GCCallbackFlags flags) {
-  ZapHashTables();
-  during_gc_ = true;
+  HEAP->store_buffer()->ZapHashTables();
+  HEAP->store_buffer()->during_gc_ = true;
 }


 void StoreBuffer::Verify() {
 }


 void StoreBuffer::GCEpilogue(GCType type, GCCallbackFlags flags) {
-  during_gc_ = false;
-  Verify();
+  HEAP->store_buffer()->during_gc_ = false;
+  HEAP->store_buffer()->Verify();
 }
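Note: the review exchange above is about exactly these two functions. AddGCPrologueCallback takes a plain function pointer whose signature carries no Heap* or Isolate* argument, so GCPrologue/GCEpilogue cannot become instance methods and must recover the store buffer through the current-isolate lookup (the HEAP macro, which at the time resolved to Isolate::Current()->heap()) rather than through heap_. A minimal sketch of the constraint, with hypothetical stand-in names:

// The callback type fixes the signature; there is no parameter through
// which a Heap* could be threaded.
typedef void (*GCCallback)(int gc_type, int gc_flags);

struct Heap {
  void AddGCPrologueCallback(GCCallback cb, int gc_type) {
    callback_ = cb;  // real V8 keeps a list; one slot is enough here
    (void)gc_type;
  }
  void ZapStoreBufferState() {}  // stand-in for store-buffer bookkeeping
  GCCallback callback_;
};

// Stand-in for V8's HEAP macro (the current isolate's heap).
static Heap* CurrentHeap() {
  static Heap heap;
  return &heap;
}

// Must match GCCallback, so it cannot touch instance state directly;
// it re-derives the heap globally, as the HEAP-> calls in the diff do.
static void GCPrologue(int gc_type, int gc_flags) {
  (void)gc_type;
  (void)gc_flags;
  CurrentHeap()->ZapStoreBufferState();
}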


 void StoreBuffer::IteratePointersToNewSpace(ObjectSlotCallback callback) {
   // We do not sort or remove duplicated entries from the store buffer because
   // we expect that callback will rebuild the store buffer thus removing
   // all duplicates and pointers to old space.
   bool some_pages_to_scan = PrepareForIteration();

   Address* limit = old_top_;
   old_top_ = old_start_;
   {
-    DontMoveStoreBufferEntriesScope scope;
+    DontMoveStoreBufferEntriesScope scope(this);
     for (Address* current = old_start_; current < limit; current++) {
 #ifdef DEBUG
       Address* saved_top = old_top_;
 #endif
       Object** cell = reinterpret_cast<Object**>(*current);
       Object* object = *cell;
       // May be invalid if object is not in new space.
       HeapObject* heap_object = reinterpret_cast<HeapObject*>(object);
-      if (Heap::InFromSpace(object)) {
+      if (heap_->InFromSpace(object)) {
         callback(reinterpret_cast<HeapObject**>(cell), heap_object);
       }
       ASSERT(old_top_ == saved_top + 1 || old_top_ == saved_top);
     }
   }
   // We are done scanning all the pointers that were in the store buffer, but
   // there may be some pages marked scan_on_scavenge that have pointers to new
   // space that are not in the store buffer.  We must scan them now.  As we
   // scan, the surviving pointers to new space will be added to the store
   // buffer.  If there are still a lot of pointers to new space then we will
   // keep the scan_on_scavenge flag on the page and discard the pointers that
   // were added to the store buffer.  If there are not many pointers to new
   // space left on the page we will keep the pointers in the store buffer and
   // remove the flag from the page.
   if (some_pages_to_scan) {
     if (callback_ != NULL) {
-      (*callback_)(NULL, kStoreBufferStartScanningPagesEvent);
+      (*callback_)(heap_, NULL, kStoreBufferStartScanningPagesEvent);
     }
     PointerChunkIterator it;
     MemoryChunk* chunk;
     while ((chunk = it.next()) != NULL) {
       if (chunk->scan_on_scavenge()) {
         if (callback_ != NULL) {
-          (*callback_)(chunk, kStoreBufferScanningPageEvent);
+          (*callback_)(heap_, chunk, kStoreBufferScanningPageEvent);
         }
-        if (chunk->owner() == Heap::lo_space()) {
+        if (chunk->owner() == heap_->lo_space()) {
           LargePage* large_page = reinterpret_cast<LargePage*>(chunk);
           HeapObject* array = large_page->GetObject();
           ASSERT(array->IsFixedArray());
           Address start = array->address();
           Address object_end = start + array->Size();
-          Heap::IteratePointersToNewSpace(start, object_end, callback);
+          HEAP->IteratePointersToNewSpace(HEAP, start, object_end, callback);
         } else {
           Page* page = reinterpret_cast<Page*>(chunk);
-          Heap::IteratePointersOnPage(
+          HEAP->IteratePointersOnPage(
               reinterpret_cast<PagedSpace*>(page->owner()),
               &Heap::IteratePointersToNewSpace,
               callback,
               page);
         }
       }
     }
-    (*callback_)(NULL, kStoreBufferScanningPageEvent);
+    (*callback_)(heap_, NULL, kStoreBufferScanningPageEvent);
   }
 }
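Note: the long comment in this function describes a feedback loop — scanning a scan_on_scavenge page regenerates store-buffer entries for its surviving new-space pointers, after which the collector picks the cheaper representation per page: many pointers means keep the page flag and discard the fresh entries; few pointers means keep the entries and clear the flag. Sketched as C++ pseudocode; the threshold and helper names are assumptions, not V8's actual policy:

// Hypothetical illustration of the per-page choice the comment describes.
struct Page {
  bool scan_on_scavenge;
};

static const int kKeepEntriesBelow = 128;  // assumed threshold

static void ChooseRepresentation(Page* page, int pointers_found,
                                 void (*discard_new_entries)()) {
  if (pointers_found >= kKeepEntriesBelow) {
    // Dense page: cheaper to rescan the whole page next scavenge.
    page->scan_on_scavenge = true;
    discard_new_entries();  // drop the entries the scan just added
  } else {
    // Sparse page: the handful of store-buffer entries is cheaper.
    page->scan_on_scavenge = false;
  }
}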


 void StoreBuffer::Compact() {
-  Address* top = reinterpret_cast<Address*>(Heap::store_buffer_top());
+  Address* top = reinterpret_cast<Address*>(heap_->store_buffer_top());

   if (top == start_) return;

   // There's no check of the limit in the loop below so we check here for
   // the worst case (compaction doesn't eliminate any pointers).
   ASSERT(top <= limit_);
-  Heap::public_set_store_buffer_top(start_);
+  heap_->public_set_store_buffer_top(start_);
   if (top - start_ > old_limit_ - old_top_) {
     HandleFullness();
   }
   ASSERT(may_move_store_buffer_entries_);
   // Goes through the addresses in the store buffer attempting to remove
   // duplicates.  In the interest of speed this is a lossy operation.  Some
   // duplicates will remain.  We have two hash tables with different hash
   // functions to reduce the number of unnecessary clashes.
   for (Address* current = start_; current < top; current++) {
-    ASSERT(!Heap::cell_space()->Contains(*current));
-    ASSERT(!Heap::code_space()->Contains(*current));
-    ASSERT(!Heap::old_data_space()->Contains(*current));
+    ASSERT(!heap_->cell_space()->Contains(*current));
+    ASSERT(!heap_->code_space()->Contains(*current));
+    ASSERT(!heap_->old_data_space()->Contains(*current));
     uintptr_t int_addr = reinterpret_cast<uintptr_t>(*current);
     // Shift out the last bits including any tags.
     int_addr >>= kPointerSizeLog2;
     int hash1 =
         ((int_addr ^ (int_addr >> kHashMapLengthLog2)) & (kHashMapLength - 1));
     if (hash_map_1_[hash1] == int_addr) continue;
     int hash2 =
         ((int_addr - (int_addr >> kHashMapLengthLog2)) & (kHashMapLength - 1));
     hash2 ^= hash2 >> (kHashMapLengthLog2 * 2);
     if (hash_map_2_[hash2] == int_addr) continue;
     if (hash_map_1_[hash1] == 0) {
       hash_map_1_[hash1] = int_addr;
     } else if (hash_map_2_[hash2] == 0) {
       hash_map_2_[hash2] = int_addr;
     } else {
       // Rather than slowing down we just throw away some entries.  This will
       // cause some duplicates to remain undetected.
       hash_map_1_[hash1] = int_addr;
       hash_map_2_[hash2] = 0;
     }
     old_buffer_is_sorted_ = false;
     old_buffer_is_filtered_ = false;
     *old_top_++ = reinterpret_cast<Address>(int_addr << kPointerSizeLog2);
     ASSERT(old_top_ <= old_limit_);
   }
-  Counters::store_buffer_compactions.Increment();
+  COUNTERS->store_buffer_compactions()->Increment();
   CheckForFullBuffer();
 }
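Note: the dedup loop in Compact() trades accuracy for speed — two small direct-mapped tables with different hash functions act as a "recently seen" filter, and on a double collision it simply overwrites rather than probing, letting a duplicate slip through. The structure in isolation (table size and hash functions simplified; the diff's real hashes mix in kHashMapLengthLog2 shifts):

#include <cstddef>
#include <cstdint>

static const int kTableLength = 1 << 8;  // assumed size for this sketch
static uintptr_t table1[kTableLength];
static uintptr_t table2[kTableLength];

// Lossy "recently seen" filter over two direct-mapped tables. A false
// "not seen" only costs an extra duplicate downstream, never
// correctness. Zero is reserved as the empty-slot marker, as in the
// diff (shifted addresses are nonzero).
static bool SeenRecently(uintptr_t addr) {
  size_t h1 = (addr ^ (addr >> 8)) & (kTableLength - 1);
  if (table1[h1] == addr) return true;
  size_t h2 = (addr - (addr >> 8)) & (kTableLength - 1);
  if (table2[h2] == addr) return true;
  if (table1[h1] == 0) {
    table1[h1] = addr;
  } else if (table2[h2] == 0) {
    table2[h2] = addr;
  } else {
    // Both slots occupied: evict instead of probing, so some duplicates
    // go undetected -- the diff's stated trade-off.
    table1[h1] = addr;
    table2[h2] = 0;
  }
  return false;
}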


 void StoreBuffer::CheckForFullBuffer() {
   if (old_limit_ - old_top_ < kStoreBufferSize * 2) {
     HandleFullness();
   }
 }

 } }  // namespace v8::internal