Chromium Code Reviews
Unified Diff: src/store-buffer.cc

Issue 7044082: Minor cleanup of StoreBuffer related heap iteration methods. (Closed) Base URL: https://v8.googlecode.com/svn/branches/experimental/gc
Patch Set: Created 9 years, 6 months ago
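
At a glance, this patch set moves the new-space pointer-finding helpers onto StoreBuffer (they were previously Heap::IteratePointers* methods) and gives StoreBuffer::Verify() a real body. The following is a rough sketch of the member declarations implied by the diff below — reconstructed for orientation, not copied from src/store-buffer.h, so treat the public/private placement and #ifdef position as assumptions:

    // Sketch reconstructed from the diff; the authoritative declarations
    // live in src/store-buffer.h.
    class StoreBuffer {
     public:
      void IteratePointersToNewSpace(ObjectSlotCallback slot_callback);
      void Verify();
     private:
      typedef void (StoreBuffer::*RegionCallback)(
          Address start, Address end, ObjectSlotCallback slot_callback);
      void FindPointersToNewSpaceInRegion(
          Address start, Address end, ObjectSlotCallback slot_callback);
      void FindPointersToNewSpaceInMaps(
          Address start, Address end, ObjectSlotCallback slot_callback);
      void FindPointersToNewSpaceInMapsRegion(
          Address start, Address end, ObjectSlotCallback slot_callback);
      void FindPointersToNewSpaceOnPage(PagedSpace* space,
                                        Page* page,
                                        RegionCallback region_callback,
                                        ObjectSlotCallback slot_callback);
    #ifdef DEBUG
      void VerifyPointers(PagedSpace* space, RegionCallback region_callback);
      void VerifyPointers(LargeObjectSpace* space);
    #endif
    };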
 // Copyright 2011 the V8 project authors. All rights reserved.
 // Redistribution and use in source and binary forms, with or without
 // modification, are permitted provided that the following conditions are
 // met:
 //
 //     * Redistributions of source code must retain the above copyright
 //       notice, this list of conditions and the following disclaimer.
 //     * Redistributions in binary form must reproduce the above
 //       copyright notice, this list of conditions and the following
 //       disclaimer in the documentation and/or other materials provided
(...skipping 332 matching lines...)
 }


 void StoreBuffer::GCPrologue(GCType type, GCCallbackFlags flags) {
   // TODO(gc) ISOLATES MERGE
   HEAP->store_buffer()->ZapHashTables();
   HEAP->store_buffer()->during_gc_ = true;
 }


-void StoreBuffer::Verify() {
+#ifdef DEBUG
+static void DummyScavengePointer(HeapObject** p, HeapObject* o) {
+  // Do nothing.
 }


+void StoreBuffer::VerifyPointers(PagedSpace* space,
+                                 RegionCallback region_callback) {
+  PageIterator it(space);
+
+  while (it.has_next()) {
+    Page* page = it.next();
+    FindPointersToNewSpaceOnPage(
+        reinterpret_cast<PagedSpace*>(page->owner()),
+        page,
+        region_callback,
+        &DummyScavengePointer);
+  }
+}
+
+
+void StoreBuffer::VerifyPointers(LargeObjectSpace* space) {
+  LargeObjectIterator it(space);
+  for (HeapObject* object = it.Next(); object != NULL; object = it.Next()) {
+    if (object->IsFixedArray()) {
+      Address slot_address = object->address();
+      Address end = object->address() + object->Size();
+
+      while (slot_address < end) {
+        HeapObject** slot = reinterpret_cast<HeapObject**>(slot_address);
+        // When we are not in GC the Heap::InNewSpace() predicate
+        // checks that pointers which satisfy the predicate point into
+        // the active semispace.
+        heap_->InNewSpace(*slot);
+        slot_address += kPointerSize;
+      }
+    }
+  }
+}
+#endif
+
+
+void StoreBuffer::Verify() {
+#ifdef DEBUG
+  VerifyPointers(heap_->old_pointer_space(),
+                 &StoreBuffer::FindPointersToNewSpaceInRegion);
+  VerifyPointers(heap_->map_space(),
+                 &StoreBuffer::FindPointersToNewSpaceInMapsRegion);
+  VerifyPointers(heap_->lo_space());
+#endif
+}
+
+
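A note on the bare heap_->InNewSpace(*slot) call in VerifyPointers above: its return value is discarded on purpose; per the comment, the verification work happens inside the predicate's own debug check. Roughly what that predicate looks like, paraphrased from the contemporary heap-inl.h — the exact shape is an assumption, not part of this CL:

    // Paraphrase, not this CL's code: the ASSERT inside the predicate is
    // what fires if a scanned slot points into the inactive semispace.
    bool Heap::InNewSpace(Object* object) {
      bool result = new_space_.Contains(object);
      ASSERT(!result ||                 // Either not in new space,
             gc_state_ != NOT_IN_GC ||  // ... or in the middle of a GC,
             InToSpace(object));        // ... or in the active semispace.
      return result;
    }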
 void StoreBuffer::GCEpilogue(GCType type, GCCallbackFlags flags) {
   // TODO(gc) ISOLATES MERGE
Erik Corry 2011/06/13 08:16:39 Let's just delete this comment.
   HEAP->store_buffer()->during_gc_ = false;
   HEAP->store_buffer()->Verify();
 }


-void StoreBuffer::IteratePointersToNewSpace(ObjectSlotCallback callback) {
+void StoreBuffer::FindPointersToNewSpaceInRegion(
+    Address start, Address end, ObjectSlotCallback slot_callback) {
+  for (Address slot_address = start;
+       slot_address < end;
+       slot_address += kPointerSize) {
+    Object** slot = reinterpret_cast<Object**>(slot_address);
+    if (heap_->InNewSpace(*slot)) {
+      HeapObject* object = reinterpret_cast<HeapObject*>(*slot);
+      ASSERT(object->IsHeapObject());
+      slot_callback(reinterpret_cast<HeapObject**>(slot), object);
+      if (heap_->InNewSpace(*slot)) {
+        EnterDirectlyIntoStoreBuffer(slot_address);
+      }
+    }
+  }
+}
+
+
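Note the second InNewSpace() test after the callback runs: slot_callback is allowed to move the object, so the slot is re-read, and only if it still points into new space is it re-recorded via EnterDirectlyIntoStoreBuffer(). A hypothetical callback, shown only to illustrate the contract (ExampleScavengeCallback and Evacuate are not part of V8 or of this CL):

    // Hypothetical ObjectSlotCallback: evacuate the object, then update the
    // slot in place. The caller re-reads *slot afterwards and re-enters it
    // into the store buffer if the object is still in new space.
    static void ExampleScavengeCallback(HeapObject** slot, HeapObject* object) {
      HeapObject* moved = Evacuate(object);  // hypothetical helper
      *slot = moved;
    }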
+// Compute the start address of the first map following the given addr.
+static inline Address MapStartAlign(Address addr) {
+  Address page = Page::FromAddress(addr)->ObjectAreaStart();
+  return page + (((addr - page) + (Map::kSize - 1)) / Map::kSize * Map::kSize);
+}
+
+
+// Compute the end address of the first map preceding the given addr.
+static inline Address MapEndAlign(Address addr) {
+  Address page = Page::FromAllocationTop(addr)->ObjectAreaStart();
+  return page + ((addr - page) / Map::kSize * Map::kSize);
+}
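A quick worked example of the two alignment helpers, with hypothetical numbers (the real Map::kSize depends on the build):

    // Suppose ObjectAreaStart() == 0x1000 and Map::kSize == 88 (0x58).
    // For addr == 0x1020 (so addr - page == 32):
    //   MapStartAlign: 0x1000 + ((32 + 87) / 88) * 88 == 0x1000 + 88 == 0x1058
    //                  (first map boundary at or after addr)
    //   MapEndAlign:   0x1000 + (32 / 88) * 88 == 0x1000 + 0 == 0x1000
    //                  (last map boundary at or before addr)
    // An addr already on a map boundary maps to itself in both helpers.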
+
+
+void StoreBuffer::FindPointersToNewSpaceInMaps(
+    Address start,
+    Address end,
+    ObjectSlotCallback slot_callback) {
+  ASSERT(MapStartAlign(start) == start);
+  ASSERT(MapEndAlign(end) == end);
+
+  Address map_address = start;
+  while (map_address < end) {
+    ASSERT(!heap_->InNewSpace(Memory::Object_at(map_address)));
+    ASSERT(Memory::Object_at(map_address)->IsMap());
+
+    Address pointer_fields_start = map_address + Map::kPointerFieldsBeginOffset;
+    Address pointer_fields_end = map_address + Map::kPointerFieldsEndOffset;
+
+    FindPointersToNewSpaceInRegion(pointer_fields_start,
+                                   pointer_fields_end,
+                                   slot_callback);
+    map_address += Map::kSize;
+  }
+}
+
+
+void StoreBuffer::FindPointersToNewSpaceInMapsRegion(
+    Address start,
+    Address end,
+    ObjectSlotCallback slot_callback) {
+  Address map_aligned_start = MapStartAlign(start);
+  Address map_aligned_end = MapEndAlign(end);
+
+  ASSERT(map_aligned_start == start);
+  ASSERT(map_aligned_end == end);
+
+  FindPointersToNewSpaceInMaps(map_aligned_start,
+                               map_aligned_end,
+                               slot_callback);
+}
+
+
+// This function iterates over all the pointers in a paged space in the heap,
+// looking for pointers into new space. Within the pages there may be dead
+// objects that have not been overwritten by free spaces or fillers because of
+// lazy sweeping. These dead objects may not contain pointers to new space.
+// The garbage areas that have been swept properly (these will normally be the
+// large ones) will be marked with free space and filler map words. In
+// addition any area that has never been used at all for object allocation must
+// be marked with a free space or filler. Because the free space and filler
+// maps do not move we can always recognize these even after a compaction.
+// Normal objects like FixedArrays and JSObjects should not contain references
+// to these maps. The special garbage section (see comment in spaces.h) is
+// skipped since it can contain absolutely anything. Any objects that are
+// allocated during iteration may or may not be visited by the iteration, but
+// they will not be partially visited.
+void StoreBuffer::FindPointersToNewSpaceOnPage(
+    PagedSpace* space,
+    Page* page,
+    RegionCallback region_callback,
+    ObjectSlotCallback slot_callback) {
+  Address visitable_start = page->ObjectAreaStart();
+  Address end_of_page = page->ObjectAreaEnd();
+
+  Address visitable_end = visitable_start;
+
+  Object* free_space_map = heap_->free_space_map();
+  Object* two_pointer_filler_map = heap_->two_pointer_filler_map();
+
+  while (visitable_end < end_of_page) {
+    Object* o = *reinterpret_cast<Object**>(visitable_end);
+    // Skip fillers but not things that look like fillers in the special
+    // garbage section which can contain anything.
+    if (o == free_space_map ||
+        o == two_pointer_filler_map ||
+        visitable_end == space->top()) {
+      if (visitable_start != visitable_end) {
+        // After calling this the special garbage section may have moved.
+        (this->*region_callback)(visitable_start,
+                                 visitable_end,
+                                 slot_callback);
+        if (visitable_end >= space->top() && visitable_end < space->limit()) {
+          visitable_end = space->limit();
+          visitable_start = visitable_end;
+          continue;
+        }
+      }
+      if (visitable_end == space->top() && visitable_end != space->limit()) {
+        visitable_start = visitable_end = space->limit();
+      } else {
+        // At this point we are either at the start of a filler or we are at
+        // the point where the space->top() used to be before the
+        // visit_pointer_region call above. Either way we can skip the
+        // object at the current spot: We don't promise to visit objects
+        // allocated during heap traversal, and if space->top() moved then it
+        // must be because an object was allocated at this point.
+        visitable_start =
+            visitable_end + HeapObject::FromAddress(visitable_end)->Size();
+        visitable_end = visitable_start;
+      }
+    } else {
+      ASSERT(o != free_space_map);
+      ASSERT(o != two_pointer_filler_map);
+      ASSERT(visitable_end < space->top() || visitable_end >= space->limit());
+      visitable_end += kPointerSize;
+    }
+  }
+  ASSERT(visitable_end == end_of_page);
+  if (visitable_start != visitable_end) {
+    (this->*region_callback)(visitable_start,
+                             visitable_end,
+                             slot_callback);
+  }
+}
+
+
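Schematically, on a lazily swept page the loop above sees something like this (illustrative layout, not from the CL):

    // [ live obj ][ dead, unswept obj ][ FreeSpace ][ live obj ][ top..limit )
    // |<------- one region_callback ------->|        |<-------->|   skipped
    // Live and unswept-dead words are scanned together, word by word; a
    // free-space or filler map word ends the current region, and the linear
    // allocation area between top and limit is skipped entirely.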
+void StoreBuffer::IteratePointersToNewSpace(ObjectSlotCallback slot_callback) {
   // We do not sort or remove duplicated entries from the store buffer because
   // we expect that the callback will rebuild the store buffer, thus removing
   // all duplicates and pointers to old space.
   bool some_pages_to_scan = PrepareForIteration();

   Address* limit = old_top_;
   old_top_ = old_start_;
   {
     DontMoveStoreBufferEntriesScope scope(this);
     for (Address* current = old_start_; current < limit; current++) {
 #ifdef DEBUG
       Address* saved_top = old_top_;
 #endif
-      Object** cell = reinterpret_cast<Object**>(*current);
-      Object* object = *cell;
+      Object** slot = reinterpret_cast<Object**>(*current);
+      Object* object = *slot;
       // May be invalid if object is not in new space.
Erik Corry 2011/06/13 08:16:39 It may well be me that wrote this, but I can't rem
       HeapObject* heap_object = reinterpret_cast<HeapObject*>(object);
       if (heap_->InFromSpace(object)) {
-        callback(reinterpret_cast<HeapObject**>(cell), heap_object);
+        slot_callback(reinterpret_cast<HeapObject**>(slot), heap_object);
+        if (heap_->InNewSpace(*slot)) {
+          EnterDirectlyIntoStoreBuffer(reinterpret_cast<Address>(slot));
+        }
       }
       ASSERT(old_top_ == saved_top + 1 || old_top_ == saved_top);
     }
   }
   // We are done scanning all the pointers that were in the store buffer, but
   // there may be some pages marked scan_on_scavenge that have pointers to new
   // space that are not in the store buffer. We must scan them now. As we
   // scan, the surviving pointers to new space will be added to the store
   // buffer. If there are still a lot of pointers to new space then we will
   // keep the scan_on_scavenge flag on the page and discard the pointers that
   // were added to the store buffer. If there are not many pointers to new
   // space left on the page we will keep the pointers in the store buffer and
   // remove the flag from the page.
   if (some_pages_to_scan) {
     if (callback_ != NULL) {
       (*callback_)(heap_, NULL, kStoreBufferStartScanningPagesEvent);
     }
     PointerChunkIterator it;
     MemoryChunk* chunk;
     while ((chunk = it.next()) != NULL) {
       if (chunk->scan_on_scavenge()) {
         if (callback_ != NULL) {
           (*callback_)(heap_, chunk, kStoreBufferScanningPageEvent);
         }
         if (chunk->owner() == heap_->lo_space()) {
           LargePage* large_page = reinterpret_cast<LargePage*>(chunk);
           HeapObject* array = large_page->GetObject();
           ASSERT(array->IsFixedArray());
           Address start = array->address();
-          Address object_end = start + array->Size();
-          heap_->IteratePointersToNewSpace(heap_, start, object_end, callback);
+          Address end = start + array->Size();
+          FindPointersToNewSpaceInRegion(start, end, slot_callback);
         } else {
           Page* page = reinterpret_cast<Page*>(chunk);
           PagedSpace* owner = reinterpret_cast<PagedSpace*>(page->owner());
-          heap_->IteratePointersOnPage(
+          FindPointersToNewSpaceOnPage(
               owner,
+              page,
               (owner == heap_->map_space() ?
-               &Heap::IteratePointersFromMapsToNewSpace :
-               &Heap::IteratePointersToNewSpace),
-              callback,
-              page);
+               &StoreBuffer::FindPointersToNewSpaceInMapsRegion :
+               &StoreBuffer::FindPointersToNewSpaceInRegion),
+              slot_callback);
         }
       }
     }
     (*callback_)(heap_, NULL, kStoreBufferScanningPageEvent);
   }
 }


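The keep-the-flag-or-keep-the-entries decision described in the comment above is made outside this diff; the code here only does the rescanning. A sketch of the policy it describes, with hypothetical names and threshold (not code from this CL):

    // Hypothetical sketch: after rescanning one scan_on_scavenge page,
    // either keep the flag and drop the entries the rescan produced,
    // or keep the entries and clear the flag.
    if (pointers_found_on_page > kKeepFlagThreshold) {  // hypothetical
      page->set_scan_on_scavenge(true);   // keep flag, discard entries
      old_top_ = top_before_this_page;    // hypothetical rollback
    } else {
      page->set_scan_on_scavenge(false);  // keep entries, drop flag
    }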
 void StoreBuffer::Compact() {
   Address* top = reinterpret_cast<Address*>(heap_->store_buffer_top());
(...skipping 46 matching lines...)
 }


 void StoreBuffer::CheckForFullBuffer() {
   if (old_limit_ - old_top_ < kStoreBufferSize * 2) {
     HandleFullness();
   }
 }

 } }  // namespace v8::internal