Chromium Code Reviews

Side by Side Diff: src/mark-compact.cc

Issue 7870003: Guard against rare case of allocation failure during evacuation. (Closed)
Base URL: https://v8.googlecode.com/svn/branches/experimental/gc
Patch Set: Created 9 years, 3 months ago
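
What the patch does: EvacuateLiveObjectsFromPage used to assume AllocateRaw always succeeds; it now checks the result and treats a failure as a fatal out-of-memory. EvacuatePages additionally stops evacuating once the owning space can no longer expand and marks the remaining candidate pages for in-place rescanning. The sketch below is a minimal, self-contained model of that control flow, not V8 code: the Space and Page types and the evacuate_* helpers are invented for illustration, and fprintf/abort stand in for V8::FatalProcessOutOfMemory.

#include <cstddef>
#include <cstdio>
#include <cstdlib>
#include <vector>

// Stand-in types: they only model the flags and calls the patch touches.
struct Page {
  bool evacuation_candidate = true;
  bool rescan_on_evacuation = false;
};

struct Space {
  std::size_t budget;                        // bytes this space may still hand out
  void* AllocateRaw(std::size_t size) {      // returns nullptr on failure, analogous
    if (size > budget) return nullptr;       // to AllocateRaw() returning a Failure
    budget -= size;
    return std::malloc(size);
  }
  bool CanExpand() const { return budget > 0; }  // cf. PagedSpace::CanExpand()
};

// Guard #1 (EvacuateLiveObjectsFromPage): do not assume the allocation for the
// evacuated copy succeeds; treat a failed allocation as fatal out-of-memory.
void evacuate_object(Space& space, std::size_t size) {
  void* target = space.AllocateRaw(size);
  if (target == nullptr) {
    std::fprintf(stderr, "Fatal process out of memory: Evacuation\n");
    std::abort();
  }
  // ... migrate the object into the newly allocated slot ...
  std::free(target);
}

// Guard #2 (EvacuatePages): once the space cannot grow any further, evacuating
// the remaining candidate pages is not guaranteed to succeed, so abandon them
// and flag them to be rescanned (swept) in place instead.
void evacuate_pages(Space& space, std::vector<Page>& candidates) {
  for (std::size_t i = 0; i < candidates.size(); i++) {
    if (!space.CanExpand()) {
      for (std::size_t j = i; j < candidates.size(); j++) {
        candidates[j].evacuation_candidate = false;
        candidates[j].rescan_on_evacuation = true;
      }
      return;
    }
    evacuate_object(space, 64);  // stand-in for the per-object evacuation loop
  }
}

int main() {
  Space space{256};              // room for four 64-byte copies
  std::vector<Page> pages(8);
  evacuate_pages(space, pages);  // pages 4..7 end up flagged for rescan
}
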
 // Copyright 2011 the V8 project authors. All rights reserved.
 // Redistribution and use in source and binary forms, with or without
 // modification, are permitted provided that the following conditions are
 // met:
 //
 //     * Redistributions of source code must retain the above copyright
 //       notice, this list of conditions and the following disclaimer.
 //     * Redistributions in binary form must reproduce the above
 //       copyright notice, this list of conditions and the following
 //       disclaimer in the documentation and/or other materials provided
(...skipping 2558 matching lines...)
     if (cells[cell_index] == 0) continue;

     int live_objects = MarkWordToObjectStarts(cells[cell_index], offsets);
     for (int i = 0; i < live_objects; i++) {
       Address object_addr = cell_base + offsets[i] * kPointerSize;
       HeapObject* object = HeapObject::FromAddress(object_addr);
       ASSERT(Marking::IsBlack(Marking::MarkBitFrom(object)));

       int size = object->Size();

-      // This should never fail as we are in always allocate scope.
-      Object* target = space->AllocateRaw(size)->ToObjectUnchecked();
+      MaybeObject* target = space->AllocateRaw(size);
+      if (target->IsFailure()) {
+        // OS refused to give us memory.
+        V8::FatalProcessOutOfMemory("Evacuation");
+        return;
+      }

-      MigrateObject(HeapObject::cast(target)->address(),
+      Object* target_object = target->ToObjectUnchecked();
+
+      MigrateObject(HeapObject::cast(target_object)->address(),
                     object_addr,
                     size,
                     space->identity());
       ASSERT(object->map_word().IsForwardingAddress());
     }

     // Clear marking bits for current cell.
     cells[cell_index] = 0;
   }
 }


 void MarkCompactCollector::EvacuatePages() {
   int npages = evacuation_candidates_.length();
   for (int i = 0; i < npages; i++) {
     Page* p = evacuation_candidates_[i];
     ASSERT(p->IsEvacuationCandidate() ||
            p->IsFlagSet(Page::RESCAN_ON_EVACUATION));
     if (p->IsEvacuationCandidate()) {
-      EvacuateLiveObjectsFromPage(p);
+      // During compaction we might have to request a new page.
+      // Check that space still have room for that.
+      if (static_cast<PagedSpace*>(p->owner())->CanExpand()) {
+        EvacuateLiveObjectsFromPage(p);
+      } else {
+        // Without room for expansion evacuation is not guaranteed to succeed.
+        // Pessimistically abandon unevacuated pages.
+        for (int j = i; j < npages; j++) {
+          evacuation_candidates_[j]->ClearEvacuationCandidate();
+          evacuation_candidates_[j]->SetFlag(Page::RESCAN_ON_EVACUATION);
+        }
+        return;
+      }
     }
   }
 }


 class EvacuationWeakObjectRetainer : public WeakObjectRetainer {
  public:
   virtual Object* RetainAs(Object* object) {
     if (object->IsHeapObject()) {
       HeapObject* heap_object = HeapObject::cast(object);
(...skipping 187 matching lines...)
                SlotsBuffer::SizeOfChain(p->slots_buffer()));
       }
     } else {
       if (FLAG_gc_verbose) {
         PrintF("Sweeping 0x%" V8PRIxPTR " during evacuation.\n",
                reinterpret_cast<intptr_t>(p));
       }
       PagedSpace* space = static_cast<PagedSpace*>(p->owner());
       p->ClearFlag(MemoryChunk::RESCAN_ON_EVACUATION);

-      SweepPrecisely(space,
-                     p,
-                     SWEEP_AND_VISIT_LIVE_OBJECTS,
-                     &updating_visitor);
+      if (space->identity() == OLD_DATA_SPACE) {
+        SweepConservatively(space, p);
+      } else {
+        SweepPrecisely(space,
+                       p,
+                       SWEEP_AND_VISIT_LIVE_OBJECTS,
+                       &updating_visitor);
+      }
     }
   }

   // Update pointers from cells.
   HeapObjectIterator cell_iterator(heap_->cell_space());
   for (HeapObject* cell = cell_iterator.Next();
        cell != NULL;
        cell = cell_iterator.Next()) {
     if (cell->IsJSGlobalPropertyCell()) {
       Address value_address =
(...skipping 695 matching lines...)
   while (buffer != NULL) {
     SlotsBuffer* next_buffer = buffer->next();
     DeallocateBuffer(buffer);
     buffer = next_buffer;
   }
   *buffer_address = NULL;
 }


 } }  // namespace v8::internal