Chromium Code Reviews

Unified Diff: src/heap/heap-inl.h

Issue 1303393004: Revert of [heap] More flag cleanup. (Closed) Base URL: https://chromium.googlesource.com/v8/v8.git@master
Patch Set: Created 5 years, 3 months ago
 // Copyright 2012 the V8 project authors. All rights reserved.
 // Use of this source code is governed by a BSD-style license that can be
 // found in the LICENSE file.

 #ifndef V8_HEAP_HEAP_INL_H_
 #define V8_HEAP_HEAP_INL_H_

 #include <cmath>

 #include "src/base/platform/platform.h"
(...skipping 484 matching lines...)
        memento_address + HeapObject::kHeaderSize <= top ||
        !NewSpacePage::OnSamePage(memento_address, top - 1));
   if (memento_address == top) return NULL;

   AllocationMemento* memento = AllocationMemento::cast(candidate);
   if (!memento->IsValid()) return NULL;
   return memento;
 }

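Context for the hunk above: FindAllocationMemento looks for an AllocationMemento placed immediately after the object in new space, and the checks ensure the candidate slot lies below the allocation top so the read never touches unallocated memory. A rough layout sketch (illustrative only, not V8 declarations):

    | object fields ... | AllocationMemento | ...unallocated... |
    ^ object            ^ memento_address                       ^ top (allocation limit)
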
-bool Heap::CollectGarbage(AllocationSpace space, const char* gc_reason,
-                          const GCFlags flags,
-                          const GCCallbackFlags callback_flags,
-                          const GCFlagOverride override) {
-  GCFlagScope flag_scope(this, flags, callback_flags, override);
-  const char* collector_reason = nullptr;
-  const GarbageCollector collector =
-      SelectGarbageCollector(space, &collector_reason);
-  return CollectGarbage(collector, gc_reason, collector_reason);
-}
-
-
-bool Heap::CollectGarbageNewSpace(const char* gc_reason) {
-  return CollectGarbage(NEW_SPACE, gc_reason, kNoGCFlags, kNoGCCallbackFlags,
-                        kDontOverride);
-}
-
-
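The two deletions above are the heart of the revert: the flag-bearing overload installed its GCFlags through an RAII GCFlagScope for the duration of the collection. A minimal sketch of that general save/set/restore pattern, with hypothetical types (not the reverted V8 implementation):

    class ScopedFlags {
     public:
      ScopedFlags(int* slot, int value) : slot_(slot), saved_(*slot) {
        *slot_ = value;  // install the flags for the scope's lifetime
      }
      ~ScopedFlags() { *slot_ = saved_; }  // restore the previous flags on exit
     private:
      int* slot_;
      int saved_;
    };
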
 void Heap::UpdateAllocationSiteFeedback(HeapObject* object,
                                         ScratchpadSlotMode mode) {
   Heap* heap = object->GetHeap();
   DCHECK(heap->InFromSpace(object));

   if (!FLAG_allocation_site_pretenuring ||
       !AllocationSite::CanTrack(object->map()->instance_type()))
     return;

   AllocationMemento* memento = heap->FindAllocationMemento(object);
(...skipping 25 matching lines...)

   UpdateAllocationSiteFeedback(object, IGNORE_SCRATCHPAD_SLOT);

   // AllocationMementos are unrooted and shouldn't survive a scavenge
   DCHECK(object->map() != object->GetHeap()->allocation_memento_map());
   // Call the slow part of scavenge object.
   return ScavengeObjectSlow(p, object);
 }

+bool Heap::CollectGarbage(AllocationSpace space, const char* gc_reason,
+                          const v8::GCCallbackFlags callbackFlags) {
+  const char* collector_reason = NULL;
+  GarbageCollector collector = SelectGarbageCollector(space, &collector_reason);
+  return CollectGarbage(collector, gc_reason, collector_reason, callbackFlags);
+}
+
+
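This restored overload routes a caller-supplied reason string and callback flags straight to the collector chosen for the given space. A hypothetical call site, assuming the header declaration supplies defaults for the trailing parameters (the declaration is not part of this diff):

    // Hypothetical caller; the reason string is only used for tracing/logging.
    heap->CollectGarbage(OLD_SPACE, "example: external memory pressure",
                         v8::kNoGCCallbackFlags);
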
 Isolate* Heap::isolate() {
   return reinterpret_cast<Isolate*>(
       reinterpret_cast<intptr_t>(this) -
       reinterpret_cast<size_t>(reinterpret_cast<Isolate*>(16)->heap()) + 16);
 }

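Heap::isolate() above recovers the enclosing Isolate from a Heap that is embedded in it as a member: dereferencing a fake Isolate* at address 16 yields the member's offset (plus 16), which is then subtracted from this. A hedged sketch of the same container-from-member idiom with hypothetical types:

    #include <cstdint>

    struct Member {};
    struct Container {
      long other;     // some preceding field
      Member member;  // the embedded member we hold a pointer to
    };

    Container* ContainerOf(Member* m) {
      // Offset of `member` inside Container, computed the same way as above.
      std::intptr_t offset =
          reinterpret_cast<std::intptr_t>(
              &reinterpret_cast<Container*>(16)->member) - 16;
      return reinterpret_cast<Container*>(
          reinterpret_cast<std::intptr_t>(m) - offset);
    }
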
 // Calls the FUNCTION_CALL function and retries it up to three times
 // to guarantee that any allocations performed during the call will
 // succeed if there's enough memory.

 // Warning: Do not use the identifiers __object__, __maybe_object__ or
 // __scope__ in a call to this macro.

 #define RETURN_OBJECT_UNLESS_RETRY(ISOLATE, RETURN_VALUE) \
   if (__allocation__.To(&__object__)) {                   \
     DCHECK(__object__ != (ISOLATE)->heap()->exception()); \
     RETURN_VALUE;                                         \
   }

 #define CALL_AND_RETRY(ISOLATE, FUNCTION_CALL, RETURN_VALUE, RETURN_EMPTY)    \
   do {                                                                        \
     AllocationResult __allocation__ = FUNCTION_CALL;                          \
     Object* __object__ = NULL;                                                \
     RETURN_OBJECT_UNLESS_RETRY(ISOLATE, RETURN_VALUE)                         \
     /* Two GCs before panicking. In newspace will almost always succeed. */   \
     for (int __i__ = 0; __i__ < 2; __i__++) {                                 \
       (ISOLATE)->heap()->CollectGarbage(__allocation__.RetrySpace(),          \
-                                        "allocation failure",                 \
-                                        Heap::kNoGCFlags, kNoGCCallbackFlags);\
+                                        "allocation failure");                \
       __allocation__ = FUNCTION_CALL;                                         \
       RETURN_OBJECT_UNLESS_RETRY(ISOLATE, RETURN_VALUE)                       \
     }                                                                         \
     (ISOLATE)->counters()->gc_last_resort_from_handles()->Increment();        \
     (ISOLATE)->heap()->CollectAllAvailableGarbage("last resort gc");          \
     {                                                                         \
       AlwaysAllocateScope __scope__(ISOLATE);                                 \
       __allocation__ = FUNCTION_CALL;                                         \
     }                                                                         \
     RETURN_OBJECT_UNLESS_RETRY(ISOLATE, RETURN_VALUE)                         \
     /* TODO(1181417): Fix this. */                                            \
     v8::internal::Heap::FatalProcessOutOfMemory("CALL_AND_RETRY_LAST", true); \
     RETURN_EMPTY;                                                             \
   } while (false)

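In words, the macro tries the allocation, performs up to two targeted GCs with a retry after each, then falls back to one full last-resort GC under AlwaysAllocateScope before declaring fatal out-of-memory. A hedged, function-style paraphrase of that policy (the callables are hypothetical stand-ins for FUNCTION_CALL and the collectors, not V8 API):

    template <typename Alloc, typename TargetedGc, typename FullGc>
    void* AllocateWithRetry(Alloc alloc, TargetedGc targeted_gc, FullGc full_gc) {
      if (void* p = alloc()) return p;  // fast path: no GC needed
      for (int i = 0; i < 2; i++) {     // "two GCs before panicking"
        targeted_gc();                  // collect the space that failed
        if (void* p = alloc()) return p;
      }
      full_gc();                        // last-resort full collection
      if (void* p = alloc()) return p;  // retried with allocation forced
      return nullptr;                   // caller reports fatal out-of-memory
    }
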
 #define CALL_AND_RETRY_OR_DIE(ISOLATE, FUNCTION_CALL, RETURN_VALUE, \
                               RETURN_EMPTY)                         \
   CALL_AND_RETRY(ISOLATE, FUNCTION_CALL, RETURN_VALUE, RETURN_EMPTY)

 #define CALL_HEAP_FUNCTION(ISOLATE, FUNCTION_CALL, TYPE)                      \
   CALL_AND_RETRY_OR_DIE(ISOLATE, FUNCTION_CALL,                               \
                         return Handle<TYPE>(TYPE::cast(__object__), ISOLATE), \
                         return Handle<TYPE>())
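CALL_HEAP_FUNCTION is how handle-returning factory helpers typically wrap a raw, failable allocation: on success the raw object is wrapped in a Handle, on exhaustion the retry machinery above kicks in. A hypothetical usage sketch (the allocator name and signature are illustrative, not taken from this diff):

    Handle<FixedArray> NewFixedArraySketch(Isolate* isolate, int length) {
      CALL_HEAP_FUNCTION(
          isolate,
          isolate->heap()->AllocateFixedArray(length),  // hypothetical raw allocator
          FixedArray);
    }
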
(...skipping 174 matching lines...)

 void VerifySmisVisitor::VisitPointers(Object** start, Object** end) {
   for (Object** current = start; current < end; current++) {
     CHECK((*current)->IsSmi());
   }
 }
 }
 }  // namespace v8::internal

 #endif  // V8_HEAP_HEAP_INL_H_
