Chromium Code Reviews

Unified Diff: src/heap/heap-inl.h

Issue 1314863003: [heap] More flag cleanup. (Closed)
Base URL: https://chromium.googlesource.com/v8/v8.git@master
Patch Set: Added more comments (created 5 years, 3 months ago)
 // Copyright 2012 the V8 project authors. All rights reserved.
 // Use of this source code is governed by a BSD-style license that can be
 // found in the LICENSE file.

 #ifndef V8_HEAP_HEAP_INL_H_
 #define V8_HEAP_HEAP_INL_H_

 #include <cmath>

 #include "src/base/platform/platform.h"
(...skipping 484 matching lines...)
          memento_address + HeapObject::kHeaderSize <= top ||
          !NewSpacePage::OnSamePage(memento_address, top - 1));
   if (memento_address == top) return NULL;

   AllocationMemento* memento = AllocationMemento::cast(candidate);
   if (!memento->IsValid()) return NULL;
   return memento;
 }


+bool Heap::CollectGarbage(AllocationSpace space, const char* gc_reason,
+                          const GCFlags flags,
+                          const GCCallbackFlags callback_flags,
+                          const GCFlagOverride override) {
+  GCFlagScope flag_scope(this, flags, callback_flags, override);
+  const char* collector_reason = nullptr;
+  const GarbageCollector collector =
+      SelectGarbageCollector(space, &collector_reason);
+  return CollectGarbage(collector, gc_reason, collector_reason);
+}
+
+
+bool Heap::CollectGarbageNewSpace(const char* gc_reason) {
+  return CollectGarbage(NEW_SPACE, gc_reason, kNoGCFlags, kNoGCCallbackFlags,
+                        kDontOverride);
+}
+
+
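The two additions above replace ad-hoc flag mutation with a scoped, explicit-parameter API: GCFlagScope installs the flags for the duration of the call and restores them afterwards. A minimal sketch of what call sites look like under the new signature (the surrounding helper function is hypothetical, and qualifying kDontOverride as Heap::kDontOverride outside the class is an assumption):

void MaybeCollectExample(Heap* heap) {
  // Young-generation collection with default flags and callback flags.
  heap->CollectGarbageNewSpace("example: new-space full");

  // The explicit form the helper above expands to.
  heap->CollectGarbage(NEW_SPACE, "example: new-space full", Heap::kNoGCFlags,
                       kNoGCCallbackFlags, Heap::kDontOverride);
}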
 void Heap::UpdateAllocationSiteFeedback(HeapObject* object,
                                         ScratchpadSlotMode mode) {
   Heap* heap = object->GetHeap();
   DCHECK(heap->InFromSpace(object));

   if (!FLAG_allocation_site_pretenuring ||
       !AllocationSite::CanTrack(object->map()->instance_type()))
     return;

   AllocationMemento* memento = heap->FindAllocationMemento(object);
(...skipping 25 matching lines...)

   UpdateAllocationSiteFeedback(object, IGNORE_SCRATCHPAD_SLOT);

   // AllocationMementos are unrooted and shouldn't survive a scavenge
   DCHECK(object->map() != object->GetHeap()->allocation_memento_map());
   // Call the slow part of scavenge object.
   return ScavengeObjectSlow(p, object);
 }


-bool Heap::CollectGarbage(AllocationSpace space, const char* gc_reason,
-                          const v8::GCCallbackFlags callbackFlags) {
-  const char* collector_reason = NULL;
-  GarbageCollector collector = SelectGarbageCollector(space, &collector_reason);
-  return CollectGarbage(collector, gc_reason, collector_reason, callbackFlags);
-}
-
-
 Isolate* Heap::isolate() {
   return reinterpret_cast<Isolate*>(
       reinterpret_cast<intptr_t>(this) -
       reinterpret_cast<size_t>(reinterpret_cast<Isolate*>(16)->heap()) + 16);
 }


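For readers puzzled by the pointer arithmetic above: Heap is embedded by value inside Isolate, so the enclosing Isolate* can be recovered by subtracting the member's byte offset, and reinterpret_cast<Isolate*>(16)->heap() merely computes that offset from a fake non-null base without dereferencing anything. A self-contained sketch of the same container-from-member trick, using hypothetical Container/Member types (formally undefined behavior, like the original, but it mirrors the V8 idiom exactly):

#include <cstdint>
#include <cstdio>

struct Container;

struct Member {
  Container* container();  // recover the enclosing Container
};

struct Container {
  int some_field;
  Member member;
  Member* member_ptr() { return &member; }
};

Container* Member::container() {
  // Compute the offset of `member` inside Container from a fake base
  // address (16), exactly as Heap::isolate() does; nothing is dereferenced.
  const intptr_t kFakeBase = 16;
  intptr_t offset =
      reinterpret_cast<intptr_t>(
          reinterpret_cast<Container*>(kFakeBase)->member_ptr()) -
      kFakeBase;
  return reinterpret_cast<Container*>(reinterpret_cast<intptr_t>(this) -
                                      offset);
}

int main() {
  Container c;
  std::printf("%s\n", c.member.container() == &c ? "ok" : "mismatch");
  return 0;
}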
 // Calls the FUNCTION_CALL function and retries it up to three times
 // to guarantee that any allocations performed during the call will
 // succeed if there's enough memory.

 // Warning: Do not use the identifiers __object__, __maybe_object__ or
 // __scope__ in a call to this macro.

 #define RETURN_OBJECT_UNLESS_RETRY(ISOLATE, RETURN_VALUE) \
   if (__allocation__.To(&__object__)) {                   \
     DCHECK(__object__ != (ISOLATE)->heap()->exception()); \
     RETURN_VALUE;                                         \
   }

 #define CALL_AND_RETRY(ISOLATE, FUNCTION_CALL, RETURN_VALUE, RETURN_EMPTY)    \
   do {                                                                        \
     AllocationResult __allocation__ = FUNCTION_CALL;                          \
     Object* __object__ = NULL;                                                \
     RETURN_OBJECT_UNLESS_RETRY(ISOLATE, RETURN_VALUE)                         \
     /* Two GCs before panicking. In newspace will almost always succeed. */   \
     for (int __i__ = 0; __i__ < 2; __i__++) {                                 \
-      (ISOLATE)->heap()->CollectGarbage(__allocation__.RetrySpace(),          \
-                                        "allocation failure");                \
+      (ISOLATE)->heap()->CollectGarbage(__allocation__.RetrySpace(),          \
+                                        "allocation failure",                 \
+                                        Heap::kNoGCFlags, kNoGCCallbackFlags); \
       __allocation__ = FUNCTION_CALL;                                         \
       RETURN_OBJECT_UNLESS_RETRY(ISOLATE, RETURN_VALUE)                       \
     }                                                                         \
     (ISOLATE)->counters()->gc_last_resort_from_handles()->Increment();        \
     (ISOLATE)->heap()->CollectAllAvailableGarbage("last resort gc");          \
     {                                                                         \
       AlwaysAllocateScope __scope__(ISOLATE);                                 \
       __allocation__ = FUNCTION_CALL;                                         \
     }                                                                         \
     RETURN_OBJECT_UNLESS_RETRY(ISOLATE, RETURN_VALUE)                         \
     /* TODO(1181417): Fix this. */                                            \
     v8::internal::Heap::FatalProcessOutOfMemory("CALL_AND_RETRY_LAST", true); \
     RETURN_EMPTY;                                                             \
   } while (false)

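To make the retry protocol easier to follow, here is an approximate hand-expansion of the rewritten macro as an ordinary function. This is a sketch only: AllocateExample is a hypothetical stand-in for FUNCTION_CALL, and the real macro returns through RETURN_VALUE / RETURN_EMPTY rather than returning a pointer.

Object* AllocateWithRetryExample(Isolate* isolate) {
  AllocationResult allocation = AllocateExample(isolate);  // hypothetical
  Object* object = NULL;
  if (allocation.To(&object)) return object;  // fast path: allocation hit

  // Two GCs before panicking; in new space this almost always succeeds.
  for (int i = 0; i < 2; i++) {
    isolate->heap()->CollectGarbage(allocation.RetrySpace(),
                                    "allocation failure", Heap::kNoGCFlags,
                                    kNoGCCallbackFlags);
    allocation = AllocateExample(isolate);
    if (allocation.To(&object)) return object;
  }

  // Last resort: collect everything, then retry with the emergency
  // reserve enabled via AlwaysAllocateScope.
  isolate->counters()->gc_last_resort_from_handles()->Increment();
  isolate->heap()->CollectAllAvailableGarbage("last resort gc");
  {
    AlwaysAllocateScope scope(isolate);
    allocation = AllocateExample(isolate);
  }
  if (allocation.To(&object)) return object;

  v8::internal::Heap::FatalProcessOutOfMemory("CALL_AND_RETRY_LAST", true);
  return NULL;  // unreachable
}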
 #define CALL_AND_RETRY_OR_DIE(ISOLATE, FUNCTION_CALL, RETURN_VALUE, \
                               RETURN_EMPTY)                         \
   CALL_AND_RETRY(ISOLATE, FUNCTION_CALL, RETURN_VALUE, RETURN_EMPTY)

 #define CALL_HEAP_FUNCTION(ISOLATE, FUNCTION_CALL, TYPE)                      \
   CALL_AND_RETRY_OR_DIE(ISOLATE, FUNCTION_CALL,                               \
                         return Handle<TYPE>(TYPE::cast(__object__), ISOLATE), \
                         return Handle<TYPE>())
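CALL_HEAP_FUNCTION is the form most factory methods use: it turns a raw, possibly-failing AllocationResult into a Handle<T>, with the GC-and-retry sequence above happening behind the scenes. A hedged sketch of a typical caller (the exact allocator entry point and its parameters are assumptions, not part of this diff):

Handle<FixedArray> NewFixedArrayExample(Isolate* isolate, int length) {
  // Retries the allocation across GCs and dies only if the last-resort
  // collection also fails to free enough memory.
  CALL_HEAP_FUNCTION(isolate, isolate->heap()->AllocateFixedArray(length),
                     FixedArray);
}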
(...skipping 174 matching lines...)

 void VerifySmisVisitor::VisitPointers(Object** start, Object** end) {
   for (Object** current = start; current < end; current++) {
     CHECK((*current)->IsSmi());
   }
 }
 }
 }  // namespace v8::internal

 #endif  // V8_HEAP_HEAP_INL_H_