Chromium Code Reviews

Unified Diff: src/heap/heap-inl.h

Issue 1314863003: [heap] More flag cleanup. (Closed)
Base URL: https://chromium.googlesource.com/v8/v8.git@master
Patch Set: Remove unnecessary parameter. Created 5 years, 3 months ago
 // Copyright 2012 the V8 project authors. All rights reserved.
 // Use of this source code is governed by a BSD-style license that can be
 // found in the LICENSE file.

 #ifndef V8_HEAP_HEAP_INL_H_
 #define V8_HEAP_HEAP_INL_H_

 #include <cmath>

 #include "src/base/platform/platform.h"
(...skipping 479 matching lines...)
          memento_address + HeapObject::kHeaderSize <= top ||
          !NewSpacePage::OnSamePage(memento_address, top - 1));
   if (memento_address == top) return NULL;

   AllocationMemento* memento = AllocationMemento::cast(candidate);
   if (!memento->IsValid()) return NULL;
   return memento;
 }

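The hunk above is the tail of Heap::FindAllocationMemento. It relies on the invariant that an AllocationMemento, when one exists, is allocated directly behind its object in new space, and the guards keep the lookup from reading a word at or past the current allocation top. A rough sketch of the adjacency computation, using names from the surrounding V8 code (the exact guard conditions sit in the elided lines above):

    // Sketch: a candidate memento lives immediately after the object,
    // so its address is the object's start address plus its size.
    Address object_address = object->address();
    Address memento_address = object_address + object->Size();
    HeapObject* candidate = HeapObject::FromAddress(memento_address);
    // `candidate` only counts as a memento if its map word is the
    // allocation-memento map and the allocator has not already parked
    // `top` right behind the object (the memento_address == top check).
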
+bool Heap::CollectGarbage(AllocationSpace space, const char* gc_reason,
+                          const GCFlags flags,
+                          const GCCallbackFlags callback_flags,
+                          const GCFlagOverride override) {
+  GCFlagScope flag_scope(this, flags, callback_flags, override);
+  const char* collector_reason = nullptr;
+  const GarbageCollector collector =
+      SelectGarbageCollector(space, &collector_reason);
+  return CollectGarbage(collector, gc_reason, collector_reason);
+}
+
+
+bool Heap::CollectGarbageNewSpace(const char* gc_reason) {
+  return CollectGarbage(NEW_SPACE, gc_reason, kNoGCFlags, kNoGCCallbackFlags,
+                        kDontOverride);
+}
+
+
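Both added entry points route through GCFlagScope, defined in src/heap/heap.h (part of this CL's file set, not shown in this file). Its job is to install the requested flags for the duration of one collection and reset them when control leaves CollectGarbage, so callers can no longer forget to clear them. A hypothetical sketch of the RAII pattern, with set_current_gc_flags() standing in for whatever accessor heap.h actually defines:

    // Hypothetical sketch, not the real heap.h definition: install GC
    // flags on entry, restore the defaults when the scope unwinds.
    class GCFlagScope {
     public:
      GCFlagScope(Heap* heap, GCFlags flags, GCCallbackFlags callback_flags,
                  GCFlagOverride override)
          : heap_(heap), override_(override) {
        if (override_ == kOverride) {
          heap_->set_current_gc_flags(flags, callback_flags);
        }
      }
      ~GCFlagScope() {
        if (override_ == kOverride) {
          heap_->set_current_gc_flags(kNoGCFlags, kNoGCCallbackFlags);
        }
      }

     private:
      Heap* heap_;
      GCFlagOverride override_;
    };

CollectGarbageNewSpace passes kDontOverride, presumably so a scavenge triggered while an outer collection already holds flags does not clobber them.
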
 void Heap::UpdateAllocationSiteFeedback(HeapObject* object,
                                         ScratchpadSlotMode mode) {
   Heap* heap = object->GetHeap();
   DCHECK(heap->InFromSpace(object));

   if (!FLAG_allocation_site_pretenuring ||
       !AllocationSite::CanTrack(object->map()->instance_type()))
     return;

   AllocationMemento* memento = heap->FindAllocationMemento(object);
(...skipping 25 matching lines...)

   UpdateAllocationSiteFeedback(object, IGNORE_SCRATCHPAD_SLOT);

   // AllocationMementos are unrooted and shouldn't survive a scavenge
   DCHECK(object->map() != object->GetHeap()->allocation_memento_map());
   // Call the slow part of scavenge object.
   return ScavengeObjectSlow(p, object);
 }

-bool Heap::CollectGarbage(AllocationSpace space, const char* gc_reason,
-                          const v8::GCCallbackFlags callbackFlags) {
-  const char* collector_reason = NULL;
-  GarbageCollector collector = SelectGarbageCollector(space, &collector_reason);
-  return CollectGarbage(collector, gc_reason, collector_reason, callbackFlags);
-}
-
-
 Isolate* Heap::isolate() {
   return reinterpret_cast<Isolate*>(
       reinterpret_cast<intptr_t>(this) -
       reinterpret_cast<size_t>(reinterpret_cast<Isolate*>(16)->heap()) + 16);
 }
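Heap::isolate() recovers the owning Isolate from a Heap that is embedded in it as a field: it subtracts, from `this`, the offset of that field within Isolate, computing the offset by pretending an Isolate lives at the fake address 16 and asking where its heap() would land (the trailing + 16 cancels the fake base). This is the classic container_of idiom; V8 likely avoids plain offsetof here because Isolate is not a standard-layout type. A self-contained illustration of the same idiom (not V8 code):

    #include <cstddef>
    #include <cstdint>

    struct Container;

    struct Member {
      Container* container();  // recover the enclosing object
    };

    struct Container {
      int other_field;
      Member member;
    };

    Container* Member::container() {
      // offsetof gives the distance from the start of Container to
      // `member`; subtracting it from our own address walks back to
      // the start of the enclosing Container.
      return reinterpret_cast<Container*>(
          reinterpret_cast<uintptr_t>(this) - offsetof(Container, member));
    }
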


 // Calls the FUNCTION_CALL function and retries it up to three times
 // to guarantee that any allocations performed during the call will
 // succeed if there's enough memory.

 // Warning: Do not use the identifiers __object__, __maybe_object__ or
 // __scope__ in a call to this macro.

 #define RETURN_OBJECT_UNLESS_RETRY(ISOLATE, RETURN_VALUE) \
   if (__allocation__.To(&__object__)) {                   \
     DCHECK(__object__ != (ISOLATE)->heap()->exception()); \
     RETURN_VALUE;                                         \
   }

 #define CALL_AND_RETRY(ISOLATE, FUNCTION_CALL, RETURN_VALUE, RETURN_EMPTY)   \
   do {                                                                       \
     AllocationResult __allocation__ = FUNCTION_CALL;                         \
     Object* __object__ = NULL;                                               \
     RETURN_OBJECT_UNLESS_RETRY(ISOLATE, RETURN_VALUE)                        \
     /* Two GCs before panicking. In newspace will almost always succeed. */  \
     for (int __i__ = 0; __i__ < 2; __i__++) {                                \
       (ISOLATE)->heap()->CollectGarbage(__allocation__.RetrySpace(),         \
-                                        "allocation failure");               \
+                                        "allocation failure",                \
+                                        Heap::kNoGCFlags, kNoGCCallbackFlags); \
       __allocation__ = FUNCTION_CALL;                                        \
       RETURN_OBJECT_UNLESS_RETRY(ISOLATE, RETURN_VALUE)                      \
     }                                                                        \
     (ISOLATE)->counters()->gc_last_resort_from_handles()->Increment();       \
     (ISOLATE)->heap()->CollectAllAvailableGarbage("last resort gc");         \
     {                                                                        \
       AlwaysAllocateScope __scope__(ISOLATE);                                \
       __allocation__ = FUNCTION_CALL;                                        \
     }                                                                        \
     RETURN_OBJECT_UNLESS_RETRY(ISOLATE, RETURN_VALUE)                        \
     /* TODO(1181417): Fix this. */                                           \
     v8::internal::Heap::FatalProcessOutOfMemory("CALL_AND_RETRY_LAST", true); \
     RETURN_EMPTY;                                                            \
   } while (false)
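Expanded, CALL_AND_RETRY is a three-stage retry protocol: attempt the allocation; on failure, run up to two targeted GCs in the space that failed, retrying after each; then run one last-resort full GC and retry under AlwaysAllocateScope before giving up with a fatal OOM. The same control flow written as an ordinary function, purely for readability (a sketch, not part of the patch; `allocate` stands in for the FUNCTION_CALL expression):

    // Sketch of CALL_AND_RETRY's control flow as a plain function.
    template <typename Alloc>
    Object* AllocateWithRetry(Isolate* isolate, Alloc allocate) {
      Object* result = NULL;
      AllocationResult allocation = allocate();
      if (allocation.To(&result)) return result;  // fast path
      for (int i = 0; i < 2; i++) {  // two targeted GCs before panicking
        isolate->heap()->CollectGarbage(allocation.RetrySpace(),
                                        "allocation failure", Heap::kNoGCFlags,
                                        kNoGCCallbackFlags);
        allocation = allocate();
        if (allocation.To(&result)) return result;
      }
      isolate->counters()->gc_last_resort_from_handles()->Increment();
      isolate->heap()->CollectAllAvailableGarbage("last resort gc");
      {
        AlwaysAllocateScope scope(isolate);  // may expand the heap
        allocation = allocate();
      }
      if (allocation.To(&result)) return result;
      v8::internal::Heap::FatalProcessOutOfMemory("CALL_AND_RETRY_LAST", true);
      return NULL;  // unreachable
    }
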

 #define CALL_AND_RETRY_OR_DIE(ISOLATE, FUNCTION_CALL, RETURN_VALUE, \
                               RETURN_EMPTY)                         \
   CALL_AND_RETRY(ISOLATE, FUNCTION_CALL, RETURN_VALUE, RETURN_EMPTY)

 #define CALL_HEAP_FUNCTION(ISOLATE, FUNCTION_CALL, TYPE)                      \
   CALL_AND_RETRY_OR_DIE(ISOLATE, FUNCTION_CALL,                               \
                         return Handle<TYPE>(TYPE::cast(__object__), ISOLATE), \
                         return Handle<TYPE>())
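CALL_HEAP_FUNCTION is the handle-returning wrapper over the retry machinery: on success it wraps __object__ in a Handle<TYPE>; the empty-handle arm is effectively unreachable, since FatalProcessOutOfMemory has already aborted by then. A representative call site, roughly as it appears in V8's factory code of this era (illustrative, not part of this diff):

    // Illustrative: a factory method built on CALL_HEAP_FUNCTION, which
    // turns a raw AllocationResult-returning allocator into a retrying,
    // Handle-returning API.
    Handle<FixedArray> Factory::NewFixedArray(int size, PretenureFlag pretenure) {
      CALL_HEAP_FUNCTION(
          isolate(),
          isolate()->heap()->AllocateFixedArray(size, pretenure),
          FixedArray);
    }
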
(...skipping 174 matching lines...)

 void VerifySmisVisitor::VisitPointers(Object** start, Object** end) {
   for (Object** current = start; current < end; current++) {
     CHECK((*current)->IsSmi());
   }
 }
 }
 }  // namespace v8::internal

 #endif  // V8_HEAP_HEAP_INL_H_
