Chromium Code Reviews

Unified Diff: src/heap.cc

Issue 8256012: Remove some asserts to speed up debug mode. (Closed) Base URL: http://v8.googlecode.com/svn/branches/bleeding_edge/
Patch Set: '' Created 9 years, 2 months ago
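Note on the change: the patch replaces ASSERT with SLOW_ASSERT at several GC hot spots below (EvacuateObject, ScavengeObjectSlow, CopyJSObject, and the new-space pointer iteration), so those checks no longer run in an ordinary debug build. As a rough illustration only, assuming an opt-in ENABLE_SLOW_ASSERTS switch (the actual V8 definition may differ, e.g. it may be gated by a runtime flag instead), a SLOW_ASSERT-style macro can be layered on the existing ASSERT machinery like this:

    // Illustrative sketch only -- not the actual V8 definition.
    // Expensive invariants (e.g. the object->Size() == object_size comparison,
    // which recomputes the size from the map) compile to nothing in a normal
    // debug build and are only evaluated when slow asserts are enabled.
    #ifdef ENABLE_SLOW_ASSERTS                 // assumed opt-in switch
    #define SLOW_ASSERT(condition) ASSERT(condition)
    #else
    #define SLOW_ASSERT(condition) ((void) 0)
    #endif

Either way, the debug-mode speedup comes from taking these checks off the per-object scavenging path by default.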
 // Copyright 2011 the V8 project authors. All rights reserved.
 // Redistribution and use in source and binary forms, with or without
 // modification, are permitted provided that the following conditions are
 // met:
 //
 //     * Redistributions of source code must retain the above copyright
 //       notice, this list of conditions and the following disclaimer.
 //     * Redistributions in binary form must reproduce the above
 //       copyright notice, this list of conditions and the following
 //       disclaimer in the documentation and/or other materials provided
(...skipping 1425 matching lines...)
   }

   return target;
 }

 template<ObjectContents object_contents, SizeRestriction size_restriction>
 static inline void EvacuateObject(Map* map,
                                   HeapObject** slot,
                                   HeapObject* object,
                                   int object_size) {
-  ASSERT((size_restriction != SMALL) ||
-         (object_size <= Page::kMaxHeapObjectSize));
-  ASSERT(object->Size() == object_size);
+  SLOW_ASSERT((size_restriction != SMALL) ||
+              (object_size <= Page::kMaxHeapObjectSize));
+  SLOW_ASSERT(object->Size() == object_size);

   Heap* heap = map->GetHeap();
   if (heap->ShouldBePromoted(object->address(), object_size)) {
     MaybeObject* maybe_result;

     if ((size_restriction != SMALL) &&
         (object_size > Page::kMaxHeapObjectSize)) {
       maybe_result = heap->lo_space()->AllocateRaw(object_size,
                                                    NOT_EXECUTABLE);
     } else {
(...skipping 212 matching lines...)
       scavenging_visitors_table_.Register(
           StaticVisitorBase::kVisitShortcutCandidate,
           scavenging_visitors_table_.GetVisitorById(
               StaticVisitorBase::kVisitConsString));
     }
   }
 }


 void Heap::ScavengeObjectSlow(HeapObject** p, HeapObject* object) {
-  ASSERT(HEAP->InFromSpace(object));
+  SLOW_ASSERT(HEAP->InFromSpace(object));
   MapWord first_word = object->map_word();
-  ASSERT(!first_word.IsForwardingAddress());
+  SLOW_ASSERT(!first_word.IsForwardingAddress());
   Map* map = first_word.ToMap();
   map->GetHeap()->DoScavengeObject(map, p, object);
 }


 MaybeObject* Heap::AllocatePartialMap(InstanceType instance_type,
                                       int instance_size) {
   Object* result;
   { MaybeObject* maybe_result = AllocateRawMap();
     if (!maybe_result->ToObject(&result)) return maybe_result;
(...skipping 1987 matching lines...)
   // Make sure result is a global object with properties in dictionary.
   ASSERT(global->IsGlobalObject());
   ASSERT(!global->HasFastProperties());
   return global;
 }


 MaybeObject* Heap::CopyJSObject(JSObject* source) {
   // Never used to copy functions. If functions need to be copied we
   // have to be careful to clear the literals array.
-  ASSERT(!source->IsJSFunction());
+  SLOW_ASSERT(!source->IsJSFunction());

   // Make the clone.
   Map* map = source->map();
   int object_size = map->instance_size();
   Object* clone;

   // If we're forced to always allocate, we use the general allocation
   // functions which may leave us with an object in old space.
   if (always_allocate()) {
     { MaybeObject* maybe_clone =
           AllocateRaw(object_size, NEW_SPACE, OLD_POINTER_SPACE);
       if (!maybe_clone->ToObject(&clone)) return maybe_clone;
     }
     Address clone_address = HeapObject::cast(clone)->address();
     CopyBlock(clone_address,
               source->address(),
               object_size);
     // Update write barrier for all fields that lie beyond the header.
     RecordWrites(clone_address,
                  JSObject::kHeaderSize,
                  (object_size - JSObject::kHeaderSize) / kPointerSize);
   } else {
     { MaybeObject* maybe_clone = new_space_.AllocateRaw(object_size);
       if (!maybe_clone->ToObject(&clone)) return maybe_clone;
     }
-    ASSERT(InNewSpace(clone));
+    SLOW_ASSERT(InNewSpace(clone));
     // Since we know the clone is allocated in new space, we can copy
     // the contents without worrying about updating the write barrier.
     CopyBlock(HeapObject::cast(clone)->address(),
               source->address(),
               object_size);
   }

-  ASSERT(JSObject::cast(clone)->GetElementsKind() == source->GetElementsKind());
+  SLOW_ASSERT(
+      JSObject::cast(clone)->GetElementsKind() == source->GetElementsKind());
   FixedArrayBase* elements = FixedArrayBase::cast(source->elements());
   FixedArray* properties = FixedArray::cast(source->properties());
   // Update elements if necessary.
   if (elements->length() > 0) {
     Object* elem;
     { MaybeObject* maybe_elem;
       if (elements->map() == fixed_cow_array_map()) {
         maybe_elem = FixedArray::cast(elements);
       } else if (source->HasFastDoubleElements()) {
         maybe_elem = CopyFixedDoubleArray(FixedDoubleArray::cast(elements));
(...skipping 1059 matching lines...)
     // If the store buffer becomes overfull we mark pages as being exempt from
     // the store buffer. These pages are scanned to find pointers that point
     // to the new space. In that case we may hit newly promoted objects and
     // fix the pointers before the promotion queue gets to them. Thus the 'if'.
     if (object->IsHeapObject()) {
       if (Heap::InFromSpace(object)) {
         callback(reinterpret_cast<HeapObject**>(slot),
                  HeapObject::cast(object));
         Object* new_object = *slot;
         if (InNewSpace(new_object)) {
-          ASSERT(Heap::InToSpace(new_object));
-          ASSERT(new_object->IsHeapObject());
+          SLOW_ASSERT(Heap::InToSpace(new_object));
+          SLOW_ASSERT(new_object->IsHeapObject());
           store_buffer_.EnterDirectlyIntoStoreBuffer(
               reinterpret_cast<Address>(slot));
         }
-        ASSERT(!MarkCompactCollector::IsOnEvacuationCandidate(new_object));
+        SLOW_ASSERT(!MarkCompactCollector::IsOnEvacuationCandidate(new_object));
       } else if (record_slots &&
                  MarkCompactCollector::IsOnEvacuationCandidate(object)) {
         mark_compact_collector()->RecordSlot(slot, slot, object);
       }
     }
     slot_address += kPointerSize;
   }
 }


(...skipping 1581 matching lines...)
   isolate_->heap()->store_buffer()->Compact();
   isolate_->heap()->store_buffer()->Filter(MemoryChunk::ABOUT_TO_BE_FREED);
   for (chunk = chunks_queued_for_free_; chunk != NULL; chunk = next) {
     next = chunk->next_chunk();
     isolate_->memory_allocator()->Free(chunk);
   }
   chunks_queued_for_free_ = NULL;
 }

 } }  // namespace v8::internal