Chromium Code Reviews
chromiumcodereview-hr@appspot.gserviceaccount.com (chromiumcodereview-hr) | Please choose your nickname with Settings | Help | Chromium Project | Gerrit Changes | Sign out
(196)

Side by Side Diff: src/heap.cc

Issue 139973004: A64: Synchronize with r15814. (Closed) Base URL: https://v8.googlecode.com/svn/branches/experimental/a64
Patch Set: Created 6 years, 10 months ago
Use n/p to move between diff chunks; N/P to move between comments. Draft comments are only viewable by you.
Jump to:
View unified diff | Download patch | Annotate | Revision Log
« no previous file with comments | « src/heap.h ('k') | src/heap-snapshot-generator.cc » ('j') | no next file with comments »
Toggle Intra-line Diffs ('i') | Expand Comments ('e') | Collapse Comments ('c') | Show Comments Hide Comments ('s')
OLDNEW
1 // Copyright 2012 the V8 project authors. All rights reserved. 1 // Copyright 2012 the V8 project authors. All rights reserved.
2 // Redistribution and use in source and binary forms, with or without 2 // Redistribution and use in source and binary forms, with or without
3 // modification, are permitted provided that the following conditions are 3 // modification, are permitted provided that the following conditions are
4 // met: 4 // met:
5 // 5 //
6 // * Redistributions of source code must retain the above copyright 6 // * Redistributions of source code must retain the above copyright
7 // notice, this list of conditions and the following disclaimer. 7 // notice, this list of conditions and the following disclaimer.
8 // * Redistributions in binary form must reproduce the above 8 // * Redistributions in binary form must reproduce the above
9 // copyright notice, this list of conditions and the following 9 // copyright notice, this list of conditions and the following
10 // disclaimer in the documentation and/or other materials provided 10 // disclaimer in the documentation and/or other materials provided
(...skipping 164 matching lines...) Expand 10 before | Expand all | Expand 10 after
175 if (max_virtual > 0) { 175 if (max_virtual > 0) {
176 if (code_range_size_ > 0) { 176 if (code_range_size_ > 0) {
177 // Reserve no more than 1/8 of the memory for the code range. 177 // Reserve no more than 1/8 of the memory for the code range.
178 code_range_size_ = Min(code_range_size_, max_virtual >> 3); 178 code_range_size_ = Min(code_range_size_, max_virtual >> 3);
179 } 179 }
180 } 180 }
181 181
182 memset(roots_, 0, sizeof(roots_[0]) * kRootListLength); 182 memset(roots_, 0, sizeof(roots_[0]) * kRootListLength);
183 native_contexts_list_ = NULL; 183 native_contexts_list_ = NULL;
184 array_buffers_list_ = Smi::FromInt(0); 184 array_buffers_list_ = Smi::FromInt(0);
185 allocation_sites_list_ = Smi::FromInt(0);
185 mark_compact_collector_.heap_ = this; 186 mark_compact_collector_.heap_ = this;
186 external_string_table_.heap_ = this; 187 external_string_table_.heap_ = this;
187 // Put a dummy entry in the remembered pages so we can find the list in the 188 // Put a dummy entry in the remembered pages so we can find the list in the
188 // minidump even if there are no real unmapped pages. 189 // minidump even if there are no real unmapped pages.
189 RememberUnmappedPage(NULL, false); 190 RememberUnmappedPage(NULL, false);
190 191
191 ClearObjectStats(true); 192 ClearObjectStats(true);
192 } 193 }
193 194
194 195
(...skipping 908 matching lines...) Expand 10 before | Expand all | Expand 10 after
1103 1104
1104 FlushNumberStringCache(); 1105 FlushNumberStringCache();
1105 if (FLAG_cleanup_code_caches_at_gc) { 1106 if (FLAG_cleanup_code_caches_at_gc) {
1106 polymorphic_code_cache()->set_cache(undefined_value()); 1107 polymorphic_code_cache()->set_cache(undefined_value());
1107 } 1108 }
1108 1109
1109 ClearNormalizedMapCaches(); 1110 ClearNormalizedMapCaches();
1110 } 1111 }
1111 1112
1112 1113
1113 Object* Heap::FindCodeObject(Address a) {
1114 return isolate()->inner_pointer_to_code_cache()->
1115 GcSafeFindCodeForInnerPointer(a);
1116 }
1117
1118
1119 // Helper class for copying HeapObjects 1114 // Helper class for copying HeapObjects
1120 class ScavengeVisitor: public ObjectVisitor { 1115 class ScavengeVisitor: public ObjectVisitor {
1121 public: 1116 public:
1122 explicit ScavengeVisitor(Heap* heap) : heap_(heap) {} 1117 explicit ScavengeVisitor(Heap* heap) : heap_(heap) {}
1123 1118
1124 void VisitPointer(Object** p) { ScavengePointer(p); } 1119 void VisitPointer(Object** p) { ScavengePointer(p); }
1125 1120
1126 void VisitPointers(Object** start, Object** end) { 1121 void VisitPointers(Object** start, Object** end) {
1127 // Copy all HeapObject pointers in [start, end) 1122 // Copy all HeapObject pointers in [start, end)
1128 for (Object** p = start; p < end; p++) ScavengePointer(p); 1123 for (Object** p = start; p < end; p++) ScavengePointer(p);
(...skipping 528 matching lines...) Expand 10 before | Expand all | Expand 10 after
1657 void Heap::ProcessWeakReferences(WeakObjectRetainer* retainer) { 1652 void Heap::ProcessWeakReferences(WeakObjectRetainer* retainer) {
1658 // We don't record weak slots during marking or scavenges. 1653 // We don't record weak slots during marking or scavenges.
1659 // Instead we do it once when we complete mark-compact cycle. 1654 // Instead we do it once when we complete mark-compact cycle.
1660 // Note that write barrier has no effect if we are already in the middle of 1655 // Note that write barrier has no effect if we are already in the middle of
1661 // compacting mark-sweep cycle and we have to record slots manually. 1656 // compacting mark-sweep cycle and we have to record slots manually.
1662 bool record_slots = 1657 bool record_slots =
1663 gc_state() == MARK_COMPACT && 1658 gc_state() == MARK_COMPACT &&
1664 mark_compact_collector()->is_compacting(); 1659 mark_compact_collector()->is_compacting();
1665 ProcessArrayBuffers(retainer, record_slots); 1660 ProcessArrayBuffers(retainer, record_slots);
1666 ProcessNativeContexts(retainer, record_slots); 1661 ProcessNativeContexts(retainer, record_slots);
1662 ProcessAllocationSites(retainer, record_slots);
1667 } 1663 }
1668 1664
1669 void Heap::ProcessNativeContexts(WeakObjectRetainer* retainer, 1665 void Heap::ProcessNativeContexts(WeakObjectRetainer* retainer,
1670 bool record_slots) { 1666 bool record_slots) {
1671 Object* head = 1667 Object* head =
1672 VisitWeakList<Context>( 1668 VisitWeakList<Context>(
1673 this, native_contexts_list(), retainer, record_slots); 1669 this, native_contexts_list(), retainer, record_slots);
1674 // Update the head of the list of contexts. 1670 // Update the head of the list of contexts.
1675 native_contexts_list_ = head; 1671 native_contexts_list_ = head;
1676 } 1672 }
(...skipping 73 matching lines...) Expand 10 before | Expand all | Expand 10 after
1750 Object* undefined = undefined_value(); 1746 Object* undefined = undefined_value();
1751 for (Object* o = array_buffers_list(); o != undefined;) { 1747 for (Object* o = array_buffers_list(); o != undefined;) {
1752 JSArrayBuffer* buffer = JSArrayBuffer::cast(o); 1748 JSArrayBuffer* buffer = JSArrayBuffer::cast(o);
1753 Runtime::FreeArrayBuffer(isolate(), buffer); 1749 Runtime::FreeArrayBuffer(isolate(), buffer);
1754 o = buffer->weak_next(); 1750 o = buffer->weak_next();
1755 } 1751 }
1756 array_buffers_list_ = undefined; 1752 array_buffers_list_ = undefined;
1757 } 1753 }
1758 1754
1759 1755
1756 template<>
1757 struct WeakListVisitor<AllocationSite> {
1758 static void SetWeakNext(AllocationSite* obj, Object* next) {
1759 obj->set_weak_next(next);
1760 }
1761
1762 static Object* WeakNext(AllocationSite* obj) {
1763 return obj->weak_next();
1764 }
1765
1766 static void VisitLiveObject(Heap* heap,
1767 AllocationSite* array_buffer,
1768 WeakObjectRetainer* retainer,
1769 bool record_slots) {}
1770
1771 static void VisitPhantomObject(Heap* heap, AllocationSite* phantom) {}
1772
1773 static int WeakNextOffset() {
1774 return AllocationSite::kWeakNextOffset;
1775 }
1776 };
1777
1778
1779 void Heap::ProcessAllocationSites(WeakObjectRetainer* retainer,
1780 bool record_slots) {
1781 Object* allocation_site_obj =
1782 VisitWeakList<AllocationSite>(this,
1783 allocation_sites_list(),
1784 retainer, record_slots);
1785 set_allocation_sites_list(allocation_site_obj);
1786 }
1787
1788
1760 void Heap::VisitExternalResources(v8::ExternalResourceVisitor* visitor) { 1789 void Heap::VisitExternalResources(v8::ExternalResourceVisitor* visitor) {
1761 DisallowHeapAllocation no_allocation; 1790 DisallowHeapAllocation no_allocation;
1762 1791
1763 // Both the external string table and the string table may contain 1792 // Both the external string table and the string table may contain
1764 // external strings, but neither lists them exhaustively, nor is the 1793 // external strings, but neither lists them exhaustively, nor is the
1765 // intersection set empty. Therefore we iterate over the external string 1794 // intersection set empty. Therefore we iterate over the external string
1766 // table first, ignoring internalized strings, and then over the 1795 // table first, ignoring internalized strings, and then over the
1767 // internalized string table. 1796 // internalized string table.
1768 1797
1769 class ExternalStringTableVisitorAdapter : public ObjectVisitor { 1798 class ExternalStringTableVisitorAdapter : public ObjectVisitor {
(...skipping 155 matching lines...) Expand 10 before | Expand all | Expand 10 after
1925 template VisitSpecialized<Symbol::kSize>); 1954 template VisitSpecialized<Symbol::kSize>);
1926 1955
1927 table_.Register(kVisitSharedFunctionInfo, 1956 table_.Register(kVisitSharedFunctionInfo,
1928 &ObjectEvacuationStrategy<POINTER_OBJECT>:: 1957 &ObjectEvacuationStrategy<POINTER_OBJECT>::
1929 template VisitSpecialized<SharedFunctionInfo::kSize>); 1958 template VisitSpecialized<SharedFunctionInfo::kSize>);
1930 1959
1931 table_.Register(kVisitJSWeakMap, 1960 table_.Register(kVisitJSWeakMap,
1932 &ObjectEvacuationStrategy<POINTER_OBJECT>:: 1961 &ObjectEvacuationStrategy<POINTER_OBJECT>::
1933 Visit); 1962 Visit);
1934 1963
1964 table_.Register(kVisitJSWeakSet,
1965 &ObjectEvacuationStrategy<POINTER_OBJECT>::
1966 Visit);
1967
1935 table_.Register(kVisitJSArrayBuffer, 1968 table_.Register(kVisitJSArrayBuffer,
1936 &ObjectEvacuationStrategy<POINTER_OBJECT>:: 1969 &ObjectEvacuationStrategy<POINTER_OBJECT>::
1937 Visit); 1970 Visit);
1938 1971
1939 table_.Register(kVisitJSTypedArray, 1972 table_.Register(kVisitJSTypedArray,
1940 &ObjectEvacuationStrategy<POINTER_OBJECT>:: 1973 &ObjectEvacuationStrategy<POINTER_OBJECT>::
1941 Visit); 1974 Visit);
1942 1975
1943 table_.Register(kVisitJSDataView, 1976 table_.Register(kVisitJSDataView,
1944 &ObjectEvacuationStrategy<POINTER_OBJECT>:: 1977 &ObjectEvacuationStrategy<POINTER_OBJECT>::
(...skipping 935 matching lines...) Expand 10 before | Expand all | Expand 10 after
2880 result->set_value(value); 2913 result->set_value(value);
2881 return result; 2914 return result;
2882 } 2915 }
2883 2916
2884 2917
2885 MaybeObject* Heap::AllocateAllocationSite() { 2918 MaybeObject* Heap::AllocateAllocationSite() {
2886 Object* result; 2919 Object* result;
2887 MaybeObject* maybe_result = Allocate(allocation_site_map(), 2920 MaybeObject* maybe_result = Allocate(allocation_site_map(),
2888 OLD_POINTER_SPACE); 2921 OLD_POINTER_SPACE);
2889 if (!maybe_result->ToObject(&result)) return maybe_result; 2922 if (!maybe_result->ToObject(&result)) return maybe_result;
2890 AllocationSite::cast(result)->Initialize(); 2923 AllocationSite* site = AllocationSite::cast(result);
2924 site->Initialize();
2925
2926 // Link the site
2927 site->set_weak_next(allocation_sites_list());
2928 set_allocation_sites_list(site);
2891 return result; 2929 return result;
2892 } 2930 }
2893 2931
2894 2932
2895 MaybeObject* Heap::CreateOddball(const char* to_string, 2933 MaybeObject* Heap::CreateOddball(const char* to_string,
2896 Object* to_number, 2934 Object* to_number,
2897 byte kind) { 2935 byte kind) {
2898 Object* result; 2936 Object* result;
2899 { MaybeObject* maybe_result = Allocate(oddball_map(), OLD_POINTER_SPACE); 2937 { MaybeObject* maybe_result = Allocate(oddball_map(), OLD_POINTER_SPACE);
2900 if (!maybe_result->ToObject(&result)) return maybe_result; 2938 if (!maybe_result->ToObject(&result)) return maybe_result;
(...skipping 680 matching lines...) Expand 10 before | Expand all | Expand 10 after
3581 Code* construct_stub = 3619 Code* construct_stub =
3582 isolate_->builtins()->builtin(Builtins::kJSConstructStubGeneric); 3620 isolate_->builtins()->builtin(Builtins::kJSConstructStubGeneric);
3583 share->set_construct_stub(construct_stub); 3621 share->set_construct_stub(construct_stub);
3584 share->set_instance_class_name(Object_string()); 3622 share->set_instance_class_name(Object_string());
3585 share->set_function_data(undefined_value(), SKIP_WRITE_BARRIER); 3623 share->set_function_data(undefined_value(), SKIP_WRITE_BARRIER);
3586 share->set_script(undefined_value(), SKIP_WRITE_BARRIER); 3624 share->set_script(undefined_value(), SKIP_WRITE_BARRIER);
3587 share->set_debug_info(undefined_value(), SKIP_WRITE_BARRIER); 3625 share->set_debug_info(undefined_value(), SKIP_WRITE_BARRIER);
3588 share->set_inferred_name(empty_string(), SKIP_WRITE_BARRIER); 3626 share->set_inferred_name(empty_string(), SKIP_WRITE_BARRIER);
3589 share->set_initial_map(undefined_value(), SKIP_WRITE_BARRIER); 3627 share->set_initial_map(undefined_value(), SKIP_WRITE_BARRIER);
3590 share->set_ast_node_count(0); 3628 share->set_ast_node_count(0);
3591 share->set_stress_deopt_counter(FLAG_deopt_every_n_times);
3592 share->set_counters(0); 3629 share->set_counters(0);
3593 3630
3594 // Set integer fields (smi or int, depending on the architecture). 3631 // Set integer fields (smi or int, depending on the architecture).
3595 share->set_length(0); 3632 share->set_length(0);
3596 share->set_formal_parameter_count(0); 3633 share->set_formal_parameter_count(0);
3597 share->set_expected_nof_properties(0); 3634 share->set_expected_nof_properties(0);
3598 share->set_num_literals(0); 3635 share->set_num_literals(0);
3599 share->set_start_position_and_type(0); 3636 share->set_start_position_and_type(0);
3600 share->set_end_position(0); 3637 share->set_end_position(0);
3601 share->set_function_token_position(0); 3638 share->set_function_token_position(0);
(...skipping 606 matching lines...) Expand 10 before | Expand all | Expand 10 after
4208 4245
4209 4246
4210 MaybeObject* Heap::AllocateWithAllocationSite(Map* map, AllocationSpace space, 4247 MaybeObject* Heap::AllocateWithAllocationSite(Map* map, AllocationSpace space,
4211 Handle<AllocationSite> allocation_site) { 4248 Handle<AllocationSite> allocation_site) {
4212 ASSERT(gc_state_ == NOT_IN_GC); 4249 ASSERT(gc_state_ == NOT_IN_GC);
4213 ASSERT(map->instance_type() != MAP_TYPE); 4250 ASSERT(map->instance_type() != MAP_TYPE);
4214 // If allocation failures are disallowed, we may allocate in a different 4251 // If allocation failures are disallowed, we may allocate in a different
4215 // space when new space is full and the object is not a large object. 4252 // space when new space is full and the object is not a large object.
4216 AllocationSpace retry_space = 4253 AllocationSpace retry_space =
4217 (space != NEW_SPACE) ? space : TargetSpaceId(map->instance_type()); 4254 (space != NEW_SPACE) ? space : TargetSpaceId(map->instance_type());
4218 int size = map->instance_size() + AllocationSiteInfo::kSize; 4255 int size = map->instance_size() + AllocationMemento::kSize;
4219 Object* result; 4256 Object* result;
4220 MaybeObject* maybe_result = AllocateRaw(size, space, retry_space); 4257 MaybeObject* maybe_result = AllocateRaw(size, space, retry_space);
4221 if (!maybe_result->ToObject(&result)) return maybe_result; 4258 if (!maybe_result->ToObject(&result)) return maybe_result;
4222 // No need for write barrier since object is white and map is in old space. 4259 // No need for write barrier since object is white and map is in old space.
4223 HeapObject::cast(result)->set_map_no_write_barrier(map); 4260 HeapObject::cast(result)->set_map_no_write_barrier(map);
4224 AllocationSiteInfo* alloc_info = reinterpret_cast<AllocationSiteInfo*>( 4261 AllocationMemento* alloc_memento = reinterpret_cast<AllocationMemento*>(
4225 reinterpret_cast<Address>(result) + map->instance_size()); 4262 reinterpret_cast<Address>(result) + map->instance_size());
4226 alloc_info->set_map_no_write_barrier(allocation_site_info_map()); 4263 alloc_memento->set_map_no_write_barrier(allocation_memento_map());
4227 alloc_info->set_allocation_site(*allocation_site, SKIP_WRITE_BARRIER); 4264 alloc_memento->set_allocation_site(*allocation_site, SKIP_WRITE_BARRIER);
4228 return result; 4265 return result;
4229 } 4266 }
4230 4267
4231 4268
4232 MaybeObject* Heap::Allocate(Map* map, AllocationSpace space) { 4269 MaybeObject* Heap::Allocate(Map* map, AllocationSpace space) {
4233 ASSERT(gc_state_ == NOT_IN_GC); 4270 ASSERT(gc_state_ == NOT_IN_GC);
4234 ASSERT(map->instance_type() != MAP_TYPE); 4271 ASSERT(map->instance_type() != MAP_TYPE);
4235 // If allocation failures are disallowed, we may allocate in a different 4272 // If allocation failures are disallowed, we may allocate in a different
4236 // space when new space is full and the object is not a large object. 4273 // space when new space is full and the object is not a large object.
4237 AllocationSpace retry_space = 4274 AllocationSpace retry_space =
(...skipping 691 matching lines...) Expand 10 before | Expand all | Expand 10 after
4929 ASSERT(map->CanTrackAllocationSite()); 4966 ASSERT(map->CanTrackAllocationSite());
4930 ASSERT(map->instance_type() == JS_ARRAY_TYPE); 4967 ASSERT(map->instance_type() == JS_ARRAY_TYPE);
4931 WriteBarrierMode wb_mode = UPDATE_WRITE_BARRIER; 4968 WriteBarrierMode wb_mode = UPDATE_WRITE_BARRIER;
4932 4969
4933 // If we're forced to always allocate, we use the general allocation 4970 // If we're forced to always allocate, we use the general allocation
4934 // functions which may leave us with an object in old space. 4971 // functions which may leave us with an object in old space.
4935 int adjusted_object_size = object_size; 4972 int adjusted_object_size = object_size;
4936 if (always_allocate()) { 4973 if (always_allocate()) {
4937 // We'll only track origin if we are certain to allocate in new space 4974 // We'll only track origin if we are certain to allocate in new space
4938 const int kMinFreeNewSpaceAfterGC = InitialSemiSpaceSize() * 3/4; 4975 const int kMinFreeNewSpaceAfterGC = InitialSemiSpaceSize() * 3/4;
4939 if ((object_size + AllocationSiteInfo::kSize) < kMinFreeNewSpaceAfterGC) { 4976 if ((object_size + AllocationMemento::kSize) < kMinFreeNewSpaceAfterGC) {
4940 adjusted_object_size += AllocationSiteInfo::kSize; 4977 adjusted_object_size += AllocationMemento::kSize;
4941 } 4978 }
4942 4979
4943 { MaybeObject* maybe_clone = 4980 { MaybeObject* maybe_clone =
4944 AllocateRaw(adjusted_object_size, NEW_SPACE, OLD_POINTER_SPACE); 4981 AllocateRaw(adjusted_object_size, NEW_SPACE, OLD_POINTER_SPACE);
4945 if (!maybe_clone->ToObject(&clone)) return maybe_clone; 4982 if (!maybe_clone->ToObject(&clone)) return maybe_clone;
4946 } 4983 }
4947 Address clone_address = HeapObject::cast(clone)->address(); 4984 Address clone_address = HeapObject::cast(clone)->address();
4948 CopyBlock(clone_address, 4985 CopyBlock(clone_address,
4949 source->address(), 4986 source->address(),
4950 object_size); 4987 object_size);
4951 // Update write barrier for all fields that lie beyond the header. 4988 // Update write barrier for all fields that lie beyond the header.
4952 int write_barrier_offset = adjusted_object_size > object_size 4989 int write_barrier_offset = adjusted_object_size > object_size
4953 ? JSArray::kSize + AllocationSiteInfo::kSize 4990 ? JSArray::kSize + AllocationMemento::kSize
4954 : JSObject::kHeaderSize; 4991 : JSObject::kHeaderSize;
4955 if (((object_size - write_barrier_offset) / kPointerSize) > 0) { 4992 if (((object_size - write_barrier_offset) / kPointerSize) > 0) {
4956 RecordWrites(clone_address, 4993 RecordWrites(clone_address,
4957 write_barrier_offset, 4994 write_barrier_offset,
4958 (object_size - write_barrier_offset) / kPointerSize); 4995 (object_size - write_barrier_offset) / kPointerSize);
4959 } 4996 }
4960 4997
4961 // Track allocation site information, if we failed to allocate it inline. 4998 // Track allocation site information, if we failed to allocate it inline.
4962 if (InNewSpace(clone) && 4999 if (InNewSpace(clone) &&
4963 adjusted_object_size == object_size) { 5000 adjusted_object_size == object_size) {
4964 MaybeObject* maybe_alloc_info = 5001 MaybeObject* maybe_alloc_memento =
4965 AllocateStruct(ALLOCATION_SITE_INFO_TYPE); 5002 AllocateStruct(ALLOCATION_MEMENTO_TYPE);
4966 AllocationSiteInfo* alloc_info; 5003 AllocationMemento* alloc_memento;
4967 if (maybe_alloc_info->To(&alloc_info)) { 5004 if (maybe_alloc_memento->To(&alloc_memento)) {
4968 alloc_info->set_map_no_write_barrier(allocation_site_info_map()); 5005 alloc_memento->set_map_no_write_barrier(allocation_memento_map());
4969 alloc_info->set_allocation_site(site, SKIP_WRITE_BARRIER); 5006 alloc_memento->set_allocation_site(site, SKIP_WRITE_BARRIER);
4970 } 5007 }
4971 } 5008 }
4972 } else { 5009 } else {
4973 wb_mode = SKIP_WRITE_BARRIER; 5010 wb_mode = SKIP_WRITE_BARRIER;
4974 adjusted_object_size += AllocationSiteInfo::kSize; 5011 adjusted_object_size += AllocationMemento::kSize;
4975 5012
4976 { MaybeObject* maybe_clone = new_space_.AllocateRaw(adjusted_object_size); 5013 { MaybeObject* maybe_clone = new_space_.AllocateRaw(adjusted_object_size);
4977 if (!maybe_clone->ToObject(&clone)) return maybe_clone; 5014 if (!maybe_clone->ToObject(&clone)) return maybe_clone;
4978 } 5015 }
4979 SLOW_ASSERT(InNewSpace(clone)); 5016 SLOW_ASSERT(InNewSpace(clone));
4980 // Since we know the clone is allocated in new space, we can copy 5017 // Since we know the clone is allocated in new space, we can copy
4981 // the contents without worrying about updating the write barrier. 5018 // the contents without worrying about updating the write barrier.
4982 CopyBlock(HeapObject::cast(clone)->address(), 5019 CopyBlock(HeapObject::cast(clone)->address(),
4983 source->address(), 5020 source->address(),
4984 object_size); 5021 object_size);
4985 } 5022 }
4986 5023
4987 if (adjusted_object_size > object_size) { 5024 if (adjusted_object_size > object_size) {
4988 AllocationSiteInfo* alloc_info = reinterpret_cast<AllocationSiteInfo*>( 5025 AllocationMemento* alloc_memento = reinterpret_cast<AllocationMemento*>(
4989 reinterpret_cast<Address>(clone) + object_size); 5026 reinterpret_cast<Address>(clone) + object_size);
4990 alloc_info->set_map_no_write_barrier(allocation_site_info_map()); 5027 alloc_memento->set_map_no_write_barrier(allocation_memento_map());
4991 alloc_info->set_allocation_site(site, SKIP_WRITE_BARRIER); 5028 alloc_memento->set_allocation_site(site, SKIP_WRITE_BARRIER);
4992 } 5029 }
4993 5030
4994 SLOW_ASSERT( 5031 SLOW_ASSERT(
4995 JSObject::cast(clone)->GetElementsKind() == source->GetElementsKind()); 5032 JSObject::cast(clone)->GetElementsKind() == source->GetElementsKind());
4996 FixedArrayBase* elements = FixedArrayBase::cast(source->elements()); 5033 FixedArrayBase* elements = FixedArrayBase::cast(source->elements());
4997 FixedArray* properties = FixedArray::cast(source->properties()); 5034 FixedArray* properties = FixedArray::cast(source->properties());
4998 // Update elements if necessary. 5035 // Update elements if necessary.
4999 if (elements->length() > 0) { 5036 if (elements->length() > 0) {
5000 Object* elem; 5037 Object* elem;
5001 { MaybeObject* maybe_elem; 5038 { MaybeObject* maybe_elem;
(...skipping 803 matching lines...) Expand 10 before | Expand all | Expand 10 after
5805 context->set_previous(previous); 5842 context->set_previous(previous);
5806 context->set_extension(name); 5843 context->set_extension(name);
5807 context->set_global_object(previous->global_object()); 5844 context->set_global_object(previous->global_object());
5808 context->set(Context::THROWN_OBJECT_INDEX, thrown_object); 5845 context->set(Context::THROWN_OBJECT_INDEX, thrown_object);
5809 return context; 5846 return context;
5810 } 5847 }
5811 5848
5812 5849
5813 MaybeObject* Heap::AllocateWithContext(JSFunction* function, 5850 MaybeObject* Heap::AllocateWithContext(JSFunction* function,
5814 Context* previous, 5851 Context* previous,
5815 JSObject* extension) { 5852 JSReceiver* extension) {
5816 Object* result; 5853 Object* result;
5817 { MaybeObject* maybe_result = AllocateFixedArray(Context::MIN_CONTEXT_SLOTS); 5854 { MaybeObject* maybe_result = AllocateFixedArray(Context::MIN_CONTEXT_SLOTS);
5818 if (!maybe_result->ToObject(&result)) return maybe_result; 5855 if (!maybe_result->ToObject(&result)) return maybe_result;
5819 } 5856 }
5820 Context* context = reinterpret_cast<Context*>(result); 5857 Context* context = reinterpret_cast<Context*>(result);
5821 context->set_map_no_write_barrier(with_context_map()); 5858 context->set_map_no_write_barrier(with_context_map());
5822 context->set_closure(function); 5859 context->set_closure(function);
5823 context->set_previous(previous); 5860 context->set_previous(previous);
5824 context->set_extension(extension); 5861 context->set_extension(extension);
5825 context->set_global_object(previous->global_object()); 5862 context->set_global_object(previous->global_object());
(...skipping 1061 matching lines...) Expand 10 before | Expand all | Expand 10 after
6887 bool Heap::CreateHeapObjects() { 6924 bool Heap::CreateHeapObjects() {
6888 // Create initial maps. 6925 // Create initial maps.
6889 if (!CreateInitialMaps()) return false; 6926 if (!CreateInitialMaps()) return false;
6890 if (!CreateApiObjects()) return false; 6927 if (!CreateApiObjects()) return false;
6891 6928
6892 // Create initial objects 6929 // Create initial objects
6893 if (!CreateInitialObjects()) return false; 6930 if (!CreateInitialObjects()) return false;
6894 6931
6895 native_contexts_list_ = undefined_value(); 6932 native_contexts_list_ = undefined_value();
6896 array_buffers_list_ = undefined_value(); 6933 array_buffers_list_ = undefined_value();
6934 allocation_sites_list_ = undefined_value();
6897 return true; 6935 return true;
6898 } 6936 }
6899 6937
6900 6938
6901 void Heap::SetStackLimits() { 6939 void Heap::SetStackLimits() {
6902 ASSERT(isolate_ != NULL); 6940 ASSERT(isolate_ != NULL);
6903 ASSERT(isolate_ == isolate()); 6941 ASSERT(isolate_ == isolate());
6904 // On 64 bit machines, pointers are generally out of range of Smis. We write 6942 // On 64 bit machines, pointers are generally out of range of Smis. We write
6905 // something that looks like an out of range Smi to the GC. 6943 // something that looks like an out of range Smi to the GC.
6906 6944
(...skipping 441 matching lines...) Expand 10 before | Expand all | Expand 10 after
7348 7386
7349 void HeapIterator::reset() { 7387 void HeapIterator::reset() {
7350 // Restart the iterator. 7388 // Restart the iterator.
7351 Shutdown(); 7389 Shutdown();
7352 Init(); 7390 Init();
7353 } 7391 }
7354 7392
7355 7393
7356 #ifdef DEBUG 7394 #ifdef DEBUG
7357 7395
7358 Object* const PathTracer::kAnyGlobalObject = reinterpret_cast<Object*>(NULL); 7396 Object* const PathTracer::kAnyGlobalObject = NULL;
7359 7397
7360 class PathTracer::MarkVisitor: public ObjectVisitor { 7398 class PathTracer::MarkVisitor: public ObjectVisitor {
7361 public: 7399 public:
7362 explicit MarkVisitor(PathTracer* tracer) : tracer_(tracer) {} 7400 explicit MarkVisitor(PathTracer* tracer) : tracer_(tracer) {}
7363 void VisitPointers(Object** start, Object** end) { 7401 void VisitPointers(Object** start, Object** end) {
7364 // Scan all HeapObject pointers in [start, end) 7402 // Scan all HeapObject pointers in [start, end)
7365 for (Object** p = start; !tracer_->found() && (p < end); p++) { 7403 for (Object** p = start; !tracer_->found() && (p < end); p++) {
7366 if ((*p)->IsHeapObject()) 7404 if ((*p)->IsHeapObject())
7367 tracer_->MarkRecursively(p, this); 7405 tracer_->MarkRecursively(p, this);
7368 } 7406 }
(...skipping 331 matching lines...) Expand 10 before | Expand all | Expand 10 after
7700 PrintF("sweepns=%.1f ", scopes_[Scope::MC_SWEEP_NEWSPACE]); 7738 PrintF("sweepns=%.1f ", scopes_[Scope::MC_SWEEP_NEWSPACE]);
7701 PrintF("evacuate=%.1f ", scopes_[Scope::MC_EVACUATE_PAGES]); 7739 PrintF("evacuate=%.1f ", scopes_[Scope::MC_EVACUATE_PAGES]);
7702 PrintF("new_new=%.1f ", scopes_[Scope::MC_UPDATE_NEW_TO_NEW_POINTERS]); 7740 PrintF("new_new=%.1f ", scopes_[Scope::MC_UPDATE_NEW_TO_NEW_POINTERS]);
7703 PrintF("root_new=%.1f ", scopes_[Scope::MC_UPDATE_ROOT_TO_NEW_POINTERS]); 7741 PrintF("root_new=%.1f ", scopes_[Scope::MC_UPDATE_ROOT_TO_NEW_POINTERS]);
7704 PrintF("old_new=%.1f ", scopes_[Scope::MC_UPDATE_OLD_TO_NEW_POINTERS]); 7742 PrintF("old_new=%.1f ", scopes_[Scope::MC_UPDATE_OLD_TO_NEW_POINTERS]);
7705 PrintF("compaction_ptrs=%.1f ", 7743 PrintF("compaction_ptrs=%.1f ",
7706 scopes_[Scope::MC_UPDATE_POINTERS_TO_EVACUATED]); 7744 scopes_[Scope::MC_UPDATE_POINTERS_TO_EVACUATED]);
7707 PrintF("intracompaction_ptrs=%.1f ", 7745 PrintF("intracompaction_ptrs=%.1f ",
7708 scopes_[Scope::MC_UPDATE_POINTERS_BETWEEN_EVACUATED]); 7746 scopes_[Scope::MC_UPDATE_POINTERS_BETWEEN_EVACUATED]);
7709 PrintF("misc_compaction=%.1f ", scopes_[Scope::MC_UPDATE_MISC_POINTERS]); 7747 PrintF("misc_compaction=%.1f ", scopes_[Scope::MC_UPDATE_MISC_POINTERS]);
7710 PrintF("weakmap_process=%.1f ", scopes_[Scope::MC_WEAKMAP_PROCESS]); 7748 PrintF("weakcollection_process=%.1f ",
7711 PrintF("weakmap_clear=%.1f ", scopes_[Scope::MC_WEAKMAP_CLEAR]); 7749 scopes_[Scope::MC_WEAKCOLLECTION_PROCESS]);
7750 PrintF("weakcollection_clear=%.1f ",
7751 scopes_[Scope::MC_WEAKCOLLECTION_CLEAR]);
7712 7752
7713 PrintF("total_size_before=%" V8_PTR_PREFIX "d ", start_object_size_); 7753 PrintF("total_size_before=%" V8_PTR_PREFIX "d ", start_object_size_);
7714 PrintF("total_size_after=%" V8_PTR_PREFIX "d ", heap_->SizeOfObjects()); 7754 PrintF("total_size_after=%" V8_PTR_PREFIX "d ", heap_->SizeOfObjects());
7715 PrintF("holes_size_before=%" V8_PTR_PREFIX "d ", 7755 PrintF("holes_size_before=%" V8_PTR_PREFIX "d ",
7716 in_free_list_or_wasted_before_gc_); 7756 in_free_list_or_wasted_before_gc_);
7717 PrintF("holes_size_after=%" V8_PTR_PREFIX "d ", CountTotalHolesSize(heap_)); 7757 PrintF("holes_size_after=%" V8_PTR_PREFIX "d ", CountTotalHolesSize(heap_));
7718 7758
7719 PrintF("allocated=%" V8_PTR_PREFIX "d ", allocated_since_last_gc_); 7759 PrintF("allocated=%" V8_PTR_PREFIX "d ", allocated_since_last_gc_);
7720 PrintF("promoted=%" V8_PTR_PREFIX "d ", promoted_objects_size_); 7760 PrintF("promoted=%" V8_PTR_PREFIX "d ", promoted_objects_size_);
7721 PrintF("nodes_died_in_new=%d ", nodes_died_in_new_space_); 7761 PrintF("nodes_died_in_new=%d ", nodes_died_in_new_space_);
(...skipping 418 matching lines...) Expand 10 before | Expand all | Expand 10 after
8140 if (FLAG_parallel_recompilation) { 8180 if (FLAG_parallel_recompilation) {
8141 heap_->relocation_mutex_->Lock(); 8181 heap_->relocation_mutex_->Lock();
8142 #ifdef DEBUG 8182 #ifdef DEBUG
8143 heap_->relocation_mutex_locked_by_optimizer_thread_ = 8183 heap_->relocation_mutex_locked_by_optimizer_thread_ =
8144 heap_->isolate()->optimizing_compiler_thread()->IsOptimizerThread(); 8184 heap_->isolate()->optimizing_compiler_thread()->IsOptimizerThread();
8145 #endif // DEBUG 8185 #endif // DEBUG
8146 } 8186 }
8147 } 8187 }
8148 8188
8149 } } // namespace v8::internal 8189 } } // namespace v8::internal
OLDNEW
« no previous file with comments | « src/heap.h ('k') | src/heap-snapshot-generator.cc » ('j') | no next file with comments »

Powered by Google App Engine
This is Rietveld 408576698