Chromium Code Reviews

Unified Diff: src/heap.cc

Issue 24255005: Some cleanup fixes (Closed)
Base URL: https://v8.googlecode.com/svn/branches/bleeding_edge
Patch Set: Addressed comments (created 7 years, 3 months ago)
@@ -1,10 +1,10 @@
 // Copyright 2012 the V8 project authors. All rights reserved.
 // Redistribution and use in source and binary forms, with or without
 // modification, are permitted provided that the following conditions are
 // met:
 //
 //     * Redistributions of source code must retain the above copyright
 //       notice, this list of conditions and the following disclaimer.
 //     * Redistributions in binary form must reproduce the above
 //       copyright notice, this list of conditions and the following
 //       disclaimer in the documentation and/or other materials provided
(...skipping 2949 matching lines...)
@@ -2960,31 +2960,30 @@
 MaybeObject* Heap::AllocateBox(Object* value, PretenureFlag pretenure) {
   Box* result;
   MaybeObject* maybe_result = AllocateStruct(BOX_TYPE);
   if (!maybe_result->To(&result)) return maybe_result;
   result->set_value(value);
   return result;
 }
 
 
 MaybeObject* Heap::AllocateAllocationSite() {
-  Object* result;
+  AllocationSite* site;
   MaybeObject* maybe_result = Allocate(allocation_site_map(),
                                        OLD_POINTER_SPACE);
-  if (!maybe_result->ToObject(&result)) return maybe_result;
-  AllocationSite* site = AllocationSite::cast(result);
+  if (!maybe_result->To(&site)) return maybe_result;
   site->Initialize();
 
   // Link the site
   site->set_weak_next(allocation_sites_list());
   set_allocation_sites_list(site);
-  return result;
+  return site;
 }
 
 
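Note on the hunk above: the cleanup replaces the two-step ToObject() + AllocationSite::cast() with the typed To<T>() accessor, so the temporary Object* disappears. A minimal sketch of why that pattern typechecks, using simplified stand-in types (illustrative only, not the real V8 definitions):

    // Simplified stand-ins; the real V8 classes carry far more machinery.
    #include <cstddef>

    struct Object {};
    struct AllocationSite : Object {
      void Initialize() {}
    };

    struct MaybeObject {
      Object* value_;   // non-null on success
      bool failed_;     // true when the allocation must be retried after GC

      bool ToObject(Object** out) {
        if (failed_) return false;
        *out = value_;
        return true;
      }

      // To<T>() folds the success check and the downcast into one call,
      // which is what lets the patch drop the separate AllocationSite::cast().
      template <typename T>
      bool To(T** out) {
        Object* obj = NULL;
        if (!ToObject(&obj)) return false;
        *out = static_cast<T*>(obj);
        return true;
      }
    };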
 MaybeObject* Heap::CreateOddball(const char* to_string,
                                  Object* to_number,
                                  byte kind) {
   Object* result;
   { MaybeObject* maybe_result = Allocate(oddball_map(), OLD_POINTER_SPACE);
     if (!maybe_result->ToObject(&result)) return maybe_result;
   }
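The AllocateAllocationSite hunk above also threads the fresh site onto the heap's weak allocation-sites list (the "Link the site" lines). A sketch of that intrusive prepend, again with hypothetical stand-in types:

    // Hypothetical stand-ins for the weak allocation-sites list.
    struct AllocationSite {
      AllocationSite* weak_next = nullptr;
    };

    AllocationSite* allocation_sites_list = nullptr;  // a heap root in real V8

    void LinkAllocationSite(AllocationSite* site) {
      // The newest site becomes the list head; the GC can later walk this
      // chain and drop entries whose sites are otherwise unreachable.
      site->weak_next = allocation_sites_list;
      allocation_sites_list = site;
    }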
(...skipping 1926 matching lines...)
@@ -4917,173 +4916,76 @@
   global->set_map(new_map);
   global->set_properties(dictionary);
 
   // Make sure result is a global object with properties in dictionary.
   ASSERT(global->IsGlobalObject());
   ASSERT(!global->HasFastProperties());
   return global;
 }
 
 
-MaybeObject* Heap::CopyJSObject(JSObject* source) {
+MaybeObject* Heap::CopyJSObject(JSObject* source, AllocationSite* site) {
   // Never used to copy functions.  If functions need to be copied we
   // have to be careful to clear the literals array.
   SLOW_ASSERT(!source->IsJSFunction());
 
   // Make the clone.
   Map* map = source->map();
   int object_size = map->instance_size();
   Object* clone;
 
+  ASSERT(site == NULL || (AllocationSite::CanTrack(map->instance_type()) &&
+                          map->instance_type() == JS_ARRAY_TYPE));
+
   WriteBarrierMode wb_mode = UPDATE_WRITE_BARRIER;
 
   // If we're forced to always allocate, we use the general allocation
   // functions which may leave us with an object in old space.
   if (always_allocate()) {
     { MaybeObject* maybe_clone =
           AllocateRaw(object_size, NEW_SPACE, OLD_POINTER_SPACE);
       if (!maybe_clone->ToObject(&clone)) return maybe_clone;
     }
     Address clone_address = HeapObject::cast(clone)->address();
     CopyBlock(clone_address,
               source->address(),
               object_size);
     // Update write barrier for all fields that lie beyond the header.
     RecordWrites(clone_address,
                  JSObject::kHeaderSize,
                  (object_size - JSObject::kHeaderSize) / kPointerSize);
   } else {
     wb_mode = SKIP_WRITE_BARRIER;
 
-    { MaybeObject* maybe_clone = new_space_.AllocateRaw(object_size);
+    { int adjusted_object_size = site != NULL
+          ? object_size + AllocationMemento::kSize
+          : object_size;
+      MaybeObject* maybe_clone = new_space_.AllocateRaw(adjusted_object_size);
       if (!maybe_clone->ToObject(&clone)) return maybe_clone;
     }
     SLOW_ASSERT(InNewSpace(clone));
     // Since we know the clone is allocated in new space, we can copy
     // the contents without worrying about updating the write barrier.
     CopyBlock(HeapObject::cast(clone)->address(),
               source->address(),
               object_size);
+
+    if (site != NULL) {
+      AllocationMemento* alloc_memento = reinterpret_cast<AllocationMemento*>(
+          reinterpret_cast<Address>(clone) + object_size);
+      alloc_memento->set_map_no_write_barrier(allocation_memento_map());
+      ASSERT(site->map() == allocation_site_map());
+      alloc_memento->set_allocation_site(site, SKIP_WRITE_BARRIER);
+    }
   }
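The else branch above is the heart of the merge: when a site is supplied, the new-space allocation is padded by AllocationMemento::kSize and the memento is written immediately behind the clone. A sketch of the layout arithmetic, with made-up sizes standing in for map->instance_size() and AllocationMemento::kSize:

    #include <cassert>
    #include <cstdint>

    int main() {
      const int kObjectSize  = 32;  // stand-in for map->instance_size()
      const int kMementoSize = 16;  // stand-in for AllocationMemento::kSize

      // One raw allocation covers the clone and its trailing memento.
      uint8_t block[kObjectSize + kMementoSize] = {};
      uint8_t* clone   = block;                // JSObject bytes are copied here
      uint8_t* memento = clone + kObjectSize;  // mirrors the reinterpret_cast math

      assert(memento - clone == kObjectSize);  // memento sits right behind the clone
      return 0;
    }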
 
   SLOW_ASSERT(
-      JSObject::cast(clone)->GetElementsKind() == source->GetElementsKind());
-  FixedArrayBase* elements = FixedArrayBase::cast(source->elements());
-  FixedArray* properties = FixedArray::cast(source->properties());
-  // Update elements if necessary.
-  if (elements->length() > 0) {
-    Object* elem;
-    { MaybeObject* maybe_elem;
-      if (elements->map() == fixed_cow_array_map()) {
-        maybe_elem = FixedArray::cast(elements);
-      } else if (source->HasFastDoubleElements()) {
-        maybe_elem = CopyFixedDoubleArray(FixedDoubleArray::cast(elements));
-      } else {
-        maybe_elem = CopyFixedArray(FixedArray::cast(elements));
-      }
-      if (!maybe_elem->ToObject(&elem)) return maybe_elem;
-    }
-    JSObject::cast(clone)->set_elements(FixedArrayBase::cast(elem), wb_mode);
-  }
-  // Update properties if necessary.
-  if (properties->length() > 0) {
-    Object* prop;
-    { MaybeObject* maybe_prop = CopyFixedArray(properties);
-      if (!maybe_prop->ToObject(&prop)) return maybe_prop;
-    }
-    JSObject::cast(clone)->set_properties(FixedArray::cast(prop), wb_mode);
-  }
-  // Return the new clone.
-  return clone;
-}
-
-
-MaybeObject* Heap::CopyJSObjectWithAllocationSite(
-    JSObject* source,
-    AllocationSite* site) {
-  // Never used to copy functions.  If functions need to be copied we
-  // have to be careful to clear the literals array.
-  SLOW_ASSERT(!source->IsJSFunction());
-
-  // Make the clone.
-  Map* map = source->map();
-  int object_size = map->instance_size();
-  Object* clone;
-
-  ASSERT(AllocationSite::CanTrack(map->instance_type()));
-  ASSERT(map->instance_type() == JS_ARRAY_TYPE);
-  WriteBarrierMode wb_mode = UPDATE_WRITE_BARRIER;
-
-  // If we're forced to always allocate, we use the general allocation
-  // functions which may leave us with an object in old space.
-  int adjusted_object_size = object_size;
-  if (always_allocate()) {
-    // We'll only track origin if we are certain to allocate in new space
-    const int kMinFreeNewSpaceAfterGC = InitialSemiSpaceSize() * 3/4;
-    if ((object_size + AllocationMemento::kSize) < kMinFreeNewSpaceAfterGC) {
-      adjusted_object_size += AllocationMemento::kSize;
-    }
-
-    { MaybeObject* maybe_clone =
-          AllocateRaw(adjusted_object_size, NEW_SPACE, OLD_POINTER_SPACE);
-      if (!maybe_clone->ToObject(&clone)) return maybe_clone;
-    }
-    Address clone_address = HeapObject::cast(clone)->address();
-    CopyBlock(clone_address,
-              source->address(),
-              object_size);
-    // Update write barrier for all fields that lie beyond the header.
-    int write_barrier_offset = adjusted_object_size > object_size
-        ? JSArray::kSize + AllocationMemento::kSize
-        : JSObject::kHeaderSize;
-    if (((object_size - write_barrier_offset) / kPointerSize) > 0) {
-      RecordWrites(clone_address,
-                   write_barrier_offset,
-                   (object_size - write_barrier_offset) / kPointerSize);
-    }
-
-    // Track allocation site information, if we failed to allocate it inline.
-    if (InNewSpace(clone) &&
-        adjusted_object_size == object_size) {
-      MaybeObject* maybe_alloc_memento =
-          AllocateStruct(ALLOCATION_MEMENTO_TYPE);
-      AllocationMemento* alloc_memento;
-      if (maybe_alloc_memento->To(&alloc_memento)) {
-        alloc_memento->set_map_no_write_barrier(allocation_memento_map());
-        ASSERT(site->map() == allocation_site_map());
-        alloc_memento->set_allocation_site(site, SKIP_WRITE_BARRIER);
-      }
-    }
-  } else {
-    wb_mode = SKIP_WRITE_BARRIER;
-    adjusted_object_size += AllocationMemento::kSize;
-
-    { MaybeObject* maybe_clone = new_space_.AllocateRaw(adjusted_object_size);
-      if (!maybe_clone->ToObject(&clone)) return maybe_clone;
-    }
-    SLOW_ASSERT(InNewSpace(clone));
-    // Since we know the clone is allocated in new space, we can copy
-    // the contents without worrying about updating the write barrier.
-    CopyBlock(HeapObject::cast(clone)->address(),
-              source->address(),
-              object_size);
-  }
-
-  if (adjusted_object_size > object_size) {
-    AllocationMemento* alloc_memento = reinterpret_cast<AllocationMemento*>(
-        reinterpret_cast<Address>(clone) + object_size);
-    alloc_memento->set_map_no_write_barrier(allocation_memento_map());
-    ASSERT(site->map() == allocation_site_map());
-    alloc_memento->set_allocation_site(site, SKIP_WRITE_BARRIER);
-  }
-
-  SLOW_ASSERT(
       JSObject::cast(clone)->GetElementsKind() == source->GetElementsKind());
   FixedArrayBase* elements = FixedArrayBase::cast(source->elements());
   FixedArray* properties = FixedArray::cast(source->properties());
   // Update elements if necessary.
   if (elements->length() > 0) {
     Object* elem;
     { MaybeObject* maybe_elem;
       if (elements->map() == fixed_cow_array_map()) {
         maybe_elem = FixedArray::cast(elements);
       } else if (source->HasFastDoubleElements()) {
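With the duplicated CopyJSObjectWithAllocationSite gone, the always_allocate() path keeps its RecordWrites call, which registers every pointer field past the header with the write barrier after the raw byte copy. The field-count arithmetic, sketched with illustrative constants (not the real V8 values):

    #include <cstdio>

    int main() {
      const int kPointerSize = 8;   // stand-in for the V8 word size
      const int kHeaderSize  = 16;  // stand-in for JSObject::kHeaderSize
      const int kObjectSize  = 64;  // stand-in for map->instance_size()

      // Only the slots beyond the header can hold pointers that need a
      // write-barrier record once the clone may live in old space.
      int field_count = (kObjectSize - kHeaderSize) / kPointerSize;
      std::printf("record %d field writes past the header\n", field_count);
      return 0;
    }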
(...skipping 2995 matching lines...)
@@ -8085,10 +7987,10 @@
   if (FLAG_concurrent_recompilation) {
     heap_->relocation_mutex_->Lock();
 #ifdef DEBUG
     heap_->relocation_mutex_locked_by_optimizer_thread_ =
         heap_->isolate()->optimizing_compiler_thread()->IsOptimizerThread();
 #endif  // DEBUG
   }
 }
 
 } }  // namespace v8::internal
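For context, the last hunk shows the relocation-mutex bookkeeping: under FLAG_concurrent_recompilation the lock is taken, and debug builds record whether the optimizer thread holds it. A minimal sketch of that owner-tracking pattern, using a standard-library mutex as a stand-in for V8's own:

    #include <mutex>

    struct RelocationLockSketch {
      std::mutex mutex_;
      bool flag_concurrent_recompilation_ = true;  // stand-in for the V8 flag
    #ifdef DEBUG
      bool locked_by_optimizer_thread_ = false;
    #endif

      void Lock(bool is_optimizer_thread) {
        if (flag_concurrent_recompilation_) {
          mutex_.lock();
    #ifdef DEBUG
          // Recording the owner lets assertions elsewhere catch misuse.
          locked_by_optimizer_thread_ = is_optimizer_thread;
    #endif
        }
      }
    };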