Chromium Code Reviews
Side by Side Diff: src/heap.cc

Issue 23641009: Refactor and cleanup VirtualMemory. (Closed) Base URL: https://v8.googlecode.com/svn/branches/bleeding_edge
Patch Set: Remove useless test case. Created 7 years, 3 months ago
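The heap.cc side of this patch is call-site updates only; the VirtualMemory class itself is defined in src/platform/virtual-memory.h elsewhere in this issue. For orientation while reading the hunks below, the members used in this file suggest an interface roughly like the following sketch. It is inferred purely from the call sites in this diff; the enum name, return types, and exact signatures are assumptions, not the authoritative header.

  #include <cstddef>

  // Sketch only: the enumerators and member functions are the names that
  // appear at call sites in this diff; the declarations are guesses.
  class VirtualMemory {
   public:
    enum Executability { NOT_EXECUTABLE, EXECUTABLE };

    // Upper bound on usable virtual address space; replaces
    // OS::MaxVirtualMemory() in the Heap constructor below.
    static size_t GetLimit();

    // OS allocation granularity; replaces OS::AllocateAlignment() in the
    // large-chunk splitting code near the end of the file.
    static size_t GetAllocationGranularity();
  };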
OLD | NEW
1 // Copyright 2012 the V8 project authors. All rights reserved. 1 // Copyright 2012 the V8 project authors. All rights reserved.
2 // Redistribution and use in source and binary forms, with or without 2 // Redistribution and use in source and binary forms, with or without
3 // modification, are permitted provided that the following conditions are 3 // modification, are permitted provided that the following conditions are
4 // met: 4 // met:
5 // 5 //
6 // * Redistributions of source code must retain the above copyright 6 // * Redistributions of source code must retain the above copyright
7 // notice, this list of conditions and the following disclaimer. 7 // notice, this list of conditions and the following disclaimer.
8 // * Redistributions in binary form must reproduce the above 8 // * Redistributions in binary form must reproduce the above
9 // copyright notice, this list of conditions and the following 9 // copyright notice, this list of conditions and the following
10 // disclaimer in the documentation and/or other materials provided 10 // disclaimer in the documentation and/or other materials provided
(...skipping 154 matching lines...)
165 configured_(false), 165 configured_(false),
166 chunks_queued_for_free_(NULL), 166 chunks_queued_for_free_(NULL),
167 relocation_mutex_(NULL) { 167 relocation_mutex_(NULL) {
168 // Allow build-time customization of the max semispace size. Building 168 // Allow build-time customization of the max semispace size. Building
169 // V8 with snapshots and a non-default max semispace size is much 169 // V8 with snapshots and a non-default max semispace size is much
170 // easier if you can define it as part of the build environment. 170 // easier if you can define it as part of the build environment.
171 #if defined(V8_MAX_SEMISPACE_SIZE) 171 #if defined(V8_MAX_SEMISPACE_SIZE)
172 max_semispace_size_ = reserved_semispace_size_ = V8_MAX_SEMISPACE_SIZE; 172 max_semispace_size_ = reserved_semispace_size_ = V8_MAX_SEMISPACE_SIZE;
173 #endif 173 #endif
174 174
175 intptr_t max_virtual = OS::MaxVirtualMemory(); 175 intptr_t max_virtual = static_cast<intptr_t>(VirtualMemory::GetLimit());
176
177 if (max_virtual > 0) { 176 if (max_virtual > 0) {
178 if (code_range_size_ > 0) { 177 if (code_range_size_ > 0) {
179 // Reserve no more than 1/8 of the memory for the code range. 178 // Reserve no more than 1/8 of the memory for the code range.
180 code_range_size_ = Min(code_range_size_, max_virtual >> 3); 179 code_range_size_ = Min(code_range_size_, max_virtual >> 3);
181 } 180 }
182 } 181 }
183 182
184 memset(roots_, 0, sizeof(roots_[0]) * kRootListLength); 183 memset(roots_, 0, sizeof(roots_[0]) * kRootListLength);
185 native_contexts_list_ = NULL; 184 native_contexts_list_ = NULL;
186 array_buffers_list_ = Smi::FromInt(0); 185 array_buffers_list_ = Smi::FromInt(0);
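In the constructor hunk above, OS::MaxVirtualMemory() is replaced by VirtualMemory::GetLimit() (cast to intptr_t), and the existing clamp keeps the code range to at most one eighth of that limit. A self-contained numeric sketch of the same arithmetic, with example values that are not taken from the patch (std::min stands in for V8's Min helper):

  #include <algorithm>
  #include <cstdint>

  // max_virtual >> 3 is max_virtual / 8: with 4 GB of addressable virtual
  // memory, a requested 1 GB code range is clamped to 512 MB.
  const int64_t max_virtual = int64_t{4} << 30;            // example limit: 4 GB
  const int64_t requested_code_range = int64_t{1} << 30;   // example request: 1 GB
  const int64_t code_range_size =
      std::min(requested_code_range, max_virtual >> 3);    // result: 512 MB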
(...skipping 3953 matching lines...)
4140 // Compute size. 4139 // Compute size.
4141 int body_size = RoundUp(desc.instr_size, kObjectAlignment); 4140 int body_size = RoundUp(desc.instr_size, kObjectAlignment);
4142 int obj_size = Code::SizeFor(body_size); 4141 int obj_size = Code::SizeFor(body_size);
4143 ASSERT(IsAligned(static_cast<intptr_t>(obj_size), kCodeAlignment)); 4142 ASSERT(IsAligned(static_cast<intptr_t>(obj_size), kCodeAlignment));
4144 MaybeObject* maybe_result; 4143 MaybeObject* maybe_result;
4145 // Large code objects and code objects which should stay at a fixed address 4144 // Large code objects and code objects which should stay at a fixed address
4146 // are allocated in large object space. 4145 // are allocated in large object space.
4147 HeapObject* result; 4146 HeapObject* result;
4148 bool force_lo_space = obj_size > code_space()->AreaSize(); 4147 bool force_lo_space = obj_size > code_space()->AreaSize();
4149 if (force_lo_space) { 4148 if (force_lo_space) {
4150 maybe_result = lo_space_->AllocateRaw(obj_size, EXECUTABLE); 4149 maybe_result = lo_space_->AllocateRaw(obj_size, VirtualMemory::EXECUTABLE);
4151 } else { 4150 } else {
4152 maybe_result = code_space_->AllocateRaw(obj_size); 4151 maybe_result = code_space_->AllocateRaw(obj_size);
4153 } 4152 }
4154 if (!maybe_result->To<HeapObject>(&result)) return maybe_result; 4153 if (!maybe_result->To<HeapObject>(&result)) return maybe_result;
4155 4154
4156 if (immovable && !force_lo_space && 4155 if (immovable && !force_lo_space &&
4157 // Objects on the first page of each space are never moved. 4156 // Objects on the first page of each space are never moved.
4158 !code_space_->FirstPage()->Contains(result->address())) { 4157 !code_space_->FirstPage()->Contains(result->address())) {
4159 // Discard the first code allocation, which was on a page where it could be 4158 // Discard the first code allocation, which was on a page where it could be
4160 // moved. 4159 // moved.
4161 CreateFillerObjectAt(result->address(), obj_size); 4160 CreateFillerObjectAt(result->address(), obj_size);
4162 maybe_result = lo_space_->AllocateRaw(obj_size, EXECUTABLE); 4161 maybe_result = lo_space_->AllocateRaw(obj_size, VirtualMemory::EXECUTABLE);
4163 if (!maybe_result->To<HeapObject>(&result)) return maybe_result; 4162 if (!maybe_result->To<HeapObject>(&result)) return maybe_result;
4164 } 4163 }
4165 4164
4166 // Initialize the object 4165 // Initialize the object
4167 result->set_map_no_write_barrier(code_map()); 4166 result->set_map_no_write_barrier(code_map());
4168 Code* code = Code::cast(result); 4167 Code* code = Code::cast(result);
4169 ASSERT(!isolate_->code_range()->exists() || 4168 ASSERT(!isolate_->code_range()->exists() ||
4170 isolate_->code_range()->contains(code->address())); 4169 isolate_->code_range()->contains(code->address()));
4171 code->set_instruction_size(desc.instr_size); 4170 code->set_instruction_size(desc.instr_size);
4172 code->set_relocation_info(reloc_info); 4171 code->set_relocation_info(reloc_info);
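Functionally, the hunk above only renames the allocator's executability flag: the bare EXECUTABLE constant becomes VirtualMemory::EXECUTABLE, while the allocation policy is unchanged, so code objects that do not fit on a regular code-space page are forced into large-object space (which also never moves its objects). A standalone sketch of that policy decision with hypothetical types, for illustration only:

  #include <cstddef>

  enum class Space { CODE_SPACE, LARGE_OBJECT_SPACE };

  // Hypothetical helper mirroring the force_lo_space decision in
  // Heap::CreateCode: anything larger than the code space's usable page
  // area must be allocated in large-object space.
  Space ChooseCodeSpace(size_t obj_size, size_t code_space_area_size) {
    return obj_size > code_space_area_size ? Space::LARGE_OBJECT_SPACE
                                           : Space::CODE_SPACE;
  }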
(...skipping 30 matching lines...)
4203 #endif 4202 #endif
4204 return code; 4203 return code;
4205 } 4204 }
4206 4205
4207 4206
4208 MaybeObject* Heap::CopyCode(Code* code) { 4207 MaybeObject* Heap::CopyCode(Code* code) {
4209 // Allocate an object the same size as the code object. 4208 // Allocate an object the same size as the code object.
4210 int obj_size = code->Size(); 4209 int obj_size = code->Size();
4211 MaybeObject* maybe_result; 4210 MaybeObject* maybe_result;
4212 if (obj_size > code_space()->AreaSize()) { 4211 if (obj_size > code_space()->AreaSize()) {
4213 maybe_result = lo_space_->AllocateRaw(obj_size, EXECUTABLE); 4212 maybe_result = lo_space_->AllocateRaw(obj_size, VirtualMemory::EXECUTABLE);
4214 } else { 4213 } else {
4215 maybe_result = code_space_->AllocateRaw(obj_size); 4214 maybe_result = code_space_->AllocateRaw(obj_size);
4216 } 4215 }
4217 4216
4218 Object* result; 4217 Object* result;
4219 if (!maybe_result->ToObject(&result)) return maybe_result; 4218 if (!maybe_result->ToObject(&result)) return maybe_result;
4220 4219
4221 // Copy code object. 4220 // Copy code object.
4222 Address old_addr = code->address(); 4221 Address old_addr = code->address();
4223 Address new_addr = reinterpret_cast<HeapObject*>(result)->address(); 4222 Address new_addr = reinterpret_cast<HeapObject*>(result)->address();
(...skipping 22 matching lines...)
4246 4245
4247 int new_obj_size = Code::SizeFor(new_body_size); 4246 int new_obj_size = Code::SizeFor(new_body_size);
4248 4247
4249 Address old_addr = code->address(); 4248 Address old_addr = code->address();
4250 4249
4251 size_t relocation_offset = 4250 size_t relocation_offset =
4252 static_cast<size_t>(code->instruction_end() - old_addr); 4251 static_cast<size_t>(code->instruction_end() - old_addr);
4253 4252
4254 MaybeObject* maybe_result; 4253 MaybeObject* maybe_result;
4255 if (new_obj_size > code_space()->AreaSize()) { 4254 if (new_obj_size > code_space()->AreaSize()) {
4256 maybe_result = lo_space_->AllocateRaw(new_obj_size, EXECUTABLE); 4255 maybe_result = lo_space_->AllocateRaw(
4256 new_obj_size, VirtualMemory::EXECUTABLE);
4257 } else { 4257 } else {
4258 maybe_result = code_space_->AllocateRaw(new_obj_size); 4258 maybe_result = code_space_->AllocateRaw(new_obj_size);
4259 } 4259 }
4260 4260
4261 Object* result; 4261 Object* result;
4262 if (!maybe_result->ToObject(&result)) return maybe_result; 4262 if (!maybe_result->ToObject(&result)) return maybe_result;
4263 4263
4264 // Copy code object. 4264 // Copy code object.
4265 Address new_addr = reinterpret_cast<HeapObject*>(result)->address(); 4265 Address new_addr = reinterpret_cast<HeapObject*>(result)->address();
4266 4266
(...skipping 1092 matching lines...)
5359 if (chars > SeqTwoByteString::kMaxLength) { 5359 if (chars > SeqTwoByteString::kMaxLength) {
5360 return Failure::OutOfMemoryException(0xa); 5360 return Failure::OutOfMemoryException(0xa);
5361 } 5361 }
5362 map = internalized_string_map(); 5362 map = internalized_string_map();
5363 size = SeqTwoByteString::SizeFor(chars); 5363 size = SeqTwoByteString::SizeFor(chars);
5364 } 5364 }
5365 5365
5366 // Allocate string. 5366 // Allocate string.
5367 Object* result; 5367 Object* result;
5368 { MaybeObject* maybe_result = (size > Page::kMaxNonCodeHeapObjectSize) 5368 { MaybeObject* maybe_result = (size > Page::kMaxNonCodeHeapObjectSize)
5369 ? lo_space_->AllocateRaw(size, NOT_EXECUTABLE) 5369 ? lo_space_->AllocateRaw(size, VirtualMemory::NOT_EXECUTABLE)
5370 : old_data_space_->AllocateRaw(size); 5370 : old_data_space_->AllocateRaw(size);
5371 if (!maybe_result->ToObject(&result)) return maybe_result; 5371 if (!maybe_result->ToObject(&result)) return maybe_result;
5372 } 5372 }
5373 5373
5374 reinterpret_cast<HeapObject*>(result)->set_map_no_write_barrier(map); 5374 reinterpret_cast<HeapObject*>(result)->set_map_no_write_barrier(map);
5375 // Set length and hash fields of the allocated string. 5375 // Set length and hash fields of the allocated string.
5376 String* answer = String::cast(result); 5376 String* answer = String::cast(result);
5377 answer->set_length(chars); 5377 answer->set_length(chars);
5378 answer->set_hash_field(hash_field); 5378 answer->set_hash_field(hash_field);
5379 5379
(...skipping 132 matching lines...)
5512 if (length < 0 || length > FixedArray::kMaxLength) { 5512 if (length < 0 || length > FixedArray::kMaxLength) {
5513 return Failure::OutOfMemoryException(0xd); 5513 return Failure::OutOfMemoryException(0xd);
5514 } 5514 }
5515 ASSERT(length > 0); 5515 ASSERT(length > 0);
5516 // Use the general function if we're forced to always allocate. 5516 // Use the general function if we're forced to always allocate.
5517 if (always_allocate()) return AllocateFixedArray(length, TENURED); 5517 if (always_allocate()) return AllocateFixedArray(length, TENURED);
5518 // Allocate the raw data for a fixed array. 5518 // Allocate the raw data for a fixed array.
5519 int size = FixedArray::SizeFor(length); 5519 int size = FixedArray::SizeFor(length);
5520 return size <= Page::kMaxNonCodeHeapObjectSize 5520 return size <= Page::kMaxNonCodeHeapObjectSize
5521 ? new_space_.AllocateRaw(size) 5521 ? new_space_.AllocateRaw(size)
5522 : lo_space_->AllocateRaw(size, NOT_EXECUTABLE); 5522 : lo_space_->AllocateRaw(size, VirtualMemory::NOT_EXECUTABLE);
5523 } 5523 }
5524 5524
5525 5525
5526 MaybeObject* Heap::CopyFixedArrayWithMap(FixedArray* src, Map* map) { 5526 MaybeObject* Heap::CopyFixedArrayWithMap(FixedArray* src, Map* map) {
5527 int len = src->length(); 5527 int len = src->length();
5528 Object* obj; 5528 Object* obj;
5529 { MaybeObject* maybe_obj = AllocateRawFixedArray(len); 5529 { MaybeObject* maybe_obj = AllocateRawFixedArray(len);
5530 if (!maybe_obj->ToObject(&obj)) return maybe_obj; 5530 if (!maybe_obj->ToObject(&obj)) return maybe_obj;
5531 } 5531 }
5532 if (InNewSpace(obj)) { 5532 if (InNewSpace(obj)) {
(...skipping 1334 matching lines...)
6867 // Set up new space. 6867 // Set up new space.
6868 if (!new_space_.SetUp(reserved_semispace_size_, max_semispace_size_)) { 6868 if (!new_space_.SetUp(reserved_semispace_size_, max_semispace_size_)) {
6869 return false; 6869 return false;
6870 } 6870 }
6871 6871
6872 // Initialize old pointer space. 6872 // Initialize old pointer space.
6873 old_pointer_space_ = 6873 old_pointer_space_ =
6874 new OldSpace(this, 6874 new OldSpace(this,
6875 max_old_generation_size_, 6875 max_old_generation_size_,
6876 OLD_POINTER_SPACE, 6876 OLD_POINTER_SPACE,
6877 NOT_EXECUTABLE); 6877 VirtualMemory::NOT_EXECUTABLE);
6878 if (old_pointer_space_ == NULL) return false; 6878 if (old_pointer_space_ == NULL) return false;
6879 if (!old_pointer_space_->SetUp()) return false; 6879 if (!old_pointer_space_->SetUp()) return false;
6880 6880
6881 // Initialize old data space. 6881 // Initialize old data space.
6882 old_data_space_ = 6882 old_data_space_ =
6883 new OldSpace(this, 6883 new OldSpace(this,
6884 max_old_generation_size_, 6884 max_old_generation_size_,
6885 OLD_DATA_SPACE, 6885 OLD_DATA_SPACE,
6886 NOT_EXECUTABLE); 6886 VirtualMemory::NOT_EXECUTABLE);
6887 if (old_data_space_ == NULL) return false; 6887 if (old_data_space_ == NULL) return false;
6888 if (!old_data_space_->SetUp()) return false; 6888 if (!old_data_space_->SetUp()) return false;
6889 6889
6890 // Initialize the code space, set its maximum capacity to the old 6890 // Initialize the code space, set its maximum capacity to the old
6891 // generation size. It needs executable memory. 6891 // generation size. It needs executable memory.
6892 // On 64-bit platform(s), we put all code objects in a 2 GB range of 6892 // On 64-bit platform(s), we put all code objects in a 2 GB range of
6893 // virtual address space, so that they can call each other with near calls. 6893 // virtual address space, so that they can call each other with near calls.
6894 if (code_range_size_ > 0) { 6894 if (code_range_size_ > 0) {
6895 if (!isolate_->code_range()->SetUp(code_range_size_)) { 6895 if (!isolate_->code_range()->SetUp(code_range_size_)) {
6896 return false; 6896 return false;
6897 } 6897 }
6898 } 6898 }
6899 6899
6900 code_space_ = 6900 code_space_ = new OldSpace(
6901 new OldSpace(this, max_old_generation_size_, CODE_SPACE, EXECUTABLE); 6901 this, max_old_generation_size_, CODE_SPACE, VirtualMemory::EXECUTABLE);
6902 if (code_space_ == NULL) return false; 6902 if (code_space_ == NULL) return false;
6903 if (!code_space_->SetUp()) return false; 6903 if (!code_space_->SetUp()) return false;
6904 6904
6905 // Initialize map space. 6905 // Initialize map space.
6906 map_space_ = new MapSpace(this, max_old_generation_size_, MAP_SPACE); 6906 map_space_ = new MapSpace(this, max_old_generation_size_, MAP_SPACE);
6907 if (map_space_ == NULL) return false; 6907 if (map_space_ == NULL) return false;
6908 if (!map_space_->SetUp()) return false; 6908 if (!map_space_->SetUp()) return false;
6909 6909
6910 // Initialize simple cell space. 6910 // Initialize simple cell space.
6911 cell_space_ = new CellSpace(this, max_old_generation_size_, CELL_SPACE); 6911 cell_space_ = new CellSpace(this, max_old_generation_size_, CELL_SPACE);
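The code-range comment earlier in this hunk ("we put all code objects in a 2 GB range ... so that they can call each other with near calls") is the reason the range is reserved before the executable code space is created. On x64 a direct call encodes a signed 32-bit displacement, so keeping all code inside a single reservation of at most 2 GB guarantees every code object can reach every other one without an indirect call. A self-contained sketch of that displacement check; the helper name and constants are illustrative and not from the patch:

  #include <cstdint>
  #include <limits>

  // True if a direct x64 call (5 bytes: opcode E8 plus rel32) placed at
  // call_site can reach target. The displacement is measured from the end
  // of the call instruction and must fit in a signed 32-bit immediate;
  // within one <= 2 GB code range this always holds.
  bool FitsNearCall(uintptr_t call_site, uintptr_t target) {
    const int64_t delta =
        static_cast<int64_t>(target) - static_cast<int64_t>(call_site + 5);
    return delta >= std::numeric_limits<int32_t>::min() &&
           delta <= std::numeric_limits<int32_t>::max();
  }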
(...skipping 1076 matching lines...)
7988 // To work around this we split large chunk into normal kPageSize aligned 7988 // To work around this we split large chunk into normal kPageSize aligned
7989 // pieces and initialize size, owner and flags field of every piece. 7989 // pieces and initialize size, owner and flags field of every piece.
7990 // If FromAnyPointerAddress encounters a slot that belongs to one of 7990 // If FromAnyPointerAddress encounters a slot that belongs to one of
7991 // these smaller pieces it will treat it as a slot on a normal Page. 7991 // these smaller pieces it will treat it as a slot on a normal Page.
7992 Address chunk_end = chunk->address() + chunk->size(); 7992 Address chunk_end = chunk->address() + chunk->size();
7993 MemoryChunk* inner = MemoryChunk::FromAddress( 7993 MemoryChunk* inner = MemoryChunk::FromAddress(
7994 chunk->address() + Page::kPageSize); 7994 chunk->address() + Page::kPageSize);
7995 MemoryChunk* inner_last = MemoryChunk::FromAddress(chunk_end - 1); 7995 MemoryChunk* inner_last = MemoryChunk::FromAddress(chunk_end - 1);
7996 while (inner <= inner_last) { 7996 while (inner <= inner_last) {
7997 // Size of a large chunk is always a multiple of 7997 // Size of a large chunk is always a multiple of
7998 // OS::AllocateAlignment() so there is always 7998 // VirtualMemory::GetAllocationGranularity() so
7999 // enough space for a fake MemoryChunk header. 7999 // there is always enough space for a fake
8000 // MemoryChunk header.
8000 Address area_end = Min(inner->address() + Page::kPageSize, chunk_end); 8001 Address area_end = Min(inner->address() + Page::kPageSize, chunk_end);
8001 // Guard against overflow. 8002 // Guard against overflow.
8002 if (area_end < inner->address()) area_end = chunk_end; 8003 if (area_end < inner->address()) area_end = chunk_end;
8003 inner->SetArea(inner->address(), area_end); 8004 inner->SetArea(inner->address(), area_end);
8004 inner->set_size(Page::kPageSize); 8005 inner->set_size(Page::kPageSize);
8005 inner->set_owner(lo_space()); 8006 inner->set_owner(lo_space());
8006 inner->SetFlag(MemoryChunk::ABOUT_TO_BE_FREED); 8007 inner->SetFlag(MemoryChunk::ABOUT_TO_BE_FREED);
8007 inner = MemoryChunk::FromAddress( 8008 inner = MemoryChunk::FromAddress(
8008 inner->address() + Page::kPageSize); 8009 inner->address() + Page::kPageSize);
8009 } 8010 }
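The rewritten comment in this hunk swaps OS::AllocateAlignment() for VirtualMemory::GetAllocationGranularity(), but the underlying trick is unchanged: MemoryChunk::FromAddress recovers a chunk header by masking an address down to a Page::kPageSize boundary, so stamping a fake header at every such boundary inside the large chunk keeps that lookup safe for interior pointers. A simplified sketch of the masking step; the helper below is illustrative, not the real FromAddress:

  #include <cstdint>

  // Round an arbitrary interior address down to the enclosing page boundary,
  // which is where a (possibly fake) MemoryChunk header sits. page_size
  // stands in for Page::kPageSize and must be a power of two.
  inline uintptr_t ChunkHeaderAddress(uintptr_t addr, uintptr_t page_size) {
    return addr & ~(page_size - 1);
  }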
(...skipping 87 matching lines...)
8097 if (FLAG_concurrent_recompilation) { 8098 if (FLAG_concurrent_recompilation) {
8098 heap_->relocation_mutex_->Lock(); 8099 heap_->relocation_mutex_->Lock();
8099 #ifdef DEBUG 8100 #ifdef DEBUG
8100 heap_->relocation_mutex_locked_by_optimizer_thread_ = 8101 heap_->relocation_mutex_locked_by_optimizer_thread_ =
8101 heap_->isolate()->optimizing_compiler_thread()->IsOptimizerThread(); 8102 heap_->isolate()->optimizing_compiler_thread()->IsOptimizerThread();
8102 #endif // DEBUG 8103 #endif // DEBUG
8103 } 8104 }
8104 } 8105 }
8105 8106
8106 } } // namespace v8::internal 8107 } } // namespace v8::internal