Chromium Code Reviews

Unified Diff: src/heap/spaces.cc

Issue 742733002: Reserve code range block for evacuation. (Closed) Base URL: https://chromium.googlesource.com/v8/v8.git@master
Patch Set: Created 6 years, 1 month ago
 // Copyright 2011 the V8 project authors. All rights reserved.
 // Use of this source code is governed by a BSD-style license that can be
 // found in the LICENSE file.

 #include "src/v8.h"

 #include "src/base/bits.h"
 #include "src/base/platform/platform.h"
 #include "src/full-codegen.h"
 #include "src/heap/mark-compact.h"
(...skipping 75 matching lines...)

 // -----------------------------------------------------------------------------
 // CodeRange


 CodeRange::CodeRange(Isolate* isolate)
     : isolate_(isolate),
       code_range_(NULL),
       free_list_(0),
       allocation_list_(0),
-      current_allocation_block_index_(0) {}
+      current_allocation_block_index_(0),
+      emergency_block_() {}

 bool CodeRange::SetUp(size_t requested) {
   DCHECK(code_range_ == NULL);

   if (requested == 0) {
     // When a target requires the code range feature, we put all code objects
     // in a kMaximalCodeRangeSize range of virtual address space, so that
     // they can call each other with near calls.
     if (kRequiresCodeRange) {
(...skipping 88 matching lines...)
   current_allocation_block_index_ = 0;
   // Code range is full or too fragmented.
   return false;
 }

 Address CodeRange::AllocateRawMemory(const size_t requested_size,
                                      const size_t commit_size,
                                      size_t* allocated) {
   DCHECK(commit_size <= requested_size);
-  DCHECK(allocation_list_.length() == 0 ||
-         current_allocation_block_index_ < allocation_list_.length());
-  if (allocation_list_.length() == 0 ||
-      requested_size > allocation_list_[current_allocation_block_index_].size) {
-    // Find an allocation block large enough.
-    if (!GetNextAllocationBlock(requested_size)) return NULL;
+  FreeBlock current;
+  if (!ReserveBlock(requested_size, &current)) {
+    *allocated = 0;
+    return NULL;
   }
-  // Commit the requested memory at the start of the current allocation block.
-  size_t aligned_requested = RoundUp(requested_size, MemoryChunk::kAlignment);
-  FreeBlock current = allocation_list_[current_allocation_block_index_];
-  if (aligned_requested >= (current.size - Page::kPageSize)) {
-    // Don't leave a small free block, useless for a large object or chunk.
-    *allocated = current.size;
-  } else {
-    *allocated = aligned_requested;
-  }
+  *allocated = current.size;
   DCHECK(*allocated <= current.size);
   DCHECK(IsAddressAligned(current.start, MemoryChunk::kAlignment));
   if (!isolate_->memory_allocator()->CommitExecutableMemory(
           code_range_, current.start, commit_size, *allocated)) {
     *allocated = 0;
+    ReleaseBlock(&current);
     return NULL;
   }
-  allocation_list_[current_allocation_block_index_].start += *allocated;
-  allocation_list_[current_allocation_block_index_].size -= *allocated;
-  if (*allocated == current.size) {
-    // This block is used up, get the next one.
-    GetNextAllocationBlock(0);
-  }
   return current.start;
 }

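Note on the new control flow above: because ReserveBlock now detaches the block from the allocation list up front, a failed commit must hand the block back explicitly. Below is a minimal standalone sketch of that failure contract. Every name here (ReserveBlock, ReleaseBlock, CommitExecutableMemory, AllocateRawMemory) is a simplified stand-in, not V8's real signature, and the commit stub is rigged to fail so both failure paths are visible.

#include <cstddef>
#include <cstdio>

// Hypothetical stand-ins for illustration only; not V8 code.
static bool ReserveBlock(size_t requested, size_t* start, size_t* size) {
  *start = 0;
  *size = requested;
  return true;  // Pretend a block was carved off the allocation list.
}
static void ReleaseBlock(size_t start, size_t size) {
  printf("block [%zu, +%zu) returned to the free list\n", start, size);
}
static bool CommitExecutableMemory(size_t) { return false; }  // Simulated failure.

static bool AllocateRawMemory(size_t requested, size_t* allocated) {
  size_t start, size;
  if (!ReserveBlock(requested, &start, &size)) {
    *allocated = 0;  // Failure path 1: range full or too fragmented.
    return false;
  }
  *allocated = size;
  if (!CommitExecutableMemory(size)) {
    *allocated = 0;
    // Failure path 2: the block was already detached from the allocation
    // list by ReserveBlock, so it must be handed back explicitly.
    ReleaseBlock(start, size);
    return false;
  }
  return true;
}

int main() {
  size_t allocated = 42;
  AllocateRawMemory(4096, &allocated);
  printf("allocated = %zu\n", allocated);  // Prints 0 after the failed commit.
  return 0;
}
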
 bool CodeRange::CommitRawMemory(Address start, size_t length) {
   return isolate_->memory_allocator()->CommitMemory(start, length, EXECUTABLE);
 }


 bool CodeRange::UncommitRawMemory(Address start, size_t length) {
   return code_range_->Uncommit(start, length);
 }


 void CodeRange::FreeRawMemory(Address address, size_t length) {
   DCHECK(IsAddressAligned(address, MemoryChunk::kAlignment));
   free_list_.Add(FreeBlock(address, length));
   code_range_->Uncommit(address, length);
 }


 void CodeRange::TearDown() {
   delete code_range_;  // Frees all memory in the virtual memory range.
   code_range_ = NULL;
   free_list_.Free();
   allocation_list_.Free();
 }

+bool CodeRange::ReserveBlock(const size_t requested_size, FreeBlock* block) {
+  DCHECK(allocation_list_.length() == 0 ||
+         current_allocation_block_index_ < allocation_list_.length());
+  if (allocation_list_.length() == 0 ||
+      requested_size > allocation_list_[current_allocation_block_index_].size) {
+    // Find an allocation block large enough.
+    if (!GetNextAllocationBlock(requested_size)) return false;
+  }
+  // Commit the requested memory at the start of the current allocation block.
+  size_t aligned_requested = RoundUp(requested_size, MemoryChunk::kAlignment);
+  *block = allocation_list_[current_allocation_block_index_];
+  // Don't leave a small free block, useless for a large object or chunk.
+  if (aligned_requested < (block->size - Page::kPageSize)) {
+    block->size = aligned_requested;
+  }
+  DCHECK(IsAddressAligned(block->start, MemoryChunk::kAlignment));
+  allocation_list_[current_allocation_block_index_].start += block->size;
+  allocation_list_[current_allocation_block_index_].size -= block->size;
+  return true;
+}
+
+
+void CodeRange::ReleaseBlock(const FreeBlock* block) { free_list_.Add(*block); }
+
+
+void CodeRange::ReserveEmergencyBlock() {
+  const size_t requested_size = MemoryAllocator::CodePageAreaSize();
+  if (emergency_block_.size == 0) {
+    ReserveBlock(requested_size, &emergency_block_);
+  } else {
+    DCHECK(emergency_block_.size >= requested_size);
+  }
+}
+
+
+void CodeRange::ReleaseEmergencyBlock() {
+  if (emergency_block_.size != 0) {
+    ReleaseBlock(&emergency_block_);
+    emergency_block_.size = 0;
+  }
+}

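Taken together, the four new methods implement a simple reserve/release discipline: one page-sized block is held out of circulation so that evacuation can still obtain a code page when the range is otherwise exhausted. The following standalone model demonstrates why that works. CodeRangeModel, its std::vector free list, and the byte offsets are all invented for this sketch; none of it is V8 code, only the control flow mirrors the patch.

#include <cstddef>
#include <cstdio>
#include <vector>

struct FreeBlock {
  size_t start;  // Offset into the range (an Address in the real code).
  size_t size;
};

class CodeRangeModel {
 public:
  explicit CodeRangeModel(size_t total) { free_list_.push_back({0, total}); }

  // Carve a block of |requested| bytes off the first free block that fits.
  bool ReserveBlock(size_t requested, FreeBlock* block) {
    for (size_t i = 0; i < free_list_.size(); ++i) {
      if (free_list_[i].size >= requested) {
        *block = {free_list_[i].start, requested};
        free_list_[i].start += requested;
        free_list_[i].size -= requested;
        return true;
      }
    }
    return false;  // Full or too fragmented.
  }

  // Hand an unused reservation back to the free list.
  void ReleaseBlock(const FreeBlock* block) { free_list_.push_back(*block); }

  // Keep one page-sized block aside for evacuation emergencies.
  void ReserveEmergencyBlock(size_t page_size) {
    if (emergency_block_.size == 0) ReserveBlock(page_size, &emergency_block_);
  }

  // Make the emergency block allocatable again, e.g. right before the
  // collector needs a page to evacuate live code into.
  void ReleaseEmergencyBlock() {
    if (emergency_block_.size != 0) {
      ReleaseBlock(&emergency_block_);
      emergency_block_.size = 0;
    }
  }

 private:
  std::vector<FreeBlock> free_list_;
  FreeBlock emergency_block_{0, 0};
};

int main() {
  const size_t kPage = 4096;
  CodeRangeModel range(4 * kPage);
  range.ReserveEmergencyBlock(kPage);

  // Exhaust the ordinary free space.
  FreeBlock b;
  while (range.ReserveBlock(kPage, &b)) {
  }
  if (!range.ReserveBlock(kPage, &b)) printf("range looks full\n");

  range.ReleaseEmergencyBlock();  // The reserve is freed for evacuation...
  if (range.ReserveBlock(kPage, &b))
    printf("evacuation page at offset %zu\n", b.start);  // ...and one page fits.
  return 0;
}
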
 // -----------------------------------------------------------------------------
 // MemoryAllocator
 //

 MemoryAllocator::MemoryAllocator(Isolate* isolate)
     : isolate_(isolate),
       capacity_(0),
       capacity_executable_(0),
       size_(0),
       size_executable_(0),
(...skipping 826 matching lines...)
   } else {
     heap()->QueueMemoryChunkForFree(page);
   }

   DCHECK(Capacity() > 0);
   accounting_stats_.ShrinkSpace(AreaSize());
 }


 void PagedSpace::CreateEmergencyMemory() {
+  if (identity() == CODE_SPACE) {
+    // Make the emergency block available to the allocator.
+    heap()->isolate()->code_range()->ReleaseEmergencyBlock();
+    DCHECK(MemoryAllocator::CodePageAreaSize() == AreaSize());
+  }
   emergency_memory_ = heap()->isolate()->memory_allocator()->AllocateChunk(
       AreaSize(), AreaSize(), executable(), this);
 }


 void PagedSpace::FreeEmergencyMemory() {
   Page* page = static_cast<Page*>(emergency_memory_);
   DCHECK(page->LiveBytes() == 0);
   DCHECK(AreaSize() == page->area_size());
   DCHECK(!free_list_.ContainsPageFreeListItems(page));
(...skipping 2047 matching lines...)
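A hedged sketch of the ordering the CreateEmergencyMemory hook above enforces. CodeRangeStub and its members are invented for illustration; the real flow runs through Heap, MemoryAllocator, and CodeRange. The point is that for CODE_SPACE the emergency block must be released back to the code range before AllocateChunk runs, otherwise chunk allocation would fail against a full range.

#include <cstdio>

// Invented stub: pretends the code range is full, so a chunk fits only
// once the emergency reserve has been released.
struct CodeRangeStub {
  bool emergency_reserved;
  CodeRangeStub() : emergency_reserved(true) {}
  void ReleaseEmergencyBlock() { emergency_reserved = false; }
  bool AllocateChunk() { return !emergency_reserved; }
};

int main() {
  CodeRangeStub code_range;
  // PagedSpace::CreateEmergencyMemory() for a code space, in miniature:
  code_range.ReleaseEmergencyBlock();  // Step 1: free the reserved block.
  if (code_range.AllocateChunk()) {    // Step 2: now the allocation succeeds.
    printf("emergency chunk allocated from the released block\n");
  }
  return 0;
}
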
     object->ShortPrint();
     PrintF("\n");
   }
   printf("  --------------------------------------\n");
   printf("  Marked: %x, LiveCount: %x\n", mark_size, LiveBytes());
 }

 #endif  // DEBUG
 }
 }  // namespace v8::internal