Chromium Code Reviews

Diff: src/heap/spaces.cc

Issue 496433004: Handle empty allocation list in CodeRange properly. (Closed) Base URL: https://v8.googlecode.com/svn/branches/bleeding_edge
Patch Set: Created 6 years, 3 months ago
 // Copyright 2011 the V8 project authors. All rights reserved.
 // Use of this source code is governed by a BSD-style license that can be
 // found in the LICENSE file.

 #include "src/v8.h"

 #include "src/base/platform/platform.h"
 #include "src/full-codegen.h"
 #include "src/heap/mark-compact.h"
 #include "src/macro-assembler.h"
(...skipping 168 matching lines...)
   current_allocation_block_index_ = 0;
   // Code range is full or too fragmented.
   return false;
 }


 Address CodeRange::AllocateRawMemory(const size_t requested_size,
                                      const size_t commit_size,
                                      size_t* allocated) {
   DCHECK(commit_size <= requested_size);
-  DCHECK(current_allocation_block_index_ < allocation_list_.length());
-  if (requested_size > allocation_list_[current_allocation_block_index_].size) {
+  DCHECK(allocation_list_.length() == 0 ||
+         current_allocation_block_index_ < allocation_list_.length());
+  if (allocation_list_.length() == 0 ||
+      requested_size > allocation_list_[current_allocation_block_index_].size) {
     // Find an allocation block large enough.
     if (!GetNextAllocationBlock(requested_size)) return NULL;
   }
   // Commit the requested memory at the start of the current allocation block.
   size_t aligned_requested = RoundUp(requested_size, MemoryChunk::kAlignment);
   FreeBlock current = allocation_list_[current_allocation_block_index_];
   if (aligned_requested >= (current.size - Page::kPageSize)) {
     // Don't leave a small free block, useless for a large object or chunk.
     *allocated = current.size;
   } else {
     *allocated = aligned_requested;
   }
   DCHECK(*allocated <= current.size);
   DCHECK(IsAddressAligned(current.start, MemoryChunk::kAlignment));
   if (!isolate_->memory_allocator()->CommitExecutableMemory(
           code_range_, current.start, commit_size, *allocated)) {
     *allocated = 0;
     return NULL;
   }
   allocation_list_[current_allocation_block_index_].start += *allocated;
   allocation_list_[current_allocation_block_index_].size -= *allocated;
   if (*allocated == current.size) {
     // This block is used up, get the next one.
-    if (!GetNextAllocationBlock(0)) return NULL;
+    GetNextAllocationBlock(0);
   }
   return current.start;
 }


 bool CodeRange::CommitRawMemory(Address start, size_t length) {
   return isolate_->memory_allocator()->CommitMemory(start, length, EXECUTABLE);
 }


(...skipping 2870 matching lines...)
     object->ShortPrint();
     PrintF("\n");
   }
   printf(" --------------------------------------\n");
   printf(" Marked: %x, LiveCount: %x\n", mark_size, LiveBytes());
 }

 #endif  // DEBUG
 }
 }  // namespace v8::internal
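
The CL also touches test/cctest/test-spaces.cc. Viewed in isolation, the change does two things: AllocateRawMemory no longer indexes allocation_list_ when the list is empty (it first asks GetNextAllocationBlock for a block and returns NULL if none exists), and a failed GetNextAllocationBlock(0) after the current block has been fully consumed is no longer treated as an allocation failure, since the requested memory has already been committed. Below is a minimal, self-contained sketch of that corrected control flow; FreeBlock, BlockList, and GetNextBlock are hypothetical stand-ins, not the V8 classes.

// Minimal sketch, not V8 code: mirrors the fixed control flow of
// CodeRange::AllocateRawMemory under a simplified free-block list.
#include <cstddef>
#include <cstdint>
#include <utility>
#include <vector>

struct FreeBlock {
  uintptr_t start;
  size_t size;
};

class BlockList {
 public:
  explicit BlockList(std::vector<FreeBlock> blocks)
      : blocks_(std::move(blocks)) {}

  // Returns the start of the allocated region, or 0 on failure.
  uintptr_t Allocate(size_t requested, size_t* allocated) {
    // Guard the index: with an empty list (or a too-small current block),
    // look for another block before touching blocks_[current_].
    if (blocks_.empty() || requested > blocks_[current_].size) {
      if (!GetNextBlock(requested)) return 0;  // Full or too fragmented.
    }
    FreeBlock current = blocks_[current_];
    *allocated = requested;
    blocks_[current_].start += *allocated;
    blocks_[current_].size -= *allocated;
    if (*allocated == current.size) {
      // Block used up. The allocation itself already succeeded, so a failed
      // refill here is not an error; the next call retries the lookup.
      GetNextBlock(0);
    }
    return current.start;
  }

 private:
  bool GetNextBlock(size_t requested) {
    for (size_t i = 0; i < blocks_.size(); ++i) {
      if (blocks_[i].size > 0 && blocks_[i].size >= requested) {
        current_ = i;
        return true;
      }
    }
    return false;
  }

  std::vector<FreeBlock> blocks_;
  size_t current_ = 0;
};

With an initially empty list, Allocate() now returns 0 instead of reading past the end of the vector, which is the scenario the relaxed DCHECK and the leading allocation_list_.length() check in the real code are protecting against.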
