Chromium Code Reviews

Unified Diff: src/spaces.cc

Issue 332373002: Do GC if CodeRange fails to allocate a block. (Closed) Base URL: https://v8.googlecode.com/svn/branches/bleeding_edge
Patch Set: Created 6 years, 6 months ago
 // Copyright 2011 the V8 project authors. All rights reserved.
 // Use of this source code is governed by a BSD-style license that can be
 // found in the LICENSE file.

 #include "src/v8.h"

 #include "src/full-codegen.h"
 #include "src/macro-assembler.h"
 #include "src/mark-compact.h"
 #include "src/msan.h"
(...skipping 140 matching lines...)

 int CodeRange::CompareFreeBlockAddress(const FreeBlock* left,
                                        const FreeBlock* right) {
   // The entire point of CodeRange is that the difference between two
   // addresses in the range can be represented as a signed 32-bit int,
   // so the cast is semantically correct.
   return static_cast<int>(left->start - right->start);
 }


-void CodeRange::GetNextAllocationBlock(size_t requested) {
+bool CodeRange::GetNextAllocationBlock(size_t requested) {
   for (current_allocation_block_index_++;
        current_allocation_block_index_ < allocation_list_.length();
        current_allocation_block_index_++) {
     if (requested <= allocation_list_[current_allocation_block_index_].size) {
-      return;  // Found a large enough allocation block.
+      return true;  // Found a large enough allocation block.
     }
   }

   // Sort and merge the free blocks on the free list and the allocation list.
   free_list_.AddAll(allocation_list_);
   allocation_list_.Clear();
   free_list_.Sort(&CompareFreeBlockAddress);
   for (int i = 0; i < free_list_.length();) {
     FreeBlock merged = free_list_[i];
     i++;
     // Add adjacent free blocks to the current merged block.
     while (i < free_list_.length() &&
            free_list_[i].start == merged.start + merged.size) {
       merged.size += free_list_[i].size;
       i++;
     }
     if (merged.size > 0) {
       allocation_list_.Add(merged);
     }
   }
   free_list_.Clear();

   for (current_allocation_block_index_ = 0;
        current_allocation_block_index_ < allocation_list_.length();
        current_allocation_block_index_++) {
     if (requested <= allocation_list_[current_allocation_block_index_].size) {
-      return;  // Found a large enough allocation block.
+      return true;  // Found a large enough allocation block.
     }
   }

   // Code range is full or too fragmented.
-  V8::FatalProcessOutOfMemory("CodeRange::GetNextAllocationBlock");
+  return false;
 }
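
Aside (not part of this patch): the unchanged sort-and-merge loop above coalesces adjacent free blocks after sorting them by start address. The following self-contained sketch shows the same technique with std::vector instead of V8's internal List; the names Block and Coalesce are illustrative assumptions, not V8 code.

#include <algorithm>
#include <cstddef>
#include <vector>

struct Block {
  size_t start;
  size_t size;
};

// Sort blocks by start address, then merge runs whose ranges are adjacent,
// mirroring the merge loop in CodeRange::GetNextAllocationBlock above.
std::vector<Block> Coalesce(std::vector<Block> blocks) {
  std::sort(blocks.begin(), blocks.end(),
            [](const Block& a, const Block& b) { return a.start < b.start; });
  std::vector<Block> merged_list;
  for (size_t i = 0; i < blocks.size();) {
    Block merged = blocks[i++];
    while (i < blocks.size() &&
           blocks[i].start == merged.start + merged.size) {
      merged.size += blocks[i].size;  // Adjacent block: extend the current one.
      ++i;
    }
    if (merged.size > 0) merged_list.push_back(merged);
  }
  return merged_list;
}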


 Address CodeRange::AllocateRawMemory(const size_t requested_size,
                                      const size_t commit_size,
                                      size_t* allocated) {
   ASSERT(commit_size <= requested_size);
   ASSERT(current_allocation_block_index_ < allocation_list_.length());
   if (requested_size > allocation_list_[current_allocation_block_index_].size) {
-    // Find an allocation block large enough.  This function call may
-    // call V8::FatalProcessOutOfMemory if it cannot find a large enough block.
-    GetNextAllocationBlock(requested_size);
+    // Find an allocation block large enough.
+    if (!GetNextAllocationBlock(requested_size)) return NULL;
   }
   // Commit the requested memory at the start of the current allocation block.
   size_t aligned_requested = RoundUp(requested_size, MemoryChunk::kAlignment);
   FreeBlock current = allocation_list_[current_allocation_block_index_];
   if (aligned_requested >= (current.size - Page::kPageSize)) {
     // Don't leave a small free block, useless for a large object or chunk.
     *allocated = current.size;
   } else {
     *allocated = aligned_requested;
   }
   ASSERT(*allocated <= current.size);
   ASSERT(IsAddressAligned(current.start, MemoryChunk::kAlignment));
   if (!isolate_->memory_allocator()->CommitExecutableMemory(code_range_,
                                                             current.start,
                                                             commit_size,
                                                             *allocated)) {
     *allocated = 0;
     return NULL;
   }
   allocation_list_[current_allocation_block_index_].start += *allocated;
   allocation_list_[current_allocation_block_index_].size -= *allocated;
   if (*allocated == current.size) {
-    GetNextAllocationBlock(0);  // This block is used up, get the next one.
+    // This block is used up, get the next one.
+    if (!GetNextAllocationBlock(0)) return NULL;
   }
   return current.start;
 }
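
Aside (not part of this patch): with these changes AllocateRawMemory returns NULL instead of aborting when the code range is full or too fragmented, which is what lets a caller do a GC and retry, as the issue title says. A minimal sketch of that caller-side pattern follows; the helper name and the specific Heap::CollectAllGarbage call are assumptions for illustration, not code from this CL.

// Hypothetical caller-side sketch: retry a code-range allocation once after
// a GC when the first attempt fails. The GC call shown here is an assumed
// example, not the call site introduced by this CL.
Address AllocateCodeWithGCRetry(CodeRange* code_range, Heap* heap,
                                size_t requested_size, size_t commit_size,
                                size_t* allocated) {
  Address base =
      code_range->AllocateRawMemory(requested_size, commit_size, allocated);
  if (base == NULL) {
    // Code range was full or too fragmented: free dead code objects, then
    // retry once instead of killing the process.
    heap->CollectAllGarbage(Heap::kNoGCFlags, "CodeRange allocation failed");
    base =
        code_range->AllocateRawMemory(requested_size, commit_size, allocated);
  }
  return base;  // May still be NULL; the caller must handle the failure.
}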


 bool CodeRange::CommitRawMemory(Address start, size_t length) {
   return isolate_->memory_allocator()->CommitMemory(start, length, EXECUTABLE);
 }


(...skipping 2881 matching lines...)
     object->ShortPrint();
     PrintF("\n");
   }
   printf(" --------------------------------------\n");
   printf(" Marked: %x, LiveCount: %x\n", mark_size, LiveBytes());
 }

 #endif  // DEBUG

 } }  // namespace v8::internal
