Chromium Code Reviews

Unified Diff: src/spaces.cc

Issue 4397004: Add 128MB limit for executable pages. (Closed) Base URL: http://v8.googlecode.com/svn/branches/bleeding_edge/
Patch Set: '' Created 10 years, 1 month ago
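This issue teaches MemoryAllocator::Setup to take a separate capacity for executable memory in addition to the overall capacity, and makes AllocateRawMemory enforce that cap for EXECUTABLE requests. The code that actually chooses the 128 MB default lives outside this file (src/heap.cc is part of this issue but not shown here). The following is only a rough sketch of how a caller might pass the two capacities; the constant and function names are illustrative assumptions, not code from this patch:

// Hypothetical caller sketch (names are illustrative, not from this patch).
// Configures the allocator with an overall capacity plus a smaller cap that
// only executable pages may consume.
static const intptr_t kMB = 1024 * 1024;
static const intptr_t kDefaultMaxExecutableSize = 128 * kMB;  // assumed default

bool ConfigureMemoryAllocator(intptr_t max_capacity) {
  // The executable cap must not exceed the total capacity; Setup() checks
  // this via ASSERT_GE(capacity_, capacity_executable_).
  intptr_t capacity_executable = max_capacity < kDefaultMaxExecutableSize
                                     ? max_capacity
                                     : kDefaultMaxExecutableSize;
  return MemoryAllocator::Setup(max_capacity, capacity_executable);
}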
// Copyright 2006-2008 the V8 project authors. All rights reserved.
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
//
//     * Redistributions of source code must retain the above copyright
//       notice, this list of conditions and the following disclaimer.
//     * Redistributions in binary form must reproduce the above
//       copyright notice, this list of conditions and the following
//       disclaimer in the documentation and/or other materials provided
(...skipping 253 matching lines...)
  code_range_ = NULL;
  free_list_.Free();
  allocation_list_.Free();
}


// -----------------------------------------------------------------------------
// MemoryAllocator
//
intptr_t MemoryAllocator::capacity_ = 0;
+intptr_t MemoryAllocator::capacity_executable_ = 0;
intptr_t MemoryAllocator::size_ = 0;
intptr_t MemoryAllocator::size_executable_ = 0;

List<MemoryAllocator::MemoryAllocationCallbackRegistration>
    MemoryAllocator::memory_allocation_callbacks_;

VirtualMemory* MemoryAllocator::initial_chunk_ = NULL;

// 270 is an estimate based on the static default heap size of a pair of 256K
// semispaces and a 64M old generation.
(...skipping 11 matching lines...)
  free_chunk_ids_[top_++] = free_chunk_id;
}


int MemoryAllocator::Pop() {
  ASSERT(top_ > 0);
  return free_chunk_ids_[--top_];
}


-bool MemoryAllocator::Setup(intptr_t capacity) {
+bool MemoryAllocator::Setup(intptr_t capacity, intptr_t capacity_executable) {
  capacity_ = RoundUp(capacity, Page::kPageSize);
+  capacity_executable_ = RoundUp(capacity_executable, Page::kPageSize);
+  ASSERT_GE(capacity_, capacity_executable_);

  // Over-estimate the size of chunks_ array.  It assumes the expansion of old
  // space is always in the unit of a chunk (kChunkSize) except the last
  // expansion.
  //
  // Due to alignment, allocated space might be one page less than required
  // number (kPagesPerChunk) of pages for old spaces.
  //
  // Reserve two chunk ids for semispaces, one for map space, one for old
  // space, and one for code space.
(...skipping 22 matching lines...)

  if (initial_chunk_ != NULL) {
    LOG(DeleteEvent("InitialChunk", initial_chunk_->address()));
    delete initial_chunk_;
    initial_chunk_ = NULL;
  }

  ASSERT(top_ == max_nof_chunks_);  // all chunks are free
  top_ = 0;
  capacity_ = 0;
+  capacity_executable_ = 0;
  size_ = 0;
  max_nof_chunks_ = 0;
}


void* MemoryAllocator::AllocateRawMemory(const size_t requested,
                                         size_t* allocated,
                                         Executability executable) {
  if (size_ + static_cast<size_t>(requested) > static_cast<size_t>(capacity_)) {
    return NULL;
  }
  void* mem;
  if (executable == EXECUTABLE && CodeRange::exists()) {
+    if (size_executable_ + requested >
+        static_cast<size_t>(capacity_executable_)) {
+      LOG(StringEvent("MemoryAllocator::AllocateRawMemory",
+                      "V8 Executable Allocation capacity exceeded"));

Mads Ager (chromium) 2010/11/10 08:14:24: Indentation off by one space, it seems?

+      return NULL;
+    }
    mem = CodeRange::AllocateRawMemory(requested, allocated);
+    size_executable_ += static_cast<int>(*allocated);

Mads Ager (chromium) 2010/11/10 08:23:26: This only gets updated when CodeRange::exists().

+
  } else {
    mem = OS::Allocate(requested, allocated, (executable == EXECUTABLE));
  }
  int alloced = static_cast<int>(*allocated);
  size_ += alloced;
-  if (executable == EXECUTABLE) size_executable_ += alloced;
#ifdef DEBUG
  ZapBlock(reinterpret_cast<Address>(mem), alloced);
#endif
  Counters::memory_allocated.Increment(alloced);
  return mem;
}
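Regarding the second inline comment above: in this patch set, size_executable_ is incremented only inside the CodeRange::exists() branch, while the old unconditional increment ("if (executable == EXECUTABLE) size_executable_ += alloced;") is removed, so EXECUTABLE allocations that fall through to OS::Allocate are not counted against the new cap. A minimal sketch of one way the bookkeeping could cover both paths, using only names already present in this file; this is an illustration of the reviewer's point, not the revision that was eventually landed:

  void* mem;
  if (executable == EXECUTABLE) {
    // Enforce the executable cap whether or not a CodeRange is in use.
    if (size_executable_ + requested >
        static_cast<size_t>(capacity_executable_)) {
      LOG(StringEvent("MemoryAllocator::AllocateRawMemory",
                      "V8 Executable Allocation capacity exceeded"));
      return NULL;
    }
    mem = CodeRange::exists()
              ? CodeRange::AllocateRawMemory(requested, allocated)
              : OS::Allocate(requested, allocated, true);
    // Count every executable allocation, not only CodeRange-backed ones.
    size_executable_ += static_cast<int>(*allocated);
  } else {
    mem = OS::Allocate(requested, allocated, false);
  }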


void MemoryAllocator::FreeRawMemory(void* mem,
                                    size_t length,
                                    Executability executable) {
#ifdef DEBUG
  ZapBlock(reinterpret_cast<Address>(mem), length);
#endif
  if (CodeRange::contains(static_cast<Address>(mem))) {
    CodeRange::FreeRawMemory(mem, length);
  } else {
    OS::Free(mem, length);
  }
  Counters::memory_allocated.Decrement(static_cast<int>(length));
  size_ -= static_cast<int>(length);
  if (executable == EXECUTABLE) size_executable_ -= static_cast<int>(length);

  ASSERT(size_ >= 0);
+  ASSERT(size_executable_ >= 0);
}
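The new ASSERT(size_executable_ >= 0) pairs with the unconditional decrement a few lines above it. Because the decrement runs for every EXECUTABLE free while the increment in AllocateRawMemory, as reviewed here, runs only when CodeRange::exists(), the counter can drift negative on configurations without a code range, which is exactly what this debug-only assert would catch. A short hypothetical trace under that assumption:

// Hypothetical trace assuming CodeRange::exists() == false:
//   AllocateRawMemory(4096, &allocated, EXECUTABLE)
//       -> falls through to OS::Allocate, size_executable_ stays 0
//   FreeRawMemory(mem, 4096, EXECUTABLE)
//       -> size_executable_ -= 4096, now -4096
//   ASSERT(size_executable_ >= 0) fails.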


void MemoryAllocator::PerformAllocationCallback(ObjectSpace space,
                                                AllocationAction action,
                                                size_t size) {
  for (int i = 0; i < memory_allocation_callbacks_.length(); ++i) {
    MemoryAllocationCallbackRegistration registration =
        memory_allocation_callbacks_[i];
    if ((registration.space & space) == space &&
(...skipping 2638 matching lines...)
  for (HeapObject* obj = obj_it.next(); obj != NULL; obj = obj_it.next()) {
    if (obj->IsCode()) {
      Code* code = Code::cast(obj);
      code_kind_statistics[code->kind()] += code->Size();
    }
  }
}
#endif  // DEBUG

} }  // namespace v8::internal
Other files in this issue: src/heap.cc, src/spaces.h, test/cctest/test-mark-compact.cc
