Chromium Code Reviews

Side by Side Diff: src/spaces.cc

Issue 23641009: Refactor and cleanup VirtualMemory. (Closed) Base URL: https://v8.googlecode.com/svn/branches/bleeding_edge
Patch Set: Addressed nits. Created 7 years, 3 months ago
1 // Copyright 2011 the V8 project authors. All rights reserved. 1 // Copyright 2011 the V8 project authors. All rights reserved.
2 // Redistribution and use in source and binary forms, with or without 2 // Redistribution and use in source and binary forms, with or without
3 // modification, are permitted provided that the following conditions are 3 // modification, are permitted provided that the following conditions are
4 // met: 4 // met:
5 // 5 //
6 // * Redistributions of source code must retain the above copyright 6 // * Redistributions of source code must retain the above copyright
7 // notice, this list of conditions and the following disclaimer. 7 // notice, this list of conditions and the following disclaimer.
8 // * Redistributions in binary form must reproduce the above 8 // * Redistributions in binary form must reproduce the above
9 // copyright notice, this list of conditions and the following 9 // copyright notice, this list of conditions and the following
10 // disclaimer in the documentation and/or other materials provided 10 // disclaimer in the documentation and/or other materials provided
(...skipping 227 matching lines...)
238 allocation_list_[current_allocation_block_index_].start += *allocated; 238 allocation_list_[current_allocation_block_index_].start += *allocated;
239 allocation_list_[current_allocation_block_index_].size -= *allocated; 239 allocation_list_[current_allocation_block_index_].size -= *allocated;
240 if (*allocated == current.size) { 240 if (*allocated == current.size) {
241 GetNextAllocationBlock(0); // This block is used up, get the next one. 241 GetNextAllocationBlock(0); // This block is used up, get the next one.
242 } 242 }
243 return current.start; 243 return current.start;
244 } 244 }
245 245
246 246
247 bool CodeRange::CommitRawMemory(Address start, size_t length) { 247 bool CodeRange::CommitRawMemory(Address start, size_t length) {
248 return isolate_->memory_allocator()->CommitMemory(start, length, EXECUTABLE); 248 return isolate_->memory_allocator()->CommitMemory(
249 start, length, VirtualMemory::EXECUTABLE);
249 } 250 }
250 251
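The change running through this whole file is visible in the hunk above: the freestanding Executability enum has moved into VirtualMemory, so every call site now qualifies the constants as VirtualMemory::EXECUTABLE / VirtualMemory::NOT_EXECUTABLE. A minimal sketch of the shape the new call sites assume (the real declaration lives in the VirtualMemory header and will have more members; argument types here are assumptions):

    class VirtualMemory {
     public:
      // Nested enum replacing the old top-level Executability.
      enum Executability { NOT_EXECUTABLE, EXECUTABLE };

      // Signatures inferred from the call sites in this diff.
      bool Commit(void* address, size_t size, Executability executability);
      static bool CommitRegion(void* base, size_t size,
                               Executability executability);
    };
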
251 252
252 bool CodeRange::UncommitRawMemory(Address start, size_t length) { 253 bool CodeRange::UncommitRawMemory(Address start, size_t length) {
253 return code_range_->Uncommit(start, length); 254 return code_range_->Uncommit(start, length);
254 } 255 }
255 256
256 257
257 void CodeRange::FreeRawMemory(Address address, size_t length) { 258 void CodeRange::FreeRawMemory(Address address, size_t length) {
258 ASSERT(IsAddressAligned(address, MemoryChunk::kAlignment)); 259 ASSERT(IsAddressAligned(address, MemoryChunk::kAlignment));
259 free_list_.Add(FreeBlock(address, length)); 260 free_list_.Add(FreeBlock(address, length));
260 code_range_->Uncommit(address, length); 261 bool result = code_range_->Uncommit(address, length);
262 ASSERT(result);
263 USE(result);
261 } 264 }
262 265
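The new lines 261-263 apply V8's standard pattern for return values that are only checked in debug builds: ASSERT validates the result, and USE marks the variable as read so release builds (where ASSERT compiles to nothing) do not warn. The idiom in isolation:

    bool result = code_range_->Uncommit(address, length);
    ASSERT(result);  // Debug builds: fail loudly if the uncommit failed.
    USE(result);     // Release builds: ASSERT expands to nothing, so this
                     // keeps 'result' from triggering an unused-variable
                     // warning.
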
263 266
264 void CodeRange::TearDown() { 267 void CodeRange::TearDown() {
265 delete code_range_; // Frees all memory in the virtual memory range. 268 delete code_range_; // Frees all memory in the virtual memory range.
266 code_range_ = NULL; 269 code_range_ = NULL;
267 free_list_.Free(); 270 free_list_.Free();
268 allocation_list_.Free(); 271 allocation_list_.Free();
269 } 272 }
270 273
(...skipping 30 matching lines...)
301 ASSERT(size_ == 0); 304 ASSERT(size_ == 0);
302 // TODO(gc) this will be true again when we fix FreeMemory. 305 // TODO(gc) this will be true again when we fix FreeMemory.
303 // ASSERT(size_executable_ == 0); 306 // ASSERT(size_executable_ == 0);
304 capacity_ = 0; 307 capacity_ = 0;
305 capacity_executable_ = 0; 308 capacity_executable_ = 0;
306 } 309 }
307 310
308 311
309 bool MemoryAllocator::CommitMemory(Address base, 312 bool MemoryAllocator::CommitMemory(Address base,
310 size_t size, 313 size_t size,
311 Executability executable) { 314 VirtualMemory::Executability executability) {
312 if (!VirtualMemory::CommitRegion(base, size, executable == EXECUTABLE)) { 315 if (!VirtualMemory::CommitRegion(base, size, executability)) {
313 return false; 316 return false;
314 } 317 }
315 UpdateAllocatedSpaceLimits(base, base + size); 318 UpdateAllocatedSpaceLimits(base, base + size);
316 return true; 319 return true;
317 } 320 }
318 321
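Note the second half of this hunk: CommitRegion previously took a bool (the old code passed executable == EXECUTABLE), and now takes the enum directly. That removes a boolean trap at every call site; as a hedged sketch of the difference:

    // Before: the reader has to know what the bare 'true' means.
    VirtualMemory::CommitRegion(base, size, true);

    // After: intent is visible at the call site.
    VirtualMemory::CommitRegion(base, size, VirtualMemory::EXECUTABLE);
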
319 322
320 void MemoryAllocator::FreeMemory(VirtualMemory* reservation, 323 void MemoryAllocator::FreeMemory(VirtualMemory* reservation,
321 Executability executable) { 324 VirtualMemory::Executability executability) {
322 // TODO(gc) make code_range part of memory allocator? 325 // TODO(gc) make code_range part of memory allocator?
323 ASSERT(reservation->IsReserved()); 326 ASSERT(reservation->IsReserved());
324 size_t size = reservation->size(); 327 size_t size = reservation->size();
325 ASSERT(size_ >= size); 328 ASSERT(size_ >= size);
326 size_ -= size; 329 size_ -= size;
327 330
328 isolate_->counters()->memory_allocated()->Decrement(static_cast<int>(size)); 331 isolate_->counters()->memory_allocated()->Decrement(static_cast<int>(size));
329 332
330 if (executable == EXECUTABLE) { 333 if (executability == VirtualMemory::EXECUTABLE) {
331 ASSERT(size_executable_ >= size); 334 ASSERT(size_executable_ >= size);
332 size_executable_ -= size; 335 size_executable_ -= size;
333 } 336 }
334 // Code which is part of the code-range does not have its own VirtualMemory. 337 // Code which is part of the code-range does not have its own VirtualMemory.
335 ASSERT(!isolate_->code_range()->contains( 338 ASSERT(!isolate_->code_range()->contains(
336 static_cast<Address>(reservation->address()))); 339 static_cast<Address>(reservation->address())));
337 ASSERT(executable == NOT_EXECUTABLE || !isolate_->code_range()->exists()); 340 ASSERT(executability == VirtualMemory::NOT_EXECUTABLE ||
341 !isolate_->code_range()->exists());
338 reservation->Release(); 342 reservation->Release();
339 } 343 }
340 344
341 345
342 void MemoryAllocator::FreeMemory(Address base, 346 void MemoryAllocator::FreeMemory(Address base,
343 size_t size, 347 size_t size,
344 Executability executable) { 348 VirtualMemory::Executability executability) {
345 // TODO(gc) make code_range part of memory allocator? 349 // TODO(gc) make code_range part of memory allocator?
346 ASSERT(size_ >= size); 350 ASSERT(size_ >= size);
347 size_ -= size; 351 size_ -= size;
348 352
349 isolate_->counters()->memory_allocated()->Decrement(static_cast<int>(size)); 353 isolate_->counters()->memory_allocated()->Decrement(static_cast<int>(size));
350 354
351 if (executable == EXECUTABLE) { 355 if (executability == VirtualMemory::EXECUTABLE) {
352 ASSERT(size_executable_ >= size); 356 ASSERT(size_executable_ >= size);
353 size_executable_ -= size; 357 size_executable_ -= size;
354 } 358 }
355 if (isolate_->code_range()->contains(static_cast<Address>(base))) { 359 if (isolate_->code_range()->contains(static_cast<Address>(base))) {
356 ASSERT(executable == EXECUTABLE); 360 ASSERT(executability == VirtualMemory::EXECUTABLE);
357 isolate_->code_range()->FreeRawMemory(base, size); 361 isolate_->code_range()->FreeRawMemory(base, size);
358 } else { 362 } else {
359 ASSERT(executable == NOT_EXECUTABLE || !isolate_->code_range()->exists()); 363 ASSERT(executability == VirtualMemory::NOT_EXECUTABLE ||
364 !isolate_->code_range()->exists());
360 bool result = VirtualMemory::ReleaseRegion(base, size); 365 bool result = VirtualMemory::ReleaseRegion(base, size);
361 USE(result); 366 USE(result);
362 ASSERT(result); 367 ASSERT(result);
363 } 368 }
364 } 369 }
365 370
366 371
367 Address MemoryAllocator::ReserveAlignedMemory(size_t size, 372 Address MemoryAllocator::ReserveAlignedMemory(size_t size,
368 size_t alignment, 373 size_t alignment,
369 VirtualMemory* controller) { 374 VirtualMemory* controller) {
370 VirtualMemory reservation(size, alignment); 375 VirtualMemory reservation(size, alignment);
371 376
372 if (!reservation.IsReserved()) return NULL; 377 if (!reservation.IsReserved()) return NULL;
373 size_ += reservation.size(); 378 size_ += reservation.size();
374 Address base = RoundUp(static_cast<Address>(reservation.address()), 379 Address base = RoundUp(static_cast<Address>(reservation.address()),
375 alignment); 380 alignment);
376 controller->TakeControl(&reservation); 381 controller->TakeControl(&reservation);
377 return base; 382 return base;
378 } 383 }
379 384
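ReserveAlignedMemory over-reserves and then rounds the base address up to the requested alignment. The RoundUp it relies on is the usual power-of-two trick; a self-contained sketch of just the arithmetic (V8 has its own RoundUp helper):

    #include <stdint.h>

    // Round 'value' up to the next multiple of 'alignment', which must
    // be a power of two (true for MemoryChunk::kAlignment).
    static inline uintptr_t RoundUpPow2(uintptr_t value,
                                        uintptr_t alignment) {
      return (value + alignment - 1) & ~(alignment - 1);
    }
    // e.g. RoundUpPow2(0x12345, 0x1000) == 0x13000
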
380 385
381 Address MemoryAllocator::AllocateAlignedMemory(size_t reserve_size, 386 Address MemoryAllocator::AllocateAlignedMemory(
382 size_t commit_size, 387 size_t reserve_size,
383 size_t alignment, 388 size_t commit_size,
384 Executability executable, 389 size_t alignment,
385 VirtualMemory* controller) { 390 VirtualMemory::Executability executability,
391 VirtualMemory* controller) {
386 ASSERT(commit_size <= reserve_size); 392 ASSERT(commit_size <= reserve_size);
387 VirtualMemory reservation; 393 VirtualMemory reservation;
388 Address base = ReserveAlignedMemory(reserve_size, alignment, &reservation); 394 Address base = ReserveAlignedMemory(reserve_size, alignment, &reservation);
389 if (base == NULL) return NULL; 395 if (base == NULL) return NULL;
390 396
391 if (executable == EXECUTABLE) { 397 if (executability == VirtualMemory::EXECUTABLE) {
392 if (!CommitExecutableMemory(&reservation, 398 if (!CommitExecutableMemory(&reservation,
393 base, 399 base,
394 commit_size, 400 commit_size,
395 reserve_size)) { 401 reserve_size)) {
396 base = NULL; 402 base = NULL;
397 } 403 }
398 } else { 404 } else {
399 if (reservation.Commit(base, commit_size, false)) { 405 if (reservation.Commit(base, commit_size, VirtualMemory::NOT_EXECUTABLE)) {
400 UpdateAllocatedSpaceLimits(base, base + commit_size); 406 UpdateAllocatedSpaceLimits(base, base + commit_size);
401 } else { 407 } else {
402 base = NULL; 408 base = NULL;
403 } 409 }
404 } 410 }
405 411
406 if (base == NULL) { 412 if (base == NULL) {
407 // Failed to commit the body. Release the mapping and any partially 413 // Failed to commit the body. Release the mapping and any partially
408 // committed regions inside it. 414 // committed regions inside it.
409 reservation.Release(); 415 reservation.Release();
(...skipping 16 matching lines...)
426 Address start, 432 Address start,
427 SemiSpace* semi_space) { 433 SemiSpace* semi_space) {
428 Address area_start = start + NewSpacePage::kObjectStartOffset; 434 Address area_start = start + NewSpacePage::kObjectStartOffset;
429 Address area_end = start + Page::kPageSize; 435 Address area_end = start + Page::kPageSize;
430 436
431 MemoryChunk* chunk = MemoryChunk::Initialize(heap, 437 MemoryChunk* chunk = MemoryChunk::Initialize(heap,
432 start, 438 start,
433 Page::kPageSize, 439 Page::kPageSize,
434 area_start, 440 area_start,
435 area_end, 441 area_end,
436 NOT_EXECUTABLE, 442 VirtualMemory::NOT_EXECUTABLE,
437 semi_space); 443 semi_space);
438 chunk->set_next_chunk(NULL); 444 chunk->set_next_chunk(NULL);
439 chunk->set_prev_chunk(NULL); 445 chunk->set_prev_chunk(NULL);
440 chunk->initialize_scan_on_scavenge(true); 446 chunk->initialize_scan_on_scavenge(true);
441 bool in_to_space = (semi_space->id() != kFromSpace); 447 bool in_to_space = (semi_space->id() != kFromSpace);
442 chunk->SetFlag(in_to_space ? MemoryChunk::IN_TO_SPACE 448 chunk->SetFlag(in_to_space ? MemoryChunk::IN_TO_SPACE
443 : MemoryChunk::IN_FROM_SPACE); 449 : MemoryChunk::IN_FROM_SPACE);
444 ASSERT(!chunk->IsFlagSet(in_to_space ? MemoryChunk::IN_FROM_SPACE 450 ASSERT(!chunk->IsFlagSet(in_to_space ? MemoryChunk::IN_FROM_SPACE
445 : MemoryChunk::IN_TO_SPACE)); 451 : MemoryChunk::IN_TO_SPACE));
446 NewSpacePage* page = static_cast<NewSpacePage*>(chunk); 452 NewSpacePage* page = static_cast<NewSpacePage*>(chunk);
(...skipping 10 matching lines...)
457 // All real new-space pages will be in new-space. 463 // All real new-space pages will be in new-space.
458 SetFlags(0, ~0); 464 SetFlags(0, ~0);
459 } 465 }
460 466
461 467
462 MemoryChunk* MemoryChunk::Initialize(Heap* heap, 468 MemoryChunk* MemoryChunk::Initialize(Heap* heap,
463 Address base, 469 Address base,
464 size_t size, 470 size_t size,
465 Address area_start, 471 Address area_start,
466 Address area_end, 472 Address area_end,
467 Executability executable, 473 VirtualMemory::Executability executability,
468 Space* owner) { 474 Space* owner) {
469 MemoryChunk* chunk = FromAddress(base); 475 MemoryChunk* chunk = FromAddress(base);
470 476
471 ASSERT(base == chunk->address()); 477 ASSERT(base == chunk->address());
472 478
473 chunk->heap_ = heap; 479 chunk->heap_ = heap;
474 chunk->size_ = size; 480 chunk->size_ = size;
475 chunk->area_start_ = area_start; 481 chunk->area_start_ = area_start;
476 chunk->area_end_ = area_end; 482 chunk->area_end_ = area_end;
477 chunk->flags_ = 0; 483 chunk->flags_ = 0;
(...skipping 11 matching lines...)
489 chunk->available_in_huge_free_list_ = 0; 495 chunk->available_in_huge_free_list_ = 0;
490 chunk->non_available_small_blocks_ = 0; 496 chunk->non_available_small_blocks_ = 0;
491 chunk->ResetLiveBytes(); 497 chunk->ResetLiveBytes();
492 Bitmap::Clear(chunk); 498 Bitmap::Clear(chunk);
493 chunk->initialize_scan_on_scavenge(false); 499 chunk->initialize_scan_on_scavenge(false);
494 chunk->SetFlag(WAS_SWEPT_PRECISELY); 500 chunk->SetFlag(WAS_SWEPT_PRECISELY);
495 501
496 ASSERT(OFFSET_OF(MemoryChunk, flags_) == kFlagsOffset); 502 ASSERT(OFFSET_OF(MemoryChunk, flags_) == kFlagsOffset);
497 ASSERT(OFFSET_OF(MemoryChunk, live_byte_count_) == kLiveBytesOffset); 503 ASSERT(OFFSET_OF(MemoryChunk, live_byte_count_) == kLiveBytesOffset);
498 504
499 if (executable == EXECUTABLE) { 505 if (executability == VirtualMemory::EXECUTABLE) {
500 chunk->SetFlag(IS_EXECUTABLE); 506 chunk->SetFlag(IS_EXECUTABLE);
501 } 507 }
502 508
503 if (owner == heap->old_data_space()) { 509 if (owner == heap->old_data_space()) {
504 chunk->SetFlag(CONTAINS_ONLY_DATA); 510 chunk->SetFlag(CONTAINS_ONLY_DATA);
505 } 511 }
506 512
507 return chunk; 513 return chunk;
508 } 514 }
509 515
510 516
511 // Commit MemoryChunk area to the requested size. 517 // Commit MemoryChunk area to the requested size.
512 bool MemoryChunk::CommitArea(size_t requested) { 518 bool MemoryChunk::CommitArea(size_t requested) {
513 size_t guard_size = IsFlagSet(IS_EXECUTABLE) ? 519 size_t guard_size = IsFlagSet(IS_EXECUTABLE) ?
514 MemoryAllocator::CodePageGuardSize() : 0; 520 MemoryAllocator::CodePageGuardSize() : 0;
515 size_t header_size = area_start() - address() - guard_size; 521 size_t header_size = area_start() - address() - guard_size;
516 size_t commit_size = RoundUp(header_size + requested, OS::CommitPageSize()); 522 size_t commit_size = RoundUp(header_size + requested,
523 VirtualMemory::GetPageSize());
517 size_t committed_size = RoundUp(header_size + (area_end() - area_start()), 524 size_t committed_size = RoundUp(header_size + (area_end() - area_start()),
518 OS::CommitPageSize()); 525 VirtualMemory::GetPageSize());
519 526
520 if (commit_size > committed_size) { 527 if (commit_size > committed_size) {
521 // Commit size should be less than or equal to the reserved size. 528 // Commit size should be less than or equal to the reserved size.
522 ASSERT(commit_size <= size() - 2 * guard_size); 529 ASSERT(commit_size <= size() - 2 * guard_size);
523 // Append the committed area. 530 // Append the committed area.
524 Address start = address() + committed_size + guard_size; 531 Address start = address() + committed_size + guard_size;
525 size_t length = commit_size - committed_size; 532 size_t length = commit_size - committed_size;
526 if (reservation_.IsReserved()) { 533 if (reservation_.IsReserved()) {
527 Executability executable = IsFlagSet(IS_EXECUTABLE) 534 VirtualMemory::Executability executability = IsFlagSet(IS_EXECUTABLE)
528 ? EXECUTABLE : NOT_EXECUTABLE; 535 ? VirtualMemory::EXECUTABLE : VirtualMemory::NOT_EXECUTABLE;
529 if (!heap()->isolate()->memory_allocator()->CommitMemory( 536 if (!heap()->isolate()->memory_allocator()->CommitMemory(
530 start, length, executable)) { 537 start, length, executability)) {
531 return false; 538 return false;
532 } 539 }
533 } else { 540 } else {
534 CodeRange* code_range = heap_->isolate()->code_range(); 541 CodeRange* code_range = heap_->isolate()->code_range();
535 ASSERT(code_range->exists() && IsFlagSet(IS_EXECUTABLE)); 542 ASSERT(code_range->exists() && IsFlagSet(IS_EXECUTABLE));
536 if (!code_range->CommitRawMemory(start, length)) return false; 543 if (!code_range->CommitRawMemory(start, length)) return false;
537 } 544 }
538 545
539 if (Heap::ShouldZapGarbage()) { 546 if (Heap::ShouldZapGarbage()) {
540 heap_->isolate()->memory_allocator()->ZapBlock(start, length); 547 heap_->isolate()->memory_allocator()->ZapBlock(start, length);
(...skipping 41 matching lines...)
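The arithmetic above grows the committed part of the chunk in whole OS pages, measuring both sizes from the chunk base past the header (plus the leading guard page for executable chunks). A worked sketch with made-up numbers, assuming 4 KB pages:

    // Illustrative only: 4 KB pages, header ends 1 KB into the chunk,
    // 10 KB of area already committed, caller requests 20 KB.
    size_t page = 4096;                                   // GetPageSize()
    size_t header_size = 1024;                            // hypothetical
    size_t committed_size = RoundUp(header_size + 10240, page);  // 12288
    size_t commit_size    = RoundUp(header_size + 20480, page);  // 24576
    // commit_size > committed_size, so the slice [12 KB, 24 KB) past the
    // chunk base (plus the guard offset, if executable) gets committed.
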
582 heap_->decrement_scan_on_scavenge_pages(); 589 heap_->decrement_scan_on_scavenge_pages();
583 ClearFlag(SCAN_ON_SCAVENGE); 590 ClearFlag(SCAN_ON_SCAVENGE);
584 } 591 }
585 next_chunk_->prev_chunk_ = prev_chunk_; 592 next_chunk_->prev_chunk_ = prev_chunk_;
586 prev_chunk_->next_chunk_ = next_chunk_; 593 prev_chunk_->next_chunk_ = next_chunk_;
587 prev_chunk_ = NULL; 594 prev_chunk_ = NULL;
588 next_chunk_ = NULL; 595 next_chunk_ = NULL;
589 } 596 }
590 597
591 598
592 MemoryChunk* MemoryAllocator::AllocateChunk(intptr_t reserve_area_size, 599 MemoryChunk* MemoryAllocator::AllocateChunk(
593 intptr_t commit_area_size, 600 intptr_t reserve_area_size,
594 Executability executable, 601 intptr_t commit_area_size,
595 Space* owner) { 602 VirtualMemory::Executability executability,
603 Space* owner) {
596 ASSERT(commit_area_size <= reserve_area_size); 604 ASSERT(commit_area_size <= reserve_area_size);
597 605
598 size_t chunk_size; 606 size_t chunk_size;
599 Heap* heap = isolate_->heap(); 607 Heap* heap = isolate_->heap();
600 Address base = NULL; 608 Address base = NULL;
601 VirtualMemory reservation; 609 VirtualMemory reservation;
602 Address area_start = NULL; 610 Address area_start = NULL;
603 Address area_end = NULL; 611 Address area_end = NULL;
604 612
605 // 613 //
(...skipping 19 matching lines...)
625 // | Header | 633 // | Header |
626 // +----------------------------+<- area_start_ (base + kObjectStartOffset) 634 // +----------------------------+<- area_start_ (base + kObjectStartOffset)
627 // | Area | 635 // | Area |
628 // +----------------------------+<- area_end_ (area_start + commit_area_size) 636 // +----------------------------+<- area_end_ (area_start + commit_area_size)
629 // | Committed but not used | 637 // | Committed but not used |
630 // +----------------------------+<- aligned at OS page boundary 638 // +----------------------------+<- aligned at OS page boundary
631 // | Reserved but not committed | 639 // | Reserved but not committed |
632 // +----------------------------+<- base + chunk_size 640 // +----------------------------+<- base + chunk_size
633 // 641 //
634 642
635 if (executable == EXECUTABLE) { 643 if (executability == VirtualMemory::EXECUTABLE) {
636 chunk_size = RoundUp(CodePageAreaStartOffset() + reserve_area_size, 644 chunk_size = RoundUp(CodePageAreaStartOffset() + reserve_area_size,
637 OS::CommitPageSize()) + CodePageGuardSize(); 645 VirtualMemory::GetPageSize()) + CodePageGuardSize();
638 646
639 // Check executable memory limit. 647 // Check executable memory limit.
640 if (size_executable_ + chunk_size > capacity_executable_) { 648 if (size_executable_ + chunk_size > capacity_executable_) {
641 LOG(isolate_, 649 LOG(isolate_,
642 StringEvent("MemoryAllocator::AllocateRawMemory", 650 StringEvent("MemoryAllocator::AllocateRawMemory",
643 "V8 Executable Allocation capacity exceeded")); 651 "V8 Executable Allocation capacity exceeded"));
644 return NULL; 652 return NULL;
645 } 653 }
646 654
647 // Size of header (not executable) plus area (executable). 655 // Size of header (not executable) plus area (executable).
648 size_t commit_size = RoundUp(CodePageGuardStartOffset() + commit_area_size, 656 size_t commit_size = RoundUp(CodePageGuardStartOffset() + commit_area_size,
649 OS::CommitPageSize()); 657 VirtualMemory::GetPageSize());
650 // Allocate executable memory either from code range or from the 658 // Allocate executable memory either from code range or from the
651 // OS. 659 // OS.
652 if (isolate_->code_range()->exists()) { 660 if (isolate_->code_range()->exists()) {
653 base = isolate_->code_range()->AllocateRawMemory(chunk_size, 661 base = isolate_->code_range()->AllocateRawMemory(chunk_size,
654 commit_size, 662 commit_size,
655 &chunk_size); 663 &chunk_size);
656 ASSERT(IsAligned(reinterpret_cast<intptr_t>(base), 664 ASSERT(IsAligned(reinterpret_cast<intptr_t>(base),
657 MemoryChunk::kAlignment)); 665 MemoryChunk::kAlignment));
658 if (base == NULL) return NULL; 666 if (base == NULL) return NULL;
659 size_ += chunk_size; 667 size_ += chunk_size;
660 // Update executable memory size. 668 // Update executable memory size.
661 size_executable_ += chunk_size; 669 size_executable_ += chunk_size;
662 } else { 670 } else {
663 base = AllocateAlignedMemory(chunk_size, 671 base = AllocateAlignedMemory(chunk_size,
664 commit_size, 672 commit_size,
665 MemoryChunk::kAlignment, 673 MemoryChunk::kAlignment,
666 executable, 674 executability,
667 &reservation); 675 &reservation);
668 if (base == NULL) return NULL; 676 if (base == NULL) return NULL;
669 // Update executable memory size. 677 // Update executable memory size.
670 size_executable_ += reservation.size(); 678 size_executable_ += reservation.size();
671 } 679 }
672 680
673 if (Heap::ShouldZapGarbage()) { 681 if (Heap::ShouldZapGarbage()) {
674 ZapBlock(base, CodePageGuardStartOffset()); 682 ZapBlock(base, CodePageGuardStartOffset());
675 ZapBlock(base + CodePageAreaStartOffset(), commit_area_size); 683 ZapBlock(base + CodePageAreaStartOffset(), commit_area_size);
676 } 684 }
677 685
678 area_start = base + CodePageAreaStartOffset(); 686 area_start = base + CodePageAreaStartOffset();
679 area_end = area_start + commit_area_size; 687 area_end = area_start + commit_area_size;
680 } else { 688 } else {
681 chunk_size = RoundUp(MemoryChunk::kObjectStartOffset + reserve_area_size, 689 chunk_size = RoundUp(MemoryChunk::kObjectStartOffset + reserve_area_size,
682 OS::CommitPageSize()); 690 VirtualMemory::GetPageSize());
683 size_t commit_size = RoundUp(MemoryChunk::kObjectStartOffset + 691 size_t commit_size = RoundUp(
684 commit_area_size, OS::CommitPageSize()); 692 MemoryChunk::kObjectStartOffset + commit_area_size,
693 VirtualMemory::GetPageSize());
685 base = AllocateAlignedMemory(chunk_size, 694 base = AllocateAlignedMemory(chunk_size,
686 commit_size, 695 commit_size,
687 MemoryChunk::kAlignment, 696 MemoryChunk::kAlignment,
688 executable, 697 executability,
689 &reservation); 698 &reservation);
690 699
691 if (base == NULL) return NULL; 700 if (base == NULL) return NULL;
692 701
693 if (Heap::ShouldZapGarbage()) { 702 if (Heap::ShouldZapGarbage()) {
694 ZapBlock(base, Page::kObjectStartOffset + commit_area_size); 703 ZapBlock(base, Page::kObjectStartOffset + commit_area_size);
695 } 704 }
696 705
697 area_start = base + Page::kObjectStartOffset; 706 area_start = base + Page::kObjectStartOffset;
698 area_end = area_start + commit_area_size; 707 area_end = area_start + commit_area_size;
699 } 708 }
700 709
701 // Use chunk_size for statistics and callbacks because we assume that they 710 // Use chunk_size for statistics and callbacks because we assume that they
702 // treat reserved but not-yet committed memory regions of chunks as allocated. 711 // treat reserved but not-yet committed memory regions of chunks as allocated.
703 isolate_->counters()->memory_allocated()-> 712 isolate_->counters()->memory_allocated()->
704 Increment(static_cast<int>(chunk_size)); 713 Increment(static_cast<int>(chunk_size));
705 714
706 LOG(isolate_, NewEvent("MemoryChunk", base, chunk_size)); 715 LOG(isolate_, NewEvent("MemoryChunk", base, chunk_size));
707 if (owner != NULL) { 716 if (owner != NULL) {
708 ObjectSpace space = static_cast<ObjectSpace>(1 << owner->identity()); 717 ObjectSpace space = static_cast<ObjectSpace>(1 << owner->identity());
709 PerformAllocationCallback(space, kAllocationActionAllocate, chunk_size); 718 PerformAllocationCallback(space, kAllocationActionAllocate, chunk_size);
710 } 719 }
711 720
712 MemoryChunk* result = MemoryChunk::Initialize(heap, 721 MemoryChunk* result = MemoryChunk::Initialize(heap,
713 base, 722 base,
714 chunk_size, 723 chunk_size,
715 area_start, 724 area_start,
716 area_end, 725 area_end,
717 executable, 726 executability,
718 owner); 727 owner);
719 result->set_reserved_memory(&reservation); 728 result->set_reserved_memory(&reservation);
720 return result; 729 return result;
721 } 730 }
722 731
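For the executable branch, the sizes computed above bracket the layout in the ASCII diagram: a rounded-up header, a leading guard page, the area, and a trailing guard page. Plugging in hypothetical numbers (4 KB pages, one-page header and guards, 40 KB reserve area, 8 KB commit area; all values illustrative, the real offsets come from the CodePage* helpers):

    size_t page = 4096;
    size_t guard_start = page;        // CodePageGuardStartOffset()
    size_t area_start = 2 * page;     // CodePageAreaStartOffset()
    size_t chunk_size =
        RoundUp(area_start + 40 * 1024, page) + page;  // 49152 + 4096
    size_t commit_size =
        RoundUp(guard_start + 8 * 1024, page);         // 12288
    // 52 KB reserved in total, 12 KB committed up front (header, leading
    // guard, and the first 8 KB of the area).
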
723 732
724 void Page::ResetFreeListStatistics() { 733 void Page::ResetFreeListStatistics() {
725 non_available_small_blocks_ = 0; 734 non_available_small_blocks_ = 0;
726 available_in_small_free_list_ = 0; 735 available_in_small_free_list_ = 0;
727 available_in_medium_free_list_ = 0; 736 available_in_medium_free_list_ = 0;
728 available_in_large_free_list_ = 0; 737 available_in_large_free_list_ = 0;
729 available_in_huge_free_list_ = 0; 738 available_in_huge_free_list_ = 0;
730 } 739 }
731 740
732 741
733 Page* MemoryAllocator::AllocatePage(intptr_t size, 742 Page* MemoryAllocator::AllocatePage(
734 PagedSpace* owner, 743 intptr_t size,
735 Executability executable) { 744 PagedSpace* owner,
736 MemoryChunk* chunk = AllocateChunk(size, size, executable, owner); 745 VirtualMemory::Executability executability) {
746 MemoryChunk* chunk = AllocateChunk(size, size, executability, owner);
737 747
738 if (chunk == NULL) return NULL; 748 if (chunk == NULL) return NULL;
739 749
740 return Page::Initialize(isolate_->heap(), chunk, executable, owner); 750 return Page::Initialize(isolate_->heap(), chunk, executability, owner);
741 } 751 }
742 752
743 753
744 LargePage* MemoryAllocator::AllocateLargePage(intptr_t object_size, 754 LargePage* MemoryAllocator::AllocateLargePage(
745 Space* owner, 755 intptr_t object_size,
746 Executability executable) { 756 Space* owner,
757 VirtualMemory::Executability executability) {
747 MemoryChunk* chunk = AllocateChunk(object_size, 758 MemoryChunk* chunk = AllocateChunk(object_size,
748 object_size, 759 object_size,
749 executable, 760 executability,
750 owner); 761 owner);
751 if (chunk == NULL) return NULL; 762 if (chunk == NULL) return NULL;
752 return LargePage::Initialize(isolate_->heap(), chunk); 763 return LargePage::Initialize(isolate_->heap(), chunk);
753 } 764 }
754 765
755 766
756 void MemoryAllocator::Free(MemoryChunk* chunk) { 767 void MemoryAllocator::Free(MemoryChunk* chunk) {
757 LOG(isolate_, DeleteEvent("MemoryChunk", chunk)); 768 LOG(isolate_, DeleteEvent("MemoryChunk", chunk));
758 if (chunk->owner() != NULL) { 769 if (chunk->owner() != NULL) {
759 ObjectSpace space = 770 ObjectSpace space =
760 static_cast<ObjectSpace>(1 << chunk->owner()->identity()); 771 static_cast<ObjectSpace>(1 << chunk->owner()->identity());
761 PerformAllocationCallback(space, kAllocationActionFree, chunk->size()); 772 PerformAllocationCallback(space, kAllocationActionFree, chunk->size());
762 } 773 }
763 774
764 isolate_->heap()->RememberUnmappedPage( 775 isolate_->heap()->RememberUnmappedPage(
765 reinterpret_cast<Address>(chunk), chunk->IsEvacuationCandidate()); 776 reinterpret_cast<Address>(chunk), chunk->IsEvacuationCandidate());
766 777
767 delete chunk->slots_buffer(); 778 delete chunk->slots_buffer();
768 delete chunk->skip_list(); 779 delete chunk->skip_list();
769 780
770 VirtualMemory* reservation = chunk->reserved_memory(); 781 VirtualMemory* reservation = chunk->reserved_memory();
771 if (reservation->IsReserved()) { 782 if (reservation->IsReserved()) {
772 FreeMemory(reservation, chunk->executable()); 783 FreeMemory(reservation, chunk->executability());
773 } else { 784 } else {
774 FreeMemory(chunk->address(), 785 FreeMemory(chunk->address(),
775 chunk->size(), 786 chunk->size(),
776 chunk->executable()); 787 chunk->executability());
777 } 788 }
778 } 789 }
779 790
780 791
781 bool MemoryAllocator::CommitBlock(Address start, 792 bool MemoryAllocator::CommitBlock(Address start,
782 size_t size, 793 size_t size,
783 Executability executable) { 794 VirtualMemory::Executability executability) {
784 if (!CommitMemory(start, size, executable)) return false; 795 if (!CommitMemory(start, size, executability)) return false;
785 796
786 if (Heap::ShouldZapGarbage()) { 797 if (Heap::ShouldZapGarbage()) {
787 ZapBlock(start, size); 798 ZapBlock(start, size);
788 } 799 }
789 800
790 isolate_->counters()->memory_allocated()->Increment(static_cast<int>(size)); 801 isolate_->counters()->memory_allocated()->Increment(static_cast<int>(size));
791 return true; 802 return true;
792 } 803 }
793 804
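When Heap::ShouldZapGarbage() is set (debug configurations), freshly committed blocks are filled with a recognizable pattern so reads of uninitialized heap memory fail loudly. A hedged sketch of what ZapBlock does; the loop shape is assumed from V8 of this era, and kZapValue is V8's constant whose exact value is not shown here:

    // Sketch: fill the block one pointer-sized word at a time.
    void ZapBlockSketch(Address start, size_t size) {
      for (size_t s = 0; s + kPointerSize <= size; s += kPointerSize) {
        Memory::Address_at(start + s) = kZapValue;  // V8 names; usage assumed
      }
    }
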
794 805
(...skipping 64 matching lines...)
859 ", used: %" V8_PTR_PREFIX "d" 870 ", used: %" V8_PTR_PREFIX "d"
860 ", available: %%%d\n\n", 871 ", available: %%%d\n\n",
861 capacity_, size_, static_cast<int>(pct*100)); 872 capacity_, size_, static_cast<int>(pct*100));
862 } 873 }
863 #endif 874 #endif
864 875
865 876
866 int MemoryAllocator::CodePageGuardStartOffset() { 877 int MemoryAllocator::CodePageGuardStartOffset() {
867 // We are guarding code pages: the first OS page after the header 878 // We are guarding code pages: the first OS page after the header
868 // will be protected as non-writable. 879 // will be protected as non-writable.
869 return RoundUp(Page::kObjectStartOffset, OS::CommitPageSize()); 880 return RoundUp(Page::kObjectStartOffset, VirtualMemory::GetPageSize());
870 } 881 }
871 882
872 883
873 int MemoryAllocator::CodePageGuardSize() { 884 int MemoryAllocator::CodePageGuardSize() {
874 return static_cast<int>(OS::CommitPageSize()); 885 return static_cast<int>(VirtualMemory::GetPageSize());
875 } 886 }
876 887
877 888
878 int MemoryAllocator::CodePageAreaStartOffset() { 889 int MemoryAllocator::CodePageAreaStartOffset() {
879 // We are guarding code pages: the first OS page after the header 890 // We are guarding code pages: the first OS page after the header
880 // will be protected as non-writable. 891 // will be protected as non-writable.
881 return CodePageGuardStartOffset() + CodePageGuardSize(); 892 return CodePageGuardStartOffset() + CodePageGuardSize();
882 } 893 }
883 894
884 895
885 int MemoryAllocator::CodePageAreaEndOffset() { 896 int MemoryAllocator::CodePageAreaEndOffset() {
886 // We are guarding code pages: the last OS page will be protected as 897 // We are guarding code pages: the last OS page will be protected as
887 // non-writable. 898 // non-writable.
888 return Page::kPageSize - static_cast<int>(OS::CommitPageSize()); 899 return Page::kPageSize - static_cast<int>(VirtualMemory::GetPageSize());
889 } 900 }
890 901
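Together these helpers fix the geometry of a code page: one non-writable guard page right after the rounded-up header and one at the very end. The usable area between them, consumed later in this file as CodePageAreaSize(), is presumably just the difference (assumed definition; the real one is declared in the header):

    int CodePageAreaSize() {
      return CodePageAreaEndOffset() - CodePageAreaStartOffset();
    }
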
891 902
892 bool MemoryAllocator::CommitExecutableMemory(VirtualMemory* vm, 903 bool MemoryAllocator::CommitExecutableMemory(VirtualMemory* vm,
893 Address start, 904 Address start,
894 size_t commit_size, 905 size_t commit_size,
895 size_t reserved_size) { 906 size_t reserved_size) {
896 // Commit page header (not executable). 907 // Commit page header (not executable).
897 if (!vm->Commit(start, 908 if (!vm->Commit(start,
898 CodePageGuardStartOffset(), 909 CodePageGuardStartOffset(),
899 false)) { 910 VirtualMemory::NOT_EXECUTABLE)) {
900 return false; 911 return false;
901 } 912 }
902 913
903 // Create guard page after the header. 914 // Create guard page after the header.
904 if (!vm->Guard(start + CodePageGuardStartOffset())) { 915 if (!vm->Guard(start + CodePageGuardStartOffset(),
916 VirtualMemory::GetPageSize())) {
905 return false; 917 return false;
906 } 918 }
907 919
908 // Commit page body (executable). 920 // Commit page body (executable).
909 if (!vm->Commit(start + CodePageAreaStartOffset(), 921 if (!vm->Commit(start + CodePageAreaStartOffset(),
910 commit_size - CodePageGuardStartOffset(), 922 commit_size - CodePageGuardStartOffset(),
911 true)) { 923 VirtualMemory::EXECUTABLE)) {
912 return false; 924 return false;
913 } 925 }
914 926
915 // Create guard page before the end. 927 // Create guard page before the end.
916 if (!vm->Guard(start + reserved_size - CodePageGuardSize())) { 928 if (!vm->Guard(start + reserved_size - CodePageGuardSize(),
929 VirtualMemory::GetPageSize())) {
917 return false; 930 return false;
918 } 931 }
919 932
920 UpdateAllocatedSpaceLimits(start, 933 UpdateAllocatedSpaceLimits(start,
921 start + CodePageAreaStartOffset() + 934 start + CodePageAreaStartOffset() +
922 commit_size - CodePageGuardStartOffset()); 935 commit_size - CodePageGuardStartOffset());
923 return true; 936 return true;
924 } 937 }
925 938
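The four steps above produce the following protection map for an executable page (a sketch; the exact boundaries depend on the CodePage* helpers and the committed size):

    // [base, guard_start)        committed, not executable   (header)
    // [guard_start, area_start)  guard page, no access
    // [area_start, commit end)   committed, executable       (body)
    // [commit end, last guard)   reserved, not committed
    // [end - guard, end)         guard page, no access
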
926 939
927 // ----------------------------------------------------------------------------- 940 // -----------------------------------------------------------------------------
928 // MemoryChunk implementation 941 // MemoryChunk implementation
929 942
930 void MemoryChunk::IncrementLiveBytesFromMutator(Address address, int by) { 943 void MemoryChunk::IncrementLiveBytesFromMutator(Address address, int by) {
931 MemoryChunk* chunk = MemoryChunk::FromAddress(address); 944 MemoryChunk* chunk = MemoryChunk::FromAddress(address);
932 if (!chunk->InNewSpace() && !static_cast<Page*>(chunk)->WasSwept()) { 945 if (!chunk->InNewSpace() && !static_cast<Page*>(chunk)->WasSwept()) {
933 static_cast<PagedSpace*>(chunk->owner())->IncrementUnsweptFreeBytes(-by); 946 static_cast<PagedSpace*>(chunk->owner())->IncrementUnsweptFreeBytes(-by);
934 } 947 }
935 chunk->IncrementLiveBytes(by); 948 chunk->IncrementLiveBytes(by);
936 } 949 }
937 950
938 951
939 // ----------------------------------------------------------------------------- 952 // -----------------------------------------------------------------------------
940 // PagedSpace implementation 953 // PagedSpace implementation
941 954
942 PagedSpace::PagedSpace(Heap* heap, 955 PagedSpace::PagedSpace(Heap* heap,
943 intptr_t max_capacity, 956 intptr_t max_capacity,
944 AllocationSpace id, 957 AllocationSpace id,
945 Executability executable) 958 VirtualMemory::Executability executability)
946 : Space(heap, id, executable), 959 : Space(heap, id, executability),
947 free_list_(this), 960 free_list_(this),
948 was_swept_conservatively_(false), 961 was_swept_conservatively_(false),
949 first_unswept_page_(Page::FromAddress(NULL)), 962 first_unswept_page_(Page::FromAddress(NULL)),
950 unswept_free_bytes_(0) { 963 unswept_free_bytes_(0) {
951 if (id == CODE_SPACE) { 964 if (id == CODE_SPACE) {
952 area_size_ = heap->isolate()->memory_allocator()-> 965 area_size_ = heap->isolate()->memory_allocator()->
953 CodePageAreaSize(); 966 CodePageAreaSize();
954 } else { 967 } else {
955 area_size_ = Page::kPageSize - Page::kObjectStartOffset; 968 area_size_ = Page::kPageSize - Page::kObjectStartOffset;
956 } 969 }
(...skipping 77 matching lines...)
1034 bool PagedSpace::Expand() { 1047 bool PagedSpace::Expand() {
1035 if (!CanExpand()) return false; 1048 if (!CanExpand()) return false;
1036 1049
1037 intptr_t size = AreaSize(); 1050 intptr_t size = AreaSize();
1038 1051
1039 if (anchor_.next_page() == &anchor_) { 1052 if (anchor_.next_page() == &anchor_) {
1040 size = SizeOfFirstPage(); 1053 size = SizeOfFirstPage();
1041 } 1054 }
1042 1055
1043 Page* p = heap()->isolate()->memory_allocator()->AllocatePage( 1056 Page* p = heap()->isolate()->memory_allocator()->AllocatePage(
1044 size, this, executable()); 1057 size, this, executability());
1045 if (p == NULL) return false; 1058 if (p == NULL) return false;
1046 1059
1047 ASSERT(Capacity() <= max_capacity_); 1060 ASSERT(Capacity() <= max_capacity_);
1048 1061
1049 p->InsertAfter(anchor_.prev_page()); 1062 p->InsertAfter(anchor_.prev_page());
1050 1063
1051 return true; 1064 return true;
1052 } 1065 }
1053 1066
1054 1067
(...skipping 226 matching lines...)
1281 start_ = NULL; 1294 start_ = NULL;
1282 allocation_info_.top = NULL; 1295 allocation_info_.top = NULL;
1283 allocation_info_.limit = NULL; 1296 allocation_info_.limit = NULL;
1284 1297
1285 to_space_.TearDown(); 1298 to_space_.TearDown();
1286 from_space_.TearDown(); 1299 from_space_.TearDown();
1287 1300
1288 LOG(heap()->isolate(), DeleteEvent("InitialChunk", chunk_base_)); 1301 LOG(heap()->isolate(), DeleteEvent("InitialChunk", chunk_base_));
1289 1302
1290 ASSERT(reservation_.IsReserved()); 1303 ASSERT(reservation_.IsReserved());
1291 heap()->isolate()->memory_allocator()->FreeMemory(&reservation_, 1304 heap()->isolate()->memory_allocator()->FreeMemory(
1292 NOT_EXECUTABLE); 1305 &reservation_, VirtualMemory::NOT_EXECUTABLE);
1293 chunk_base_ = NULL; 1306 chunk_base_ = NULL;
1294 chunk_size_ = 0; 1307 chunk_size_ = 0;
1295 } 1308 }
1296 1309
1297 1310
1298 void NewSpace::Flip() { 1311 void NewSpace::Flip() {
1299 SemiSpace::Swap(&from_space_, &to_space_); 1312 SemiSpace::Swap(&from_space_, &to_space_);
1300 } 1313 }
1301 1314
1302 1315
(...skipping 214 matching lines...)
1517 } 1530 }
1518 1531
1519 1532
1520 bool SemiSpace::Commit() { 1533 bool SemiSpace::Commit() {
1521 ASSERT(!is_committed()); 1534 ASSERT(!is_committed());
1522 int pages = capacity_ / Page::kPageSize; 1535 int pages = capacity_ / Page::kPageSize;
1523 Address end = start_ + maximum_capacity_; 1536 Address end = start_ + maximum_capacity_;
1524 Address start = end - pages * Page::kPageSize; 1537 Address start = end - pages * Page::kPageSize;
1525 if (!heap()->isolate()->memory_allocator()->CommitBlock(start, 1538 if (!heap()->isolate()->memory_allocator()->CommitBlock(start,
1526 capacity_, 1539 capacity_,
1527 executable())) { 1540 executability())) {
1528 return false; 1541 return false;
1529 } 1542 }
1530 1543
1531 NewSpacePage* page = anchor(); 1544 NewSpacePage* page = anchor();
1532 for (int i = 1; i <= pages; i++) { 1545 for (int i = 1; i <= pages; i++) {
1533 NewSpacePage* new_page = 1546 NewSpacePage* new_page =
1534 NewSpacePage::Initialize(heap(), end - i * Page::kPageSize, this); 1547 NewSpacePage::Initialize(heap(), end - i * Page::kPageSize, this);
1535 new_page->InsertAfter(page); 1548 new_page->InsertAfter(page);
1536 page = new_page; 1549 page = new_page;
1537 } 1550 }
(...skipping 36 matching lines...)
1574 ASSERT((new_capacity & Page::kPageAlignmentMask) == 0); 1587 ASSERT((new_capacity & Page::kPageAlignmentMask) == 0);
1575 ASSERT(new_capacity <= maximum_capacity_); 1588 ASSERT(new_capacity <= maximum_capacity_);
1576 ASSERT(new_capacity > capacity_); 1589 ASSERT(new_capacity > capacity_);
1577 int pages_before = capacity_ / Page::kPageSize; 1590 int pages_before = capacity_ / Page::kPageSize;
1578 int pages_after = new_capacity / Page::kPageSize; 1591 int pages_after = new_capacity / Page::kPageSize;
1579 1592
1580 Address end = start_ + maximum_capacity_; 1593 Address end = start_ + maximum_capacity_;
1581 Address start = end - new_capacity; 1594 Address start = end - new_capacity;
1582 size_t delta = new_capacity - capacity_; 1595 size_t delta = new_capacity - capacity_;
1583 1596
1584 ASSERT(IsAligned(delta, OS::AllocateAlignment())); 1597 ASSERT(IsAligned(delta, VirtualMemory::GetAllocationGranularity()));
1585 if (!heap()->isolate()->memory_allocator()->CommitBlock( 1598 if (!heap()->isolate()->memory_allocator()->CommitBlock(
1586 start, delta, executable())) { 1599 start, delta, executability())) {
1587 return false; 1600 return false;
1588 } 1601 }
1589 capacity_ = new_capacity; 1602 capacity_ = new_capacity;
1590 NewSpacePage* last_page = anchor()->prev_page(); 1603 NewSpacePage* last_page = anchor()->prev_page();
1591 ASSERT(last_page != anchor()); 1604 ASSERT(last_page != anchor());
1592 for (int i = pages_before + 1; i <= pages_after; i++) { 1605 for (int i = pages_before + 1; i <= pages_after; i++) {
1593 Address page_address = end - i * Page::kPageSize; 1606 Address page_address = end - i * Page::kPageSize;
1594 NewSpacePage* new_page = NewSpacePage::Initialize(heap(), 1607 NewSpacePage* new_page = NewSpacePage::Initialize(heap(),
1595 page_address, 1608 page_address,
1596 this); 1609 this);
(...skipping 12 matching lines...)
1609 ASSERT((new_capacity & Page::kPageAlignmentMask) == 0); 1622 ASSERT((new_capacity & Page::kPageAlignmentMask) == 0);
1610 ASSERT(new_capacity >= initial_capacity_); 1623 ASSERT(new_capacity >= initial_capacity_);
1611 ASSERT(new_capacity < capacity_); 1624 ASSERT(new_capacity < capacity_);
1612 if (is_committed()) { 1625 if (is_committed()) {
1613 // Semispaces grow backwards from the end of their allocated capacity, 1626 // Semispaces grow backwards from the end of their allocated capacity,
1614 // so we find the before and after start addresses relative to the 1627 // so we find the before and after start addresses relative to the
1615 // end of the space. 1628 // end of the space.
1616 Address space_end = start_ + maximum_capacity_; 1629 Address space_end = start_ + maximum_capacity_;
1617 Address old_start = space_end - capacity_; 1630 Address old_start = space_end - capacity_;
1618 size_t delta = capacity_ - new_capacity; 1631 size_t delta = capacity_ - new_capacity;
1619 ASSERT(IsAligned(delta, OS::AllocateAlignment())); 1632 ASSERT(IsAligned(delta, VirtualMemory::GetAllocationGranularity()));
1620 1633
1621 MemoryAllocator* allocator = heap()->isolate()->memory_allocator(); 1634 MemoryAllocator* allocator = heap()->isolate()->memory_allocator();
1622 if (!allocator->UncommitBlock(old_start, delta)) { 1635 if (!allocator->UncommitBlock(old_start, delta)) {
1623 return false; 1636 return false;
1624 } 1637 }
1625 1638
1626 int pages_after = new_capacity / Page::kPageSize; 1639 int pages_after = new_capacity / Page::kPageSize;
1627 NewSpacePage* new_last_page = 1640 NewSpacePage* new_last_page =
1628 NewSpacePage::FromAddress(space_end - pages_after * Page::kPageSize); 1641 NewSpacePage::FromAddress(space_end - pages_after * Page::kPageSize);
1629 new_last_page->set_next_page(anchor()); 1642 new_last_page->set_next_page(anchor());
(...skipping 1287 matching lines...)
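Both GrowTo and ShrinkTo above work from the high end of the reservation: the committed block is always [end - capacity_, end), so growing commits a delta just below the old block and shrinking uncommits from the old start. A numeric sketch with made-up sizes:

    // Illustrative: maximum 8 MB semispace, end = start_ + 8 MB.
    //   committed now:  [end - 2 MB, end)                 capacity_ = 2 MB
    //   GrowTo(3 MB):   commit   [end - 3 MB, end - 2 MB) delta = 1 MB
    //   ShrinkTo(1 MB): uncommit [end - 3 MB, end - 1 MB) delta = 2 MB
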
2917 // ----------------------------------------------------------------------------- 2930 // -----------------------------------------------------------------------------
2918 // LargeObjectSpace 2931 // LargeObjectSpace
2919 static bool ComparePointers(void* key1, void* key2) { 2932 static bool ComparePointers(void* key1, void* key2) {
2920 return key1 == key2; 2933 return key1 == key2;
2921 } 2934 }
2922 2935
2923 2936
2924 LargeObjectSpace::LargeObjectSpace(Heap* heap, 2937 LargeObjectSpace::LargeObjectSpace(Heap* heap,
2925 intptr_t max_capacity, 2938 intptr_t max_capacity,
2926 AllocationSpace id) 2939 AllocationSpace id)
2927 : Space(heap, id, NOT_EXECUTABLE), // Managed on a per-allocation basis 2940 // Managed on a per-allocation basis
2941 : Space(heap, id, VirtualMemory::NOT_EXECUTABLE),
2928 max_capacity_(max_capacity), 2942 max_capacity_(max_capacity),
2929 first_page_(NULL), 2943 first_page_(NULL),
2930 size_(0), 2944 size_(0),
2931 page_count_(0), 2945 page_count_(0),
2932 objects_size_(0), 2946 objects_size_(0),
2933 chunk_map_(ComparePointers, 1024) {} 2947 chunk_map_(ComparePointers, 1024) {}
2934 2948
2935 2949
2936 bool LargeObjectSpace::SetUp() { 2950 bool LargeObjectSpace::SetUp() {
2937 first_page_ = NULL; 2951 first_page_ = NULL;
(...skipping 13 matching lines...)
2951 2965
2952 ObjectSpace space = static_cast<ObjectSpace>(1 << identity()); 2966 ObjectSpace space = static_cast<ObjectSpace>(1 << identity());
2953 heap()->isolate()->memory_allocator()->PerformAllocationCallback( 2967 heap()->isolate()->memory_allocator()->PerformAllocationCallback(
2954 space, kAllocationActionFree, page->size()); 2968 space, kAllocationActionFree, page->size());
2955 heap()->isolate()->memory_allocator()->Free(page); 2969 heap()->isolate()->memory_allocator()->Free(page);
2956 } 2970 }
2957 SetUp(); 2971 SetUp();
2958 } 2972 }
2959 2973
2960 2974
2961 MaybeObject* LargeObjectSpace::AllocateRaw(int object_size, 2975 MaybeObject* LargeObjectSpace::AllocateRaw(
2962 Executability executable) { 2976 int object_size, VirtualMemory::Executability executability) {
2963 // Check if we want to force a GC before growing the old space further. 2977 // Check if we want to force a GC before growing the old space further.
2964 // If so, fail the allocation. 2978 // If so, fail the allocation.
2965 if (!heap()->always_allocate() && 2979 if (!heap()->always_allocate() &&
2966 heap()->OldGenerationAllocationLimitReached()) { 2980 heap()->OldGenerationAllocationLimitReached()) {
2967 return Failure::RetryAfterGC(identity()); 2981 return Failure::RetryAfterGC(identity());
2968 } 2982 }
2969 2983
2970 if (Size() + object_size > max_capacity_) { 2984 if (Size() + object_size > max_capacity_) {
2971 return Failure::RetryAfterGC(identity()); 2985 return Failure::RetryAfterGC(identity());
2972 } 2986 }
2973 2987
2974 LargePage* page = heap()->isolate()->memory_allocator()-> 2988 LargePage* page = heap()->isolate()->memory_allocator()->
2975 AllocateLargePage(object_size, this, executable); 2989 AllocateLargePage(object_size, this, executability);
2976 if (page == NULL) return Failure::RetryAfterGC(identity()); 2990 if (page == NULL) return Failure::RetryAfterGC(identity());
2977 ASSERT(page->area_size() >= object_size); 2991 ASSERT(page->area_size() >= object_size);
2978 2992
2979 size_ += static_cast<int>(page->size()); 2993 size_ += static_cast<int>(page->size());
2980 objects_size_ += object_size; 2994 objects_size_ += object_size;
2981 page_count_++; 2995 page_count_++;
2982 page->set_next_page(first_page_); 2996 page->set_next_page(first_page_);
2983 first_page_ = page; 2997 first_page_ = page;
2984 2998
2985 // Register all MemoryChunk::kAlignment-aligned chunks covered by 2999 // Register all MemoryChunk::kAlignment-aligned chunks covered by
(...skipping 235 matching lines...)
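AllocateRaw deliberately fails with Failure::RetryAfterGC instead of growing past the limits; callers are expected to collect and retry. A hedged sketch of the calling pattern this implies (MaybeObject unwrapping as in V8 of this vintage; the surrounding retry logic is an assumption):

    MaybeObject* maybe_obj =
        lo_space->AllocateRaw(size, VirtualMemory::NOT_EXECUTABLE);
    Object* obj;
    if (!maybe_obj->ToObject(&obj)) {
      // Failure::RetryAfterGC: run a GC for this space, then try again.
    }
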
3221 object->ShortPrint(); 3235 object->ShortPrint();
3222 PrintF("\n"); 3236 PrintF("\n");
3223 } 3237 }
3224 printf(" --------------------------------------\n"); 3238 printf(" --------------------------------------\n");
3225 printf(" Marked: %x, LiveCount: %x\n", mark_size, LiveBytes()); 3239 printf(" Marked: %x, LiveCount: %x\n", mark_size, LiveBytes());
3226 } 3240 }
3227 3241
3228 #endif // DEBUG 3242 #endif // DEBUG
3229 3243
3230 } } // namespace v8::internal 3244 } } // namespace v8::internal