Chromium Code Reviews

Side by Side Diff: src/spaces.cc

Issue 9017009: Reduce signal sender thread stack size to 32k. Base URL: http://v8.googlecode.com/svn/branches/bleeding_edge/
Patch Set: Created 9 years ago
1 // Copyright 2011 the V8 project authors. All rights reserved. 1 // Copyright 2011 the V8 project authors. All rights reserved.
2 // Redistribution and use in source and binary forms, with or without 2 // Redistribution and use in source and binary forms, with or without
3 // modification, are permitted provided that the following conditions are 3 // modification, are permitted provided that the following conditions are
4 // met: 4 // met:
5 // 5 //
6 // * Redistributions of source code must retain the above copyright 6 // * Redistributions of source code must retain the above copyright
7 // notice, this list of conditions and the following disclaimer. 7 // notice, this list of conditions and the following disclaimer.
8 // * Redistributions in binary form must reproduce the above 8 // * Redistributions in binary form must reproduce the above
9 // copyright notice, this list of conditions and the following 9 // copyright notice, this list of conditions and the following
10 // disclaimer in the documentation and/or other materials provided 10 // disclaimer in the documentation and/or other materials provided
(...skipping 245 matching lines...)
256 256
257 257
258 // ----------------------------------------------------------------------------- 258 // -----------------------------------------------------------------------------
259 // MemoryAllocator 259 // MemoryAllocator
260 // 260 //
261 261
262 MemoryAllocator::MemoryAllocator(Isolate* isolate) 262 MemoryAllocator::MemoryAllocator(Isolate* isolate)
263 : isolate_(isolate), 263 : isolate_(isolate),
264 capacity_(0), 264 capacity_(0),
265 capacity_executable_(0), 265 capacity_executable_(0),
266 size_(0), 266 memory_allocator_reserved_(0),
267 size_executable_(0) { 267 size_executable_(0) {
268 } 268 }
269 269
270 270
271 bool MemoryAllocator::Setup(intptr_t capacity, intptr_t capacity_executable) { 271 bool MemoryAllocator::Setup(intptr_t capacity, intptr_t capacity_executable) {
272 capacity_ = RoundUp(capacity, Page::kPageSize); 272 capacity_ = RoundUp(capacity, Page::kPageSize);
273 capacity_executable_ = RoundUp(capacity_executable, Page::kPageSize); 273 capacity_executable_ = RoundUp(capacity_executable, Page::kPageSize);
274 ASSERT_GE(capacity_, capacity_executable_); 274 ASSERT_GE(capacity_, capacity_executable_);
275 275
276 size_ = 0; 276 memory_allocator_reserved_ = 0;
277 size_executable_ = 0; 277 size_executable_ = 0;
278 278
279 return true; 279 return true;
280 } 280 }
281 281
282 282
283 void MemoryAllocator::TearDown() { 283 void MemoryAllocator::TearDown() {
284 // Check that spaces were torn down before MemoryAllocator. 284 // Check that spaces were torn down before MemoryAllocator.
285 ASSERT(size_ == 0); 285 CHECK(memory_allocator_reserved_ == 0);
286 // TODO(gc) this will be true again when we fix FreeMemory. 286 // TODO(gc) this will be true again when we fix FreeMemory.
287 // ASSERT(size_executable_ == 0); 287 // ASSERT(size_executable_ == 0);
288 capacity_ = 0; 288 capacity_ = 0;
289 capacity_executable_ = 0; 289 capacity_executable_ = 0;
290 } 290 }
291 291
292 292
293 void MemoryAllocator::FreeMemory(VirtualMemory* reservation, 293 void MemoryAllocator::FreeMemory(VirtualMemory* reservation,
294 Executability executable) { 294 Executability executable) {
295 // TODO(gc) make code_range part of memory allocator? 295 // TODO(gc) make code_range part of memory allocator?
296 ASSERT(reservation->IsReserved()); 296 ASSERT(reservation->IsReserved());
297 size_t size = reservation->size(); 297 size_t size = reservation->size();
298 ASSERT(size_ >= size); 298 ASSERT(memory_allocator_reserved_ >= size);
299 size_ -= size; 299 memory_allocator_reserved_ -= size;
300 300
301 isolate_->counters()->memory_allocated()->Decrement(static_cast<int>(size)); 301 isolate_->counters()->memory_allocated()->Decrement(static_cast<int>(size));
302 302
303 if (executable == EXECUTABLE) { 303 if (executable == EXECUTABLE) {
304 ASSERT(size_executable_ >= size); 304 ASSERT(size_executable_ >= size);
305 size_executable_ -= size; 305 size_executable_ -= size;
306 } 306 }
307 // Code which is part of the code-range does not have its own VirtualMemory. 307 // Code which is part of the code-range does not have its own VirtualMemory.
308 ASSERT(!isolate_->code_range()->contains( 308 ASSERT(!isolate_->code_range()->contains(
309 static_cast<Address>(reservation->address()))); 309 static_cast<Address>(reservation->address())));
310 ASSERT(executable == NOT_EXECUTABLE || !isolate_->code_range()->exists()); 310 ASSERT(executable == NOT_EXECUTABLE || !isolate_->code_range()->exists());
311 reservation->Release(); 311 reservation->Release();
312 } 312 }
313 313
314 314
315 void MemoryAllocator::FreeMemory(Address base, 315 void MemoryAllocator::FreeMemory(Address base,
316 size_t size, 316 size_t size,
317 Executability executable) { 317 Executability executable) {
318 // TODO(gc) make code_range part of memory allocator? 318 // TODO(gc) make code_range part of memory allocator?
319 ASSERT(size_ >= size); 319 ASSERT(memory_allocator_reserved_ >= size);
320 size_ -= size; 320 memory_allocator_reserved_ -= size;
321 321
322 isolate_->counters()->memory_allocated()->Decrement(static_cast<int>(size)); 322 isolate_->counters()->memory_allocated()->Decrement(static_cast<int>(size));
323 323
324 if (executable == EXECUTABLE) { 324 if (executable == EXECUTABLE) {
325 ASSERT(size_executable_ >= size); 325 ASSERT(size_executable_ >= size);
326 size_executable_ -= size; 326 size_executable_ -= size;
327 } 327 }
328 if (isolate_->code_range()->contains(static_cast<Address>(base))) { 328 if (isolate_->code_range()->contains(static_cast<Address>(base))) {
329 ASSERT(executable == EXECUTABLE); 329 ASSERT(executable == EXECUTABLE);
330 isolate_->code_range()->FreeRawMemory(base, size); 330 isolate_->code_range()->FreeRawMemory(base, size);
331 } else { 331 } else {
332 ASSERT(executable == NOT_EXECUTABLE || !isolate_->code_range()->exists()); 332 ASSERT(executable == NOT_EXECUTABLE || !isolate_->code_range()->exists());
333 bool result = VirtualMemory::ReleaseRegion(base, size); 333 bool result = VirtualMemory::ReleaseRegion(base, size);
334 USE(result); 334 USE(result);
335 ASSERT(result); 335 ASSERT(result);
336 } 336 }
337 } 337 }
338 338
339 339
340 Address MemoryAllocator::ReserveAlignedMemory(size_t size, 340 Address MemoryAllocator::ReserveAlignedMemory(size_t size,
341 size_t alignment, 341 size_t alignment,
342 VirtualMemory* controller) { 342 VirtualMemory* controller) {
343 VirtualMemory reservation(size, alignment); 343 VirtualMemory reservation(size, alignment);
344 344
345 if (!reservation.IsReserved()) return NULL; 345 if (!reservation.IsReserved()) return NULL;
346 size_ += reservation.size(); 346 memory_allocator_reserved_ += reservation.size();
347 Address base = RoundUp(static_cast<Address>(reservation.address()), 347 Address base = RoundUp(static_cast<Address>(reservation.address()),
348 alignment); 348 alignment);
349 controller->TakeControl(&reservation); 349 controller->TakeControl(&reservation);
350 return base; 350 return base;
351 } 351 }
352 352
353 353
354 Address MemoryAllocator::AllocateAlignedMemory(size_t size, 354 Address MemoryAllocator::AllocateAlignedMemory(size_t size,
355 size_t reserved_size,
355 size_t alignment, 356 size_t alignment,
356 Executability executable, 357 Executability executable,
357 VirtualMemory* controller) { 358 VirtualMemory* controller) {
359 ASSERT(RoundUp(reserved_size, OS::CommitPageSize()) >=
360 RoundUp(size, OS::CommitPageSize()));
358 VirtualMemory reservation; 361 VirtualMemory reservation;
359 Address base = ReserveAlignedMemory(size, alignment, &reservation); 362 Address base = ReserveAlignedMemory(reserved_size, alignment, &reservation);
360 if (base == NULL) return NULL; 363 if (base == NULL) return NULL;
361 if (!reservation.Commit(base, 364 if (!reservation.Commit(base,
362 size, 365 size,
363 executable == EXECUTABLE)) { 366 executable == EXECUTABLE)) {
364 return NULL; 367 return NULL;
365 } 368 }
366 controller->TakeControl(&reservation); 369 controller->TakeControl(&reservation);
367 return base; 370 return base;
368 } 371 }
369 372
370 373
371 void Page::InitializeAsAnchor(PagedSpace* owner) { 374 void Page::InitializeAsAnchor(PagedSpace* owner) {
372 set_owner(owner); 375 set_owner(owner);
373 set_prev_page(this); 376 set_prev_page(this);
374 set_next_page(this); 377 set_next_page(this);
375 } 378 }
376 379
377 380
381 void Page::CommitMore(intptr_t space_needed) {
382 ASSERT(size() < kPageSize);
383 intptr_t expand = Min(Max(size(), space_needed), kPageSize - size());
Vyacheslav Egorov (Chromium) 2011/12/21 13:22:12 add a comment that you are trying to at least double …
384 expand = SignedRoundUpToPowerOf2(size() + expand) - size();
385 ASSERT(expand <= kPageSize - size());
386 Executability executable =
387 IsFlagSet(IS_EXECUTABLE) ? EXECUTABLE : NOT_EXECUTABLE;
388 Address old_end = ObjectAreaEnd();
389 if (!reservation_.Commit(old_end, expand, executable)) return;
390
391 set_size(size() + expand);
392
393 PagedSpace* paged_space = reinterpret_cast<PagedSpace*>(owner());
394 paged_space->heap()->isolate()->memory_allocator()->AllocationBookkeeping(
395 paged_space,
396 old_end,
397 0, // No new memory was reserved.
398 expand, // New memory committed.
399 executable);
400 paged_space->IncreaseCapacity(expand);
401
402 // In map space we have to align the expanded area with the correct map
403 // alignment.
404 uintptr_t end_int = old_end - ObjectAreaStart();
405 uintptr_t aligned_end_int =
406 end_int - end_int % paged_space->ObjectAlignment();
407 if (aligned_end_int < end_int) {
408 aligned_end_int += paged_space->ObjectAlignment();
409 }
410 Address new_area =
411 reinterpret_cast<Address>(ObjectAreaStart() + aligned_end_int);
412 paged_space->AddToFreeLists(old_end, new_area - old_end);
Vyacheslav Egorov (Chromium) 2011/12/21 13:22:12 it looks a bit spooky; it seems we will be wasting …
413
414 expand -= (new_area - old_end);
415
416 paged_space->AddToFreeLists(new_area, expand);
417 }
418
419
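The growth policy in Page::CommitMore above can be read as: commit at least as much again as is already committed (so the committed area at least doubles), never less than the immediate request, and round the new total up to a power of two without exceeding the full page. Below is a minimal standalone sketch of that arithmetic, not part of the patch; kPageSize and RoundUpToPowerOf2 here are placeholders rather than the real V8 definitions.

// Standalone illustration of the CommitMore growth arithmetic above.
// kPageSize and RoundUpToPowerOf2 are placeholders, not the V8 definitions.
#include <algorithm>
#include <cassert>
#include <cstdint>
#include <cstdio>

namespace {

const intptr_t kPageSize = 1 << 20;  // placeholder page size (1 MB)

// Round a positive value up to the next power of two.
intptr_t RoundUpToPowerOf2(intptr_t value) {
  intptr_t result = 1;
  while (result < value) result <<= 1;
  return result;
}

// Given the currently committed size of a page and the bytes the caller
// needs, return how much extra to commit: at least as much as is already
// committed (so the committed area at least doubles) and at least the
// request, rounded so the new total is a power of two, capped at the page.
intptr_t ExpandAmount(intptr_t committed, intptr_t space_needed) {
  assert(committed < kPageSize);
  intptr_t expand =
      std::min(std::max(committed, space_needed), kPageSize - committed);
  expand = RoundUpToPowerOf2(committed + expand) - committed;
  assert(expand <= kPageSize - committed);
  return expand;
}

}  // namespace

int main() {
  // 48 KB committed, 4 KB needed: expands by 80 KB to a 128 KB total.
  std::printf("%ld\n", static_cast<long>(ExpandAmount(48 * 1024, 4 * 1024)));
  // 512 KB committed, 700 KB needed: capped at the remaining 512 KB.
  std::printf("%ld\n", static_cast<long>(ExpandAmount(512 * 1024, 700 * 1024)));
  return 0;
}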
378 NewSpacePage* NewSpacePage::Initialize(Heap* heap, 420 NewSpacePage* NewSpacePage::Initialize(Heap* heap,
379 Address start, 421 Address start,
380 SemiSpace* semi_space) { 422 SemiSpace* semi_space) {
381 MemoryChunk* chunk = MemoryChunk::Initialize(heap, 423 MemoryChunk* chunk = MemoryChunk::Initialize(heap,
382 start, 424 start,
383 Page::kPageSize, 425 Page::kPageSize,
384 NOT_EXECUTABLE, 426 NOT_EXECUTABLE,
385 semi_space); 427 semi_space);
386 chunk->set_next_chunk(NULL); 428 chunk->set_next_chunk(NULL);
387 chunk->set_prev_chunk(NULL); 429 chunk->set_prev_chunk(NULL);
(...skipping 65 matching lines...)
453 ClearFlag(SCAN_ON_SCAVENGE); 495 ClearFlag(SCAN_ON_SCAVENGE);
454 } 496 }
455 next_chunk_->prev_chunk_ = prev_chunk_; 497 next_chunk_->prev_chunk_ = prev_chunk_;
456 prev_chunk_->next_chunk_ = next_chunk_; 498 prev_chunk_->next_chunk_ = next_chunk_;
457 prev_chunk_ = NULL; 499 prev_chunk_ = NULL;
458 next_chunk_ = NULL; 500 next_chunk_ = NULL;
459 } 501 }
460 502
461 503
462 MemoryChunk* MemoryAllocator::AllocateChunk(intptr_t body_size, 504 MemoryChunk* MemoryAllocator::AllocateChunk(intptr_t body_size,
505 intptr_t committed_body_size,
463 Executability executable, 506 Executability executable,
464 Space* owner) { 507 Space* owner) {
465 size_t chunk_size = MemoryChunk::kObjectStartOffset + body_size; 508 ASSERT(body_size >= committed_body_size);
509 size_t chunk_size = Max(static_cast<intptr_t>(Page::kPageSize),
Vyacheslav Egorov (Chromium) 2011/12/21 13:22:12 why are we wasting address space? if body size is …
510 MemoryChunk::kObjectStartOffset + body_size);
511 intptr_t committed_chunk_size =
512 committed_body_size + MemoryChunk::kObjectStartOffset;
513 committed_chunk_size = RoundUp(committed_chunk_size, OS::CommitPageSize());
466 Heap* heap = isolate_->heap(); 514 Heap* heap = isolate_->heap();
467 Address base = NULL; 515 Address base = NULL;
468 VirtualMemory reservation; 516 VirtualMemory reservation;
469 if (executable == EXECUTABLE) { 517 if (executable == EXECUTABLE) {
470 // Check executable memory limit. 518 // Check executable memory limit.
471 if (size_executable_ + chunk_size > capacity_executable_) { 519 if (size_executable_ + chunk_size > capacity_executable_) {
472 LOG(isolate_, 520 LOG(isolate_,
473 StringEvent("MemoryAllocator::AllocateRawMemory", 521 StringEvent("MemoryAllocator::AllocateRawMemory",
474 "V8 Executable Allocation capacity exceeded")); 522 "V8 Executable Allocation capacity exceeded"));
475 return NULL; 523 return NULL;
476 } 524 }
477 525
478 // Allocate executable memory either from code range or from the 526 // Allocate executable memory either from code range or from the
479 // OS. 527 // OS.
480 if (isolate_->code_range()->exists()) { 528 if (isolate_->code_range()->exists()) {
481 base = isolate_->code_range()->AllocateRawMemory(chunk_size, &chunk_size); 529 base = isolate_->code_range()->AllocateRawMemory(chunk_size, &chunk_size);
482 ASSERT(IsAligned(reinterpret_cast<intptr_t>(base), 530 ASSERT(IsAligned(reinterpret_cast<intptr_t>(base),
483 MemoryChunk::kAlignment)); 531 MemoryChunk::kAlignment));
484 if (base == NULL) return NULL; 532 if (base == NULL) return NULL;
485 size_ += chunk_size; 533 // The AllocateAlignedMemory method will update the memory allocator
486 // Update executable memory size. 534 // memory used, but we are not using that if we have a code range, so
487 size_executable_ += chunk_size; 535 // we update it here.
536 memory_allocator_reserved_ += chunk_size;
488 } else { 537 } else {
489 base = AllocateAlignedMemory(chunk_size, 538 base = AllocateAlignedMemory(committed_chunk_size,
539 chunk_size,
490 MemoryChunk::kAlignment, 540 MemoryChunk::kAlignment,
491 executable, 541 executable,
492 &reservation); 542 &reservation);
493 if (base == NULL) return NULL; 543 if (base == NULL) return NULL;
494 // Update executable memory size.
495 size_executable_ += reservation.size();
496 } 544 }
497 } else { 545 } else {
498 base = AllocateAlignedMemory(chunk_size, 546 base = AllocateAlignedMemory(committed_chunk_size,
547 chunk_size,
499 MemoryChunk::kAlignment, 548 MemoryChunk::kAlignment,
500 executable, 549 executable,
501 &reservation); 550 &reservation);
502 551
503 if (base == NULL) return NULL; 552 if (base == NULL) return NULL;
504 } 553 }
505 554
506 #ifdef DEBUG 555 AllocationBookkeeping(
507 ZapBlock(base, chunk_size); 556 owner, base, chunk_size, committed_chunk_size, executable);
508 #endif
509 isolate_->counters()->memory_allocated()->
510 Increment(static_cast<int>(chunk_size));
511
512 LOG(isolate_, NewEvent("MemoryChunk", base, chunk_size));
513 if (owner != NULL) {
514 ObjectSpace space = static_cast<ObjectSpace>(1 << owner->identity());
515 PerformAllocationCallback(space, kAllocationActionAllocate, chunk_size);
516 }
517 557
518 MemoryChunk* result = MemoryChunk::Initialize(heap, 558 MemoryChunk* result = MemoryChunk::Initialize(heap,
519 base, 559 base,
520 chunk_size, 560 committed_chunk_size,
521 executable, 561 executable,
522 owner); 562 owner);
523 result->set_reserved_memory(&reservation); 563 result->set_reserved_memory(&reservation);
524 return result; 564 return result;
525 } 565 }
526 566
527 567
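AllocateChunk above now distinguishes the reserved chunk (header plus the full object area, but at least one page of address space) from the initially committed part (header plus only the committed body, rounded up to the OS commit granularity). The following standalone sketch, not part of the patch, traces that computation with placeholder values standing in for MemoryChunk::kObjectStartOffset, Page::kPageSize and OS::CommitPageSize().

// Standalone sketch of the reserved vs. committed chunk-size computation in
// AllocateChunk above. The constants are placeholders, not the real V8 values.
#include <algorithm>
#include <cassert>
#include <cstdint>
#include <cstdio>

namespace {

const intptr_t kPageSize = 1 << 20;       // placeholder Page::kPageSize
const intptr_t kObjectStartOffset = 256;  // placeholder chunk header size
const intptr_t kCommitPageSize = 4096;    // placeholder OS commit page size

intptr_t RoundUp(intptr_t value, intptr_t granularity) {
  return ((value + granularity - 1) / granularity) * granularity;
}

struct ChunkSizes {
  intptr_t reserved;   // address space reserved for the whole chunk
  intptr_t committed;  // memory actually committed up front
};

ChunkSizes ComputeChunkSizes(intptr_t body_size, intptr_t committed_body_size) {
  assert(body_size >= committed_body_size);
  ChunkSizes sizes;
  // Reserve at least a full page of address space for the chunk.
  sizes.reserved = std::max(kPageSize, kObjectStartOffset + body_size);
  // Commit only the header plus the requested part of the body, rounded up
  // to the OS commit granularity.
  sizes.committed =
      RoundUp(committed_body_size + kObjectStartOffset, kCommitPageSize);
  return sizes;
}

}  // namespace

int main() {
  ChunkSizes s = ComputeChunkSizes(/*body_size=*/900 * 1024,
                                   /*committed_body_size=*/64 * 1024);
  std::printf("reserved=%ld committed=%ld\n",
              static_cast<long>(s.reserved), static_cast<long>(s.committed));
  return 0;
}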
528 Page* MemoryAllocator::AllocatePage(PagedSpace* owner, 568 void MemoryAllocator::AllocationBookkeeping(Space* owner,
569 Address base,
570 intptr_t reserved_chunk_size,
571 intptr_t committed_chunk_size,
572 Executability executable) {
573 if (executable == EXECUTABLE) {
574 // Update executable memory size.
575 size_executable_ += reserved_chunk_size;
576 }
577
578 #ifdef DEBUG
579 ZapBlock(base, committed_chunk_size);
580 #endif
581 isolate_->counters()->memory_allocated()->
582 Increment(static_cast<int>(committed_chunk_size));
583
584 LOG(isolate_, NewEvent("MemoryChunk", base, committed_chunk_size));
585 if (owner != NULL) {
586 ObjectSpace space = static_cast<ObjectSpace>(1 << owner->identity());
587 PerformAllocationCallback(
588 space, kAllocationActionAllocate, committed_chunk_size);
589 }
590 }
591
592
593 Page* MemoryAllocator::AllocatePage(intptr_t object_area_size,
594 PagedSpace* owner,
529 Executability executable) { 595 Executability executable) {
530 MemoryChunk* chunk = AllocateChunk(Page::kObjectAreaSize, executable, owner); 596 ASSERT(object_area_size <= Page::kObjectAreaSize);
597
598 MemoryChunk* chunk =
599 AllocateChunk(Page::kObjectAreaSize, object_area_size, executable, owner);
531 600
532 if (chunk == NULL) return NULL; 601 if (chunk == NULL) return NULL;
533 602
534 return Page::Initialize(isolate_->heap(), chunk, executable, owner); 603 return Page::Initialize(isolate_->heap(), chunk, executable, owner);
535 } 604 }
536 605
537 606
538 LargePage* MemoryAllocator::AllocateLargePage(intptr_t object_size, 607 LargePage* MemoryAllocator::AllocateLargePage(intptr_t object_size,
539 Executability executable, 608 Executability executable,
540 Space* owner) { 609 Space* owner) {
541 MemoryChunk* chunk = AllocateChunk(object_size, executable, owner); 610 MemoryChunk* chunk =
611 AllocateChunk(object_size, object_size, executable, owner);
542 if (chunk == NULL) return NULL; 612 if (chunk == NULL) return NULL;
543 return LargePage::Initialize(isolate_->heap(), chunk); 613 return LargePage::Initialize(isolate_->heap(), chunk);
544 } 614 }
545 615
546 616
547 void MemoryAllocator::Free(MemoryChunk* chunk) { 617 void MemoryAllocator::Free(MemoryChunk* chunk) {
548 LOG(isolate_, DeleteEvent("MemoryChunk", chunk)); 618 LOG(isolate_, DeleteEvent("MemoryChunk", chunk));
549 if (chunk->owner() != NULL) { 619 if (chunk->owner() != NULL) {
550 ObjectSpace space = 620 ObjectSpace space =
551 static_cast<ObjectSpace>(1 << chunk->owner()->identity()); 621 static_cast<ObjectSpace>(1 << chunk->owner()->identity());
552 PerformAllocationCallback(space, kAllocationActionFree, chunk->size()); 622 PerformAllocationCallback(space, kAllocationActionFree, chunk->size());
553 } 623 }
554 624
555 delete chunk->slots_buffer(); 625 delete chunk->slots_buffer();
556 delete chunk->skip_list(); 626 delete chunk->skip_list();
557 627
558 VirtualMemory* reservation = chunk->reserved_memory(); 628 VirtualMemory* reservation = chunk->reserved_memory();
559 if (reservation->IsReserved()) { 629 if (reservation->IsReserved()) {
560 FreeMemory(reservation, chunk->executable()); 630 FreeMemory(reservation, chunk->executable());
561 } else { 631 } else {
632 // When we do not have a reservation that is because this allocation
633 // is part of the huge reserved chunk of memory reserved for code on
634 // x64. In that case the size was rounded up to the page size on
635 // allocation so we do the same now when freeing.
562 FreeMemory(chunk->address(), 636 FreeMemory(chunk->address(),
563 chunk->size(), 637 RoundUp(chunk->size(), Page::kPageSize),
Vyacheslav Egorov (Chromium) 2011/12/21 13:22:12 I don't like disagreement between chunk size and a…
564 chunk->executable()); 638 chunk->executable());
565 } 639 }
566 } 640 }
567 641
568 642
569 bool MemoryAllocator::CommitBlock(Address start, 643 bool MemoryAllocator::CommitBlock(Address start,
570 size_t size, 644 size_t size,
571 Executability executable) { 645 Executability executable) {
572 if (!VirtualMemory::CommitRegion(start, size, executable)) return false; 646 if (!VirtualMemory::CommitRegion(start, size, executable)) return false;
573 #ifdef DEBUG 647 #ifdef DEBUG
(...skipping 59 matching lines...)
633 memory_allocation_callbacks_.Remove(i); 707 memory_allocation_callbacks_.Remove(i);
634 return; 708 return;
635 } 709 }
636 } 710 }
637 UNREACHABLE(); 711 UNREACHABLE();
638 } 712 }
639 713
640 714
641 #ifdef DEBUG 715 #ifdef DEBUG
642 void MemoryAllocator::ReportStatistics() { 716 void MemoryAllocator::ReportStatistics() {
643 float pct = static_cast<float>(capacity_ - size_) / capacity_; 717 float pct =
718 static_cast<float>(capacity_ - memory_allocator_reserved_) / capacity_;
644 PrintF(" capacity: %" V8_PTR_PREFIX "d" 719 PrintF(" capacity: %" V8_PTR_PREFIX "d"
645 ", used: %" V8_PTR_PREFIX "d" 720 ", used: %" V8_PTR_PREFIX "d"
646 ", available: %%%d\n\n", 721 ", available: %%%d\n\n",
647 capacity_, size_, static_cast<int>(pct*100)); 722 capacity_, memory_allocator_reserved_, static_cast<int>(pct*100));
648 } 723 }
649 #endif 724 #endif
650 725
651 // ----------------------------------------------------------------------------- 726 // -----------------------------------------------------------------------------
652 // PagedSpace implementation 727 // PagedSpace implementation
653 728
654 PagedSpace::PagedSpace(Heap* heap, 729 PagedSpace::PagedSpace(Heap* heap,
655 intptr_t max_capacity, 730 intptr_t max_capacity,
656 AllocationSpace id, 731 AllocationSpace id,
657 Executability executable) 732 Executability executable)
(...skipping 46 matching lines...)
704 Address next = cur + obj->Size(); 779 Address next = cur + obj->Size();
705 if ((cur <= addr) && (addr < next)) return obj; 780 if ((cur <= addr) && (addr < next)) return obj;
706 } 781 }
707 782
708 UNREACHABLE(); 783 UNREACHABLE();
709 return Failure::Exception(); 784 return Failure::Exception();
710 } 785 }
711 786
712 bool PagedSpace::CanExpand() { 787 bool PagedSpace::CanExpand() {
713 ASSERT(max_capacity_ % Page::kObjectAreaSize == 0); 788 ASSERT(max_capacity_ % Page::kObjectAreaSize == 0);
714 ASSERT(Capacity() % Page::kObjectAreaSize == 0);
715 789
716 if (Capacity() == max_capacity_) return false; 790 if (Capacity() == max_capacity_) return false;
717 791
718 ASSERT(Capacity() < max_capacity_); 792 ASSERT(Capacity() < max_capacity_);
719 793
720 // Are we going to exceed capacity for this space? 794 // Are we going to exceed capacity for this space?
721 if ((Capacity() + Page::kPageSize) > max_capacity_) return false; 795 if ((Capacity() + Page::kPageSize) > max_capacity_) return false;
722 796
723 return true; 797 return true;
724 } 798 }
725 799
726 bool PagedSpace::Expand() { 800 bool PagedSpace::Expand(intptr_t size_in_bytes) {
727 if (!CanExpand()) return false; 801 if (!CanExpand()) return false;
728 802
803 Page* last_page = anchor_.prev_page();
804 if (last_page != &anchor_) {
805 // We have run out of linear allocation space because the current page
806 // is a small one, that starts on a page aligned boundary, but has not a
807 // full kPageSize of committed memory. In that case, let's commit more
808 // memory for the page.
809 if (last_page->size() < Page::kPageSize &&
Vyacheslav Egorov (Chromium) 2011/12/21 13:22:12 comment is talking about "current page" but the code …
810 Page::kPageSize - last_page->size() >= size_in_bytes &&
811 !last_page->IsEvacuationCandidate() &&
812 last_page->WasSwept()) {
813 last_page->CommitMore(size_in_bytes);
814 return true;
815 }
816 }
817
818 // We initially only commit a part of the page, but the deserialization
819 // of the initial snapshot makes the assumption that it can deserialize
820 // into linear memory of a certain size per space, so some of the spaces
821 // need to have a little more committed memory.
822 int initial = Page::kInitiallyCommittedPartOfPage;
823 if (identity() == OLD_POINTER_SPACE) initial = initial * 8;
Vyacheslav Egorov (Chromium) 2011/12/21 13:22:12 This looks very magical. I highly recommend to pro…
824 if (identity() == OLD_DATA_SPACE) initial *= 2;
825 if (identity() == CODE_SPACE) initial *= 4;
826
827 ASSERT(initial <= Page::kPageSize);
828 ASSERT(Page::kPageSize - Page::kInitiallyCommittedPartOfPage <
829 Page::kObjectAreaSize);
830
831 intptr_t expansion_size =
832 Max(initial,
833 SignedRoundUpToPowerOf2(
Vyacheslav Egorov (Chromium) 2011/12/21 13:22:12 SignedRoundUpToPowerOf2 does not look right. Should …
834 MemoryChunk::kObjectStartOffset + size_in_bytes)) -
835 MemoryChunk::kObjectStartOffset;
836
729 Page* p = heap()->isolate()->memory_allocator()-> 837 Page* p = heap()->isolate()->memory_allocator()->
730 AllocatePage(this, executable()); 838 AllocatePage(expansion_size, this, executable());
731 if (p == NULL) return false; 839 if (p == NULL) return false;
732 840
733 ASSERT(Capacity() <= max_capacity_); 841 ASSERT(Capacity() <= max_capacity_);
734 842
735 p->InsertAfter(anchor_.prev_page()); 843 p->InsertAfter(anchor_.prev_page());
736 844
737 return true; 845 return true;
738 } 846 }
739 847
740 848
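The per-space multipliers in PagedSpace::Expand are, as the reviewer notes, magic numbers tuned so that snapshot deserialization finds enough contiguous committed memory in each space. The standalone sketch below, not part of the patch, shows how the initial commit amount and the power-of-two rounding of the request combine into the expansion size; the constants and space identifiers are placeholders, not the V8 ones.

// Standalone sketch of the expansion-size calculation in PagedSpace::Expand
// above. The space identifiers and constants are placeholders, not V8's.
#include <algorithm>
#include <cstdint>
#include <cstdio>

namespace {

enum SpaceId { OLD_POINTER_SPACE, OLD_DATA_SPACE, CODE_SPACE, MAP_SPACE };

const intptr_t kInitiallyCommittedPartOfPage = 64 * 1024;  // placeholder
const intptr_t kObjectStartOffset = 256;                   // placeholder

// Round a positive value up to the next power of two.
intptr_t RoundUpToPowerOf2(intptr_t value) {
  intptr_t result = 1;
  while (result < value) result <<= 1;
  return result;
}

// Object-area bytes to commit for a newly added page: at least the per-space
// initial amount (scaled up for spaces the snapshot deserializer fills
// densely), and at least enough for the current request, rounded so that the
// committed chunk (header + body) is a power of two.
intptr_t ExpansionSize(SpaceId id, intptr_t size_in_bytes) {
  intptr_t initial = kInitiallyCommittedPartOfPage;
  if (id == OLD_POINTER_SPACE) initial *= 8;
  if (id == OLD_DATA_SPACE) initial *= 2;
  if (id == CODE_SPACE) initial *= 4;
  return std::max(initial,
                  RoundUpToPowerOf2(kObjectStartOffset + size_in_bytes)) -
         kObjectStartOffset;
}

}  // namespace

int main() {
  std::printf("%ld\n", static_cast<long>(ExpansionSize(CODE_SPACE, 8 * 1024)));
  std::printf("%ld\n",
              static_cast<long>(ExpansionSize(MAP_SPACE, 300 * 1024)));
  return 0;
}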
(...skipping 22 matching lines...)
763 if (page->WasSwept()) { 871 if (page->WasSwept()) {
764 intptr_t size = free_list_.EvictFreeListItems(page); 872 intptr_t size = free_list_.EvictFreeListItems(page);
765 accounting_stats_.AllocateBytes(size); 873 accounting_stats_.AllocateBytes(size);
766 ASSERT_EQ(Page::kObjectAreaSize, static_cast<int>(size)); 874 ASSERT_EQ(Page::kObjectAreaSize, static_cast<int>(size));
767 } 875 }
768 876
769 if (Page::FromAllocationTop(allocation_info_.top) == page) { 877 if (Page::FromAllocationTop(allocation_info_.top) == page) {
770 allocation_info_.top = allocation_info_.limit = NULL; 878 allocation_info_.top = allocation_info_.limit = NULL;
771 } 879 }
772 880
881 intptr_t size = page->ObjectAreaEnd() - page->ObjectAreaStart();
882
773 page->Unlink(); 883 page->Unlink();
774 if (page->IsFlagSet(MemoryChunk::CONTAINS_ONLY_DATA)) { 884 if (page->IsFlagSet(MemoryChunk::CONTAINS_ONLY_DATA)) {
775 heap()->isolate()->memory_allocator()->Free(page); 885 heap()->isolate()->memory_allocator()->Free(page);
776 } else { 886 } else {
777 heap()->QueueMemoryChunkForFree(page); 887 heap()->QueueMemoryChunkForFree(page);
778 } 888 }
779 889
780 ASSERT(Capacity() > 0); 890 ASSERT(Capacity() > 0);
781 ASSERT(Capacity() % Page::kObjectAreaSize == 0); 891 accounting_stats_.ShrinkSpace(size);
782 accounting_stats_.ShrinkSpace(Page::kObjectAreaSize);
783 } 892 }
784 893
785 894
786 void PagedSpace::ReleaseAllUnusedPages() { 895 void PagedSpace::ReleaseAllUnusedPages() {
787 PageIterator it(this); 896 PageIterator it(this);
788 while (it.has_next()) { 897 while (it.has_next()) {
789 Page* page = it.next(); 898 Page* page = it.next();
790 if (!page->WasSwept()) { 899 if (!page->WasSwept()) {
791 if (page->LiveBytes() == 0) ReleasePage(page); 900 if (page->LiveBytes() == 0) ReleasePage(page);
792 } else { 901 } else {
(...skipping 956 matching lines...)
1749 } else { 1858 } else {
1750 node->set_next(huge_list_); 1859 node->set_next(huge_list_);
1751 huge_list_ = node; 1860 huge_list_ = node;
1752 } 1861 }
1753 available_ += size_in_bytes; 1862 available_ += size_in_bytes;
1754 ASSERT(IsVeryLong() || available_ == SumFreeLists()); 1863 ASSERT(IsVeryLong() || available_ == SumFreeLists());
1755 return 0; 1864 return 0;
1756 } 1865 }
1757 1866
1758 1867
1759 FreeListNode* FreeList::PickNodeFromList(FreeListNode** list, int* node_size) { 1868 FreeListNode* FreeList::PickNodeFromList(FreeListNode** list,
1869 int* node_size,
1870 int minimum_size) {
1760 FreeListNode* node = *list; 1871 FreeListNode* node = *list;
1761 1872
1762 if (node == NULL) return NULL; 1873 if (node == NULL) return NULL;
1763 1874
1875 ASSERT(node->map() == node->GetHeap()->raw_unchecked_free_space_map());
1876
1764 while (node != NULL && 1877 while (node != NULL &&
1765 Page::FromAddress(node->address())->IsEvacuationCandidate()) { 1878 Page::FromAddress(node->address())->IsEvacuationCandidate()) {
1766 available_ -= node->Size(); 1879 available_ -= node->Size();
1767 node = node->next(); 1880 node = node->next();
1768 } 1881 }
1769 1882
1770 if (node != NULL) { 1883 if (node == NULL) {
1771 *node_size = node->Size();
1772 *list = node->next();
1773 } else {
1774 *list = NULL; 1884 *list = NULL;
1885 return NULL;
1775 } 1886 }
1776 1887
1888 // Gets the size without checking the map. When we are booting we have
1889 // a FreeListNode before we have created its map.
1890 intptr_t size = reinterpret_cast<FreeSpace*>(node)->Size();
1891
1892 // We don't search the list for one that fits, preferring to look in the
1893 // list of larger nodes, but we do check the first in the list, because
1894 // if we had to expand the space or page we may have placed an entry that
1895 // was just long enough at the head of one of the lists.
1896 if (size < minimum_size) return NULL;
1897
1898 *node_size = size;
1899 available_ -= size;
1900 *list = node->next();
1901
1777 return node; 1902 return node;
1778 } 1903 }
1779 1904
1780 1905
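The new minimum_size parameter of PickNodeFromList only tests the head of each sized list; a head node that is too small is left in place so the caller can fall through to a list of larger nodes. A simplified, self-contained sketch of that policy on a plain singly linked list follows (Node is a stand-in for V8's FreeListNode, and the evacuation-candidate filtering is omitted).

// Simplified free-list head check with a minimum-size requirement.
// Node is a stand-in for V8's FreeListNode; there is no evacuation-candidate
// filtering here.
#include <cstdio>

struct Node {
  int size;
  Node* next;
};

// Pop the head of *list if it is at least minimum_size bytes; otherwise leave
// the list untouched and return nullptr, so the caller can try a list of
// larger nodes instead of searching this one.
Node* PickNodeFromList(Node** list, int* node_size, int minimum_size) {
  Node* node = *list;
  if (node == nullptr) return nullptr;
  if (node->size < minimum_size) return nullptr;
  *node_size = node->size;
  *list = node->next;
  return node;
}

int main() {
  Node c = {96, nullptr};
  Node b = {64, &c};
  Node a = {32, &b};
  Node* small_list = &a;
  int picked_size = 0;
  // Head is 32 bytes, too small for a 48-byte request: nothing is taken.
  Node* n = PickNodeFromList(&small_list, &picked_size, 48);
  std::printf("%s\n", n == nullptr ? "no fit at head" : "picked");
  // A 16-byte request fits the head node.
  n = PickNodeFromList(&small_list, &picked_size, 16);
  std::printf("picked %d bytes\n", n ? picked_size : 0);
  return 0;
}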
1781 FreeListNode* FreeList::FindNodeFor(int size_in_bytes, int* node_size) { 1906 FreeListNode* FreeList::FindNodeFor(int size_in_bytes,
1907 int* node_size,
1908 Address limit) {
1782 FreeListNode* node = NULL; 1909 FreeListNode* node = NULL;
1783 1910
1784 if (size_in_bytes <= kSmallAllocationMax) { 1911 if (limit != NULL) {
1785 node = PickNodeFromList(&small_list_, node_size); 1912 // We may have a memory area at the head of the free list, which abuts the
1786 if (node != NULL) return node; 1913 // old linear allocation area. This happens if the linear allocation area
1914 // has been shortened to allow an incremental marking step to be performed.
1915 // In that case we prefer to return the free memory area that is contiguous
1916 // with the old linear allocation area.
1917 if (large_list_ != NULL &&
Vyacheslav Egorov (Chromium) 2011/12/21 13:22:12 The same piece of (complicated) code is repeated twice …
1918 large_list_->address() == limit &&
1919 reinterpret_cast<FreeSpace*>(large_list_)->Size() >= size_in_bytes &&
1920 !Page::FromAddress(large_list_->address())->IsEvacuationCandidate()) {
1921 FreeListNode* answer = large_list_;
1922 int size = reinterpret_cast<FreeSpace*>(large_list_)->Size();
1923 available_ -= size;
1924 *node_size = size;
1925 large_list_ = large_list_->next();
1926 ASSERT(IsVeryLong() || available_ == SumFreeLists());
1927 return answer;
1928 }
1929
1930 if (huge_list_ != NULL &&
1931 huge_list_->address() == limit &&
1932 reinterpret_cast<FreeSpace*>(huge_list_)->Size() >= size_in_bytes &&
1933 !Page::FromAddress(huge_list_->address())->IsEvacuationCandidate()) {
1934 FreeListNode* answer = huge_list_;
1935 int size = reinterpret_cast<FreeSpace*>(huge_list_)->Size();
1936 available_ -= size;
1937 *node_size = size;
1938 huge_list_ = huge_list_->next();
1939 ASSERT(IsVeryLong() || available_ == SumFreeLists());
1940 return answer;
1941 }
1787 } 1942 }
1788 1943
1789 if (size_in_bytes <= kMediumAllocationMax) { 1944 node = PickNodeFromList(&small_list_, node_size, size_in_bytes);
1790 node = PickNodeFromList(&medium_list_, node_size); 1945 ASSERT(IsVeryLong() || available_ == SumFreeLists());
1791 if (node != NULL) return node; 1946 if (node != NULL) return node;
1792 }
1793 1947
1794 if (size_in_bytes <= kLargeAllocationMax) { 1948 node = PickNodeFromList(&medium_list_, node_size, size_in_bytes);
1795 node = PickNodeFromList(&large_list_, node_size); 1949 ASSERT(IsVeryLong() || available_ == SumFreeLists());
1796 if (node != NULL) return node; 1950 if (node != NULL) return node;
1797 } 1951
1952 node = PickNodeFromList(&large_list_, node_size, size_in_bytes);
1953 ASSERT(IsVeryLong() || available_ == SumFreeLists());
1954 if (node != NULL) return node;
1798 1955
1799 for (FreeListNode** cur = &huge_list_; 1956 for (FreeListNode** cur = &huge_list_;
1800 *cur != NULL; 1957 *cur != NULL;
1801 cur = (*cur)->next_address()) { 1958 cur = (*cur)->next_address()) {
1802 FreeListNode* cur_node = *cur; 1959 FreeListNode* cur_node = *cur;
1960
1961 // Skip nodes that are on evacuation candidates. We don't want to
1962 // allocate there.
1803 while (cur_node != NULL && 1963 while (cur_node != NULL &&
1804 Page::FromAddress(cur_node->address())->IsEvacuationCandidate()) { 1964 Page::FromAddress(cur_node->address())->IsEvacuationCandidate()) {
1805 available_ -= reinterpret_cast<FreeSpace*>(cur_node)->Size(); 1965 available_ -= reinterpret_cast<FreeSpace*>(cur_node)->Size();
1806 cur_node = cur_node->next(); 1966 cur_node = cur_node->next();
1807 } 1967 }
1808 1968
1969 // Cut out the nodes from the evacuation candidates.
1809 *cur = cur_node; 1970 *cur = cur_node;
1971 ASSERT(IsVeryLong() || available_ == SumFreeLists());
1810 if (cur_node == NULL) break; 1972 if (cur_node == NULL) break;
1811 1973
1812 ASSERT((*cur)->map() == HEAP->raw_unchecked_free_space_map()); 1974 ASSERT(cur_node->map() == HEAP->raw_unchecked_free_space_map());
1813 FreeSpace* cur_as_free_space = reinterpret_cast<FreeSpace*>(*cur); 1975 FreeSpace* cur_as_free_space = reinterpret_cast<FreeSpace*>(cur_node);
1814 int size = cur_as_free_space->Size(); 1976 int size = cur_as_free_space->Size();
1815 if (size >= size_in_bytes) { 1977 if (size >= size_in_bytes) {
1816 // Large enough node found. Unlink it from the list. 1978 // Large enough node found. Unlink it from the list.
1817 node = *cur; 1979 node = cur_node;
1818 *node_size = size; 1980 *node_size = size;
1819 *cur = node->next(); 1981 *cur = node->next();
1820 break; 1982 available_ -= size;
1983 ASSERT(IsVeryLong() || available_ == SumFreeLists());
1984 return node;
1821 } 1985 }
1822 } 1986 }
1823 1987
1824 return node; 1988 return node;
1825 } 1989 }
1826 1990
1827 1991
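One way to address the duplication the reviewer flags in FindNodeFor: the identical "head of the list starts exactly at the old linear-allocation limit and is big enough" check for large_list_ and huge_list_ could be factored into a single helper called once per list. The sketch below is only an illustration of that refactoring idea against a simplified stand-in node type, not V8's FreeList.

// Sketch of factoring the repeated "head node abuts the old linear allocation
// limit" check into one helper. Node and the size bookkeeping are simplified
// stand-ins; there is no evacuation-candidate check here.
#include <cstdint>
#include <cstdio>

struct Node {
  uintptr_t address;
  int size;
  Node* next;
};

// If the head of *list starts exactly at `limit` and is at least
// `size_in_bytes` long, unlink and return it; otherwise leave the list alone.
Node* TryTakeContiguousHead(Node** list, uintptr_t limit, int size_in_bytes,
                            int* node_size, intptr_t* available) {
  Node* head = *list;
  if (head == nullptr) return nullptr;
  if (head->address != limit || head->size < size_in_bytes) return nullptr;
  *node_size = head->size;
  *available -= head->size;
  *list = head->next;
  return head;
}

int main() {
  Node big = {0x8000, 4096, nullptr};
  Node* large_list = &big;
  intptr_t available = 4096;
  int node_size = 0;
  // The old linear area ended at 0x8000, so the head node extends it.
  Node* n = TryTakeContiguousHead(&large_list, 0x8000, 256, &node_size,
                                  &available);
  std::printf("%s, node_size=%d, available=%ld\n",
              n ? "took head" : "no contiguous head", node_size,
              static_cast<long>(available));
  return 0;
}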
1828 // Allocation on the old space free list. If it succeeds then a new linear 1992 // Allocation on the old space free list. If it succeeds then a new linear
1829 // allocation space has been set up with the top and limit of the space. If 1993 // allocation space has been set up with the top and limit of the space. If
1830 // the allocation fails then NULL is returned, and the caller can perform a GC 1994 // the allocation fails then NULL is returned, and the caller can perform a GC
1831 // or allocate a new page before retrying. 1995 // or allocate a new page before retrying.
1832 HeapObject* FreeList::Allocate(int size_in_bytes) { 1996 HeapObject* FreeList::Allocate(int size_in_bytes) {
1833 ASSERT(0 < size_in_bytes); 1997 ASSERT(0 < size_in_bytes);
1834 ASSERT(size_in_bytes <= kMaxBlockSize); 1998 ASSERT(size_in_bytes <= kMaxBlockSize);
1835 ASSERT(IsAligned(size_in_bytes, kPointerSize)); 1999 ASSERT(IsAligned(size_in_bytes, kPointerSize));
1836 // Don't free list allocate if there is linear space available. 2000 // Don't free list allocate if there is linear space available.
1837 ASSERT(owner_->limit() - owner_->top() < size_in_bytes); 2001 ASSERT(owner_->limit() - owner_->top() < size_in_bytes);
1838 2002
1839 int new_node_size = 0; 2003 int new_node_size = 0;
1840 FreeListNode* new_node = FindNodeFor(size_in_bytes, &new_node_size); 2004 FreeListNode* new_node =
2005 FindNodeFor(size_in_bytes, &new_node_size, owner_->limit());
1841 if (new_node == NULL) return NULL; 2006 if (new_node == NULL) return NULL;
1842 2007
1843 available_ -= new_node_size; 2008 if (new_node->address() == owner_->limit()) {
2009 // The new freelist node we were given is an extension of the one we had
2010 // last. This is a common thing to happen when we extend a small page by
2011 // committing more memory. In this case we just add the new node to the
2012 // linear allocation area and recurse.
2013 owner_->Allocate(new_node_size);
2014 owner_->SetTop(owner_->top(), new_node->address() + new_node_size);
2015 MaybeObject* allocation = owner_->AllocateRaw(size_in_bytes);
2016 Object* answer;
2017 if (!allocation->ToObject(&answer)) return NULL;
2018 return HeapObject::cast(answer);
2019 }
2020
1844 ASSERT(IsVeryLong() || available_ == SumFreeLists()); 2021 ASSERT(IsVeryLong() || available_ == SumFreeLists());
1845 2022
1846 int bytes_left = new_node_size - size_in_bytes; 2023 int bytes_left = new_node_size - size_in_bytes;
1847 ASSERT(bytes_left >= 0); 2024 ASSERT(bytes_left >= 0);
1848 2025
1849 int old_linear_size = static_cast<int>(owner_->limit() - owner_->top()); 2026 int old_linear_size = static_cast<int>(owner_->limit() - owner_->top());
1850 // Mark the old linear allocation area with a free space map so it can be 2027 // Mark the old linear allocation area with a free space map so it can be
1851 // skipped when scanning the heap. This also puts it back in the free list 2028 // skipped when scanning the heap. This also puts it back in the free list
1852 // if it is big enough. 2029 // if it is big enough.
1853 owner_->Free(owner_->top(), old_linear_size); 2030 owner_->AddToFreeLists(owner_->top(), old_linear_size);
1854 2031
1855 #ifdef DEBUG 2032 #ifdef DEBUG
1856 for (int i = 0; i < size_in_bytes / kPointerSize; i++) { 2033 for (int i = 0; i < size_in_bytes / kPointerSize; i++) {
1857 reinterpret_cast<Object**>(new_node->address())[i] = Smi::FromInt(0); 2034 reinterpret_cast<Object**>(new_node->address())[i] = Smi::FromInt(0);
1858 } 2035 }
1859 #endif 2036 #endif
1860 2037
1861 owner_->heap()->incremental_marking()->OldSpaceStep( 2038 owner_->heap()->incremental_marking()->OldSpaceStep(
1862 size_in_bytes - old_linear_size); 2039 size_in_bytes - old_linear_size);
1863 2040
1864 // The old-space-step might have finished sweeping and restarted marking. 2041 // The old-space-step might have finished sweeping and restarted marking.
1865 // Verify that it did not turn the page of the new node into an evacuation 2042 // Verify that it did not turn the page of the new node into an evacuation
1866 // candidate. 2043 // candidate.
1867 ASSERT(!MarkCompactCollector::IsOnEvacuationCandidate(new_node)); 2044 ASSERT(!MarkCompactCollector::IsOnEvacuationCandidate(new_node));
1868 2045
1869 const int kThreshold = IncrementalMarking::kAllocatedThreshold; 2046 const int kThreshold = IncrementalMarking::kAllocatedThreshold;
1870 2047
1871 // Memory in the linear allocation area is counted as allocated. We may free 2048 // Memory in the linear allocation area is counted as allocated. We may free
1872 // a little of this again immediately - see below. 2049 // a little of this again immediately - see below.
1873 owner_->Allocate(new_node_size); 2050 owner_->Allocate(new_node_size);
1874 2051
1875 if (bytes_left > kThreshold && 2052 if (bytes_left > kThreshold &&
1876 owner_->heap()->incremental_marking()->IsMarkingIncomplete() && 2053 owner_->heap()->incremental_marking()->IsMarkingIncomplete() &&
1877 FLAG_incremental_marking_steps) { 2054 FLAG_incremental_marking_steps) {
1878 int linear_size = owner_->RoundSizeDownToObjectAlignment(kThreshold); 2055 int linear_size = owner_->RoundSizeDownToObjectAlignment(kThreshold);
1879 // We don't want to give too large linear areas to the allocator while 2056 // We don't want to give too large linear areas to the allocator while
1880 // incremental marking is going on, because we won't check again whether 2057 // incremental marking is going on, because we won't check again whether
1881 // we want to do another increment until the linear area is used up. 2058 // we want to do another increment until the linear area is used up.
1882 owner_->Free(new_node->address() + size_in_bytes + linear_size, 2059 owner_->AddToFreeLists(new_node->address() + size_in_bytes + linear_size,
1883 new_node_size - size_in_bytes - linear_size); 2060 new_node_size - size_in_bytes - linear_size);
1884 owner_->SetTop(new_node->address() + size_in_bytes, 2061 owner_->SetTop(new_node->address() + size_in_bytes,
1885 new_node->address() + size_in_bytes + linear_size); 2062 new_node->address() + size_in_bytes + linear_size);
1886 } else if (bytes_left > 0) { 2063 } else if (bytes_left > 0) {
1887 // Normally we give the rest of the node to the allocator as its new 2064 // Normally we give the rest of the node to the allocator as its new
1888 // linear allocation area. 2065 // linear allocation area.
1889 owner_->SetTop(new_node->address() + size_in_bytes, 2066 owner_->SetTop(new_node->address() + size_in_bytes,
1890 new_node->address() + new_node_size); 2067 new_node->address() + new_node_size);
1891 } else { 2068 } else {
2069 ASSERT(bytes_left == 0);
1892 // TODO(gc) Try not freeing linear allocation region when bytes_left 2070 // TODO(gc) Try not freeing linear allocation region when bytes_left
1893 // are zero. 2071 // are zero.
1894 owner_->SetTop(NULL, NULL); 2072 owner_->SetTop(NULL, NULL);
1895 } 2073 }
1896 2074
1897 return new_node; 2075 return new_node;
1898 } 2076 }
1899 2077
1900 2078
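When the node returned by FindNodeFor starts exactly at the owner's current limit, Allocate above treats it as an extension of the existing linear allocation area instead of starting a new one. The core of that idea, reduced to a simple linear-area structure with stand-in types (not V8's AllocationInfo), looks roughly like this.

// Simplified sketch: extend the linear allocation area when a free node is
// contiguous with its current limit. LinearArea is an illustrative stand-in,
// not V8's allocation info.
#include <cstddef>
#include <cstdint>
#include <cstdio>

struct LinearArea {
  uintptr_t top;
  uintptr_t limit;
};

// If the node [node_start, node_start + node_size) begins exactly at the
// current limit, push the limit out to cover it and report success; the top
// (and everything already allocated) is left untouched.
bool TryExtendLinearArea(LinearArea* area, uintptr_t node_start,
                         size_t node_size) {
  if (node_start != area->limit) return false;
  area->limit += node_size;
  return true;
}

int main() {
  LinearArea area = {0x1000, 0x2000};
  // A free node starting right at the limit extends the area.
  if (TryExtendLinearArea(&area, 0x2000, 0x800)) {
    std::printf("extended: top=%lx limit=%lx\n",
                static_cast<unsigned long>(area.top),
                static_cast<unsigned long>(area.limit));
  }
  // A node somewhere else does not.
  std::printf("%s\n", TryExtendLinearArea(&area, 0x4000, 0x800)
                          ? "extended"
                          : "not contiguous");
  return 0;
}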
1901 static intptr_t CountFreeListItemsInList(FreeListNode* n, Page* p) { 2079 static intptr_t CountFreeListItemsInList(FreeListNode* n, Page* p) {
(...skipping 112 matching lines...)
2014 // or because we have lowered the limit in order to get periodic incremental 2192 // or because we have lowered the limit in order to get periodic incremental
2015 // marking. The most reliable way to ensure that there is linear space is 2193 // marking. The most reliable way to ensure that there is linear space is
2016 // to do the allocation, then rewind the limit. 2194 // to do the allocation, then rewind the limit.
2017 ASSERT(bytes <= InitialCapacity()); 2195 ASSERT(bytes <= InitialCapacity());
2018 MaybeObject* maybe = AllocateRaw(bytes); 2196 MaybeObject* maybe = AllocateRaw(bytes);
2019 Object* object = NULL; 2197 Object* object = NULL;
2020 if (!maybe->ToObject(&object)) return false; 2198 if (!maybe->ToObject(&object)) return false;
2021 HeapObject* allocation = HeapObject::cast(object); 2199 HeapObject* allocation = HeapObject::cast(object);
2022 Address top = allocation_info_.top; 2200 Address top = allocation_info_.top;
2023 if ((top - bytes) == allocation->address()) { 2201 if ((top - bytes) == allocation->address()) {
2024 allocation_info_.top = allocation->address(); 2202 Address new_top = allocation->address();
2203 ASSERT(new_top >= Page::FromAddress(new_top - 1)->ObjectAreaStart());
2204 allocation_info_.top = new_top;
2025 return true; 2205 return true;
2026 } 2206 }
2027 // There may be a borderline case here where the allocation succeeded, but 2207 // There may be a borderline case here where the allocation succeeded, but
2028 // the limit and top have moved on to a new page. In that case we try again. 2208 // the limit and top have moved on to a new page. In that case we try again.
2029 return ReserveSpace(bytes); 2209 return ReserveSpace(bytes);
2030 } 2210 }
2031 2211
2032 2212
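ReserveSpace above uses an allocate-then-rewind trick: instead of predicting whether linear space is available, it performs the allocation, and if the object landed directly at the old top it rewinds top so the space stays reserved but unused. A simplified standalone sketch of the same idea, with a bump allocator standing in for V8's allocation_info_ (in V8 the allocation can instead land on a fresh page, in which case top is not rewound).

// Simplified "allocate, then rewind" reservation on a bump allocator.
// BumpAllocator is an illustrative stand-in, not V8's allocation_info_.
#include <cstddef>
#include <cstdint>
#include <cstdio>

struct BumpAllocator {
  uintptr_t top;
  uintptr_t limit;

  // Returns the address of the new object, or 0 if the linear area is full.
  uintptr_t Allocate(size_t bytes) {
    if (top + bytes > limit) return 0;
    uintptr_t result = top;
    top += bytes;
    return result;
  }
};

// Reserve `bytes` of linear space: do the allocation, and if it came straight
// off the old top, rewind top so the bytes stay available for a later caller.
bool ReserveSpace(BumpAllocator* alloc, size_t bytes) {
  uintptr_t old_top = alloc->top;
  uintptr_t address = alloc->Allocate(bytes);
  if (address == 0) return false;
  if (address == old_top) {
    alloc->top = old_top;  // rewind: keep the space, discard the allocation
  }
  return true;
}

int main() {
  BumpAllocator alloc = {0x1000, 0x2000};
  bool ok = ReserveSpace(&alloc, 0x100);
  std::printf("reserved: %d, top=%lx\n", ok ? 1 : 0,
              static_cast<unsigned long>(alloc.top));
  return 0;
}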
2033 void PagedSpace::PrepareForMarkCompact() { 2213 void PagedSpace::PrepareForMarkCompact() {
2034 // We don't have a linear allocation area while sweeping. It will be restored 2214 // We don't have a linear allocation area while sweeping. It will be restored
2035 // on the first allocation after the sweep. 2215 // on the first allocation after the sweep.
2036 // Mark the old linear allocation area with a free space map so it can be 2216 // Mark the old linear allocation area with a free space map so it can be
2037 // skipped when scanning the heap. 2217 // skipped when scanning the heap.
2038 int old_linear_size = static_cast<int>(limit() - top()); 2218 int old_linear_size = static_cast<int>(limit() - top());
2039 Free(top(), old_linear_size); 2219 AddToFreeLists(top(), old_linear_size);
2040 SetTop(NULL, NULL); 2220 SetTop(NULL, NULL);
2041 2221
2042 // Stop lazy sweeping and clear marking bits for unswept pages. 2222 // Stop lazy sweeping and clear marking bits for unswept pages.
2043 if (first_unswept_page_ != NULL) { 2223 if (first_unswept_page_ != NULL) {
2044 Page* p = first_unswept_page_; 2224 Page* p = first_unswept_page_;
2045 do { 2225 do {
2046 // Do not use ShouldBeSweptLazily predicate here. 2226 // Do not use ShouldBeSweptLazily predicate here.
2047 // New evacuation candidates were selected but they still have 2227 // New evacuation candidates were selected but they still have
2048 // to be swept before collection starts. 2228 // to be swept before collection starts.
2049 if (!p->WasSwept()) { 2229 if (!p->WasSwept()) {
(...skipping 21 matching lines...)
2071 if (new_top <= allocation_info_.limit) return true; 2251 if (new_top <= allocation_info_.limit) return true;
2072 2252
2073 HeapObject* new_area = free_list_.Allocate(size_in_bytes); 2253 HeapObject* new_area = free_list_.Allocate(size_in_bytes);
2074 if (new_area == NULL) new_area = SlowAllocateRaw(size_in_bytes); 2254 if (new_area == NULL) new_area = SlowAllocateRaw(size_in_bytes);
2075 if (new_area == NULL) return false; 2255 if (new_area == NULL) return false;
2076 2256
2077 int old_linear_size = static_cast<int>(limit() - top()); 2257 int old_linear_size = static_cast<int>(limit() - top());
2078 // Mark the old linear allocation area with a free space so it can be 2258 // Mark the old linear allocation area with a free space so it can be
2079 // skipped when scanning the heap. This also puts it back in the free list 2259 // skipped when scanning the heap. This also puts it back in the free list
2080 // if it is big enough. 2260 // if it is big enough.
2081 Free(top(), old_linear_size); 2261 AddToFreeLists(top(), old_linear_size);
2082 2262
2083 SetTop(new_area->address(), new_area->address() + size_in_bytes); 2263 SetTop(new_area->address(), new_area->address() + size_in_bytes);
2084 Allocate(size_in_bytes); 2264 // The AddToFreeLists call above will reduce the size of the space in the
Vyacheslav Egorov (Chromium) 2011/12/21 13:22:12 Allocate is increasing size in the accounting stats …
2265 // allocation stats. We don't need to add this linear area to the size
2266 // because it was carved out of the allocation stats, which were already
2267 // accounted for.
2085 return true; 2268 return true;
2086 } 2269 }
2087 2270
2088 2271
2089 // You have to call this last, since the implementation from PagedSpace 2272 // You have to call this last, since the implementation from PagedSpace
2090 // doesn't know that memory was 'promised' to large object space. 2273 // doesn't know that memory was 'promised' to large object space.
2091 bool LargeObjectSpace::ReserveSpace(int bytes) { 2274 bool LargeObjectSpace::ReserveSpace(int bytes) {
2092 return heap()->OldGenerationSpaceAvailable() >= bytes; 2275 return heap()->OldGenerationSpaceAvailable() >= bytes;
2093 } 2276 }
2094 2277
(...skipping 59 matching lines...)
2154 2337
2155 // Free list allocation failed and there is no next page. Fail if we have 2338 // Free list allocation failed and there is no next page. Fail if we have
2156 // hit the old generation size limit that should cause a garbage 2339 // hit the old generation size limit that should cause a garbage
2157 // collection. 2340 // collection.
2158 if (!heap()->always_allocate() && 2341 if (!heap()->always_allocate() &&
2159 heap()->OldGenerationAllocationLimitReached()) { 2342 heap()->OldGenerationAllocationLimitReached()) {
2160 return NULL; 2343 return NULL;
2161 } 2344 }
2162 2345
2163 // Try to expand the space and allocate in the new next page. 2346 // Try to expand the space and allocate in the new next page.
2164 if (Expand()) { 2347 if (Expand(size_in_bytes)) {
2165 return free_list_.Allocate(size_in_bytes); 2348 return free_list_.Allocate(size_in_bytes);
2166 } 2349 }
2167 2350
2168 // Last ditch, sweep all the remaining pages to try to find space. This may 2351 // Last ditch, sweep all the remaining pages to try to find space. This may
2169 // cause a pause. 2352 // cause a pause.
2170 if (!IsSweepingComplete()) { 2353 if (!IsSweepingComplete()) {
2171 AdvanceSweeper(kMaxInt); 2354 AdvanceSweeper(kMaxInt);
2172 2355
2173 // Retry the free list allocation. 2356 // Retry the free list allocation.
2174 HeapObject* object = free_list_.Allocate(size_in_bytes); 2357 HeapObject* object = free_list_.Allocate(size_in_bytes);
(...skipping 340 matching lines...)
2515 if (previous == NULL) { 2698 if (previous == NULL) {
2516 first_page_ = current; 2699 first_page_ = current;
2517 } else { 2700 } else {
2518 previous->set_next_page(current); 2701 previous->set_next_page(current);
2519 } 2702 }
2520 2703
2521 // Free the chunk. 2704 // Free the chunk.
2522 heap()->mark_compact_collector()->ReportDeleteIfNeeded( 2705 heap()->mark_compact_collector()->ReportDeleteIfNeeded(
2523 object, heap()->isolate()); 2706 object, heap()->isolate());
2524 size_ -= static_cast<int>(page->size()); 2707 size_ -= static_cast<int>(page->size());
2708 ASSERT(size_ >= 0);
2525 objects_size_ -= object->Size(); 2709 objects_size_ -= object->Size();
2526 page_count_--; 2710 page_count_--;
2527 2711
2528 if (is_pointer_object) { 2712 if (is_pointer_object) {
2529 heap()->QueueMemoryChunkForFree(page); 2713 heap()->QueueMemoryChunkForFree(page);
2530 } else { 2714 } else {
2531 heap()->isolate()->memory_allocator()->Free(page); 2715 heap()->isolate()->memory_allocator()->Free(page);
2532 } 2716 }
2533 } 2717 }
2534 } 2718 }
(...skipping 118 matching lines...)
2653 object->ShortPrint(); 2837 object->ShortPrint();
2654 PrintF("\n"); 2838 PrintF("\n");
2655 } 2839 }
2656 printf(" --------------------------------------\n"); 2840 printf(" --------------------------------------\n");
2657 printf(" Marked: %x, LiveCount: %x\n", mark_size, LiveBytes()); 2841 printf(" Marked: %x, LiveCount: %x\n", mark_size, LiveBytes());
2658 } 2842 }
2659 2843
2660 #endif // DEBUG 2844 #endif // DEBUG
2661 2845
2662 } } // namespace v8::internal 2846 } } // namespace v8::internal