OLD | NEW |
1 // Copyright 2011 the V8 project authors. All rights reserved. | 1 // Copyright 2011 the V8 project authors. All rights reserved. |
2 // Redistribution and use in source and binary forms, with or without | 2 // Redistribution and use in source and binary forms, with or without |
3 // modification, are permitted provided that the following conditions are | 3 // modification, are permitted provided that the following conditions are |
4 // met: | 4 // met: |
5 // | 5 // |
6 // * Redistributions of source code must retain the above copyright | 6 // * Redistributions of source code must retain the above copyright |
7 // notice, this list of conditions and the following disclaimer. | 7 // notice, this list of conditions and the following disclaimer. |
8 // * Redistributions in binary form must reproduce the above | 8 // * Redistributions in binary form must reproduce the above |
9 // copyright notice, this list of conditions and the following | 9 // copyright notice, this list of conditions and the following |
10 // disclaimer in the documentation and/or other materials provided | 10 // disclaimer in the documentation and/or other materials provided |
(...skipping 13 matching lines...) |
24 // THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT | 24 // THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT |
25 // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE | 25 // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE |
26 // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. | 26 // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. |
27 | 27 |
28 #include "v8.h" | 28 #include "v8.h" |
29 | 29 |
30 #include "liveobjectlist-inl.h" | 30 #include "liveobjectlist-inl.h" |
31 #include "macro-assembler.h" | 31 #include "macro-assembler.h" |
32 #include "mark-compact.h" | 32 #include "mark-compact.h" |
33 #include "platform.h" | 33 #include "platform.h" |
| 34 #include "snapshot.h" |
34 | 35 |
35 namespace v8 { | 36 namespace v8 { |
36 namespace internal { | 37 namespace internal { |
37 | 38 |
38 | 39 |
39 // ---------------------------------------------------------------------------- | 40 // ---------------------------------------------------------------------------- |
40 // HeapObjectIterator | 41 // HeapObjectIterator |
41 | 42 |
42 HeapObjectIterator::HeapObjectIterator(PagedSpace* space) { | 43 HeapObjectIterator::HeapObjectIterator(PagedSpace* space) { |
43 // You can't actually iterate over the anchor page. It is not a real page, | 44 // You can't actually iterate over the anchor page. It is not a real page, |
(...skipping 212 matching lines...) |
256 | 257 |
257 | 258 |
258 // ----------------------------------------------------------------------------- | 259 // ----------------------------------------------------------------------------- |
259 // MemoryAllocator | 260 // MemoryAllocator |
260 // | 261 // |
261 | 262 |
262 MemoryAllocator::MemoryAllocator(Isolate* isolate) | 263 MemoryAllocator::MemoryAllocator(Isolate* isolate) |
263 : isolate_(isolate), | 264 : isolate_(isolate), |
264 capacity_(0), | 265 capacity_(0), |
265 capacity_executable_(0), | 266 capacity_executable_(0), |
266 size_(0), | 267 memory_allocator_reserved_(0), |
267 size_executable_(0) { | 268 size_executable_(0) { |
268 } | 269 } |
269 | 270 |
270 | 271 |
271 bool MemoryAllocator::Setup(intptr_t capacity, intptr_t capacity_executable) { | 272 bool MemoryAllocator::Setup(intptr_t capacity, intptr_t capacity_executable) { |
272 capacity_ = RoundUp(capacity, Page::kPageSize); | 273 capacity_ = RoundUp(capacity, Page::kPageSize); |
273 capacity_executable_ = RoundUp(capacity_executable, Page::kPageSize); | 274 capacity_executable_ = RoundUp(capacity_executable, Page::kPageSize); |
274 ASSERT_GE(capacity_, capacity_executable_); | 275 ASSERT_GE(capacity_, capacity_executable_); |
275 | 276 |
276 size_ = 0; | 277 memory_allocator_reserved_ = 0; |
277 size_executable_ = 0; | 278 size_executable_ = 0; |
278 | 279 |
279 return true; | 280 return true; |
280 } | 281 } |
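Side note on the arithmetic: Setup rounds both capacities up to a whole number of pages. A minimal sketch of what that rounding computes, assuming a positive granularity; V8's own RoundUp helper is equivalent in effect for these arguments.

#include <cassert>
#include <cstdint>

intptr_t RoundUpTo(intptr_t value, intptr_t granularity) {
  assert(granularity > 0);
  // Smallest multiple of granularity that is >= value.
  return ((value + granularity - 1) / granularity) * granularity;
}

// Example: with 4 KB pages, RoundUpTo(5000, 4096) == 8192.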
281 | 282 |
282 | 283 |
283 void MemoryAllocator::TearDown() { | 284 void MemoryAllocator::TearDown() { |
284 // Check that spaces were torn down before MemoryAllocator. | 285 // Check that spaces were torn down before MemoryAllocator. |
285 ASSERT(size_ == 0); | 286 CHECK(memory_allocator_reserved_ == 0); |
286 // TODO(gc) this will be true again when we fix FreeMemory. | 287 // TODO(gc) this will be true again when we fix FreeMemory. |
287 // ASSERT(size_executable_ == 0); | 288 // ASSERT(size_executable_ == 0); |
288 capacity_ = 0; | 289 capacity_ = 0; |
289 capacity_executable_ = 0; | 290 capacity_executable_ = 0; |
290 } | 291 } |
291 | 292 |
292 | 293 |
293 void MemoryAllocator::FreeMemory(VirtualMemory* reservation, | 294 void MemoryAllocator::FreeMemory(VirtualMemory* reservation, |
294 Executability executable) { | 295 Executability executable) { |
295 // TODO(gc) make code_range part of memory allocator? | 296 // TODO(gc) make code_range part of memory allocator? |
296 ASSERT(reservation->IsReserved()); | 297 ASSERT(reservation->IsReserved()); |
297 size_t size = reservation->size(); | 298 size_t size = reservation->size(); |
298 ASSERT(size_ >= size); | 299 ASSERT(memory_allocator_reserved_ >= size); |
299 size_ -= size; | 300 memory_allocator_reserved_ -= size; |
300 | 301 |
301 isolate_->counters()->memory_allocated()->Decrement(static_cast<int>(size)); | 302 isolate_->counters()->memory_allocated()->Decrement(static_cast<int>(size)); |
302 | 303 |
303 if (executable == EXECUTABLE) { | 304 if (executable == EXECUTABLE) { |
304 ASSERT(size_executable_ >= size); | 305 ASSERT(size_executable_ >= size); |
305 size_executable_ -= size; | 306 size_executable_ -= size; |
306 } | 307 } |
307 // Code which is part of the code-range does not have its own VirtualMemory. | 308 // Code which is part of the code-range does not have its own VirtualMemory. |
308 ASSERT(!isolate_->code_range()->contains( | 309 ASSERT(!isolate_->code_range()->contains( |
309 static_cast<Address>(reservation->address()))); | 310 static_cast<Address>(reservation->address()))); |
310 ASSERT(executable == NOT_EXECUTABLE || !isolate_->code_range()->exists()); | 311 ASSERT(executable == NOT_EXECUTABLE || !isolate_->code_range()->exists()); |
311 reservation->Release(); | 312 reservation->Release(); |
312 } | 313 } |
313 | 314 |
314 | 315 |
315 void MemoryAllocator::FreeMemory(Address base, | 316 void MemoryAllocator::FreeMemory(Address base, |
316 size_t size, | 317 size_t size, |
317 Executability executable) { | 318 Executability executable) { |
318 // TODO(gc) make code_range part of memory allocator? | 319 // TODO(gc) make code_range part of memory allocator? |
319 ASSERT(size_ >= size); | 320 ASSERT(memory_allocator_reserved_ >= size); |
320 size_ -= size; | 321 memory_allocator_reserved_ -= size; |
321 | 322 |
322 isolate_->counters()->memory_allocated()->Decrement(static_cast<int>(size)); | 323 isolate_->counters()->memory_allocated()->Decrement(static_cast<int>(size)); |
323 | 324 |
324 if (executable == EXECUTABLE) { | 325 if (executable == EXECUTABLE) { |
325 ASSERT(size_executable_ >= size); | 326 ASSERT(size_executable_ >= size); |
326 size_executable_ -= size; | 327 size_executable_ -= size; |
327 } | 328 } |
328 if (isolate_->code_range()->contains(static_cast<Address>(base))) { | 329 if (isolate_->code_range()->contains(static_cast<Address>(base))) { |
329 ASSERT(executable == EXECUTABLE); | 330 ASSERT(executable == EXECUTABLE); |
330 isolate_->code_range()->FreeRawMemory(base, size); | 331 isolate_->code_range()->FreeRawMemory(base, size); |
331 } else { | 332 } else { |
332 ASSERT(executable == NOT_EXECUTABLE || !isolate_->code_range()->exists()); | 333 ASSERT(executable == NOT_EXECUTABLE || !isolate_->code_range()->exists()); |
333 bool result = VirtualMemory::ReleaseRegion(base, size); | 334 bool result = VirtualMemory::ReleaseRegion(base, size); |
334 USE(result); | 335 USE(result); |
335 ASSERT(result); | 336 ASSERT(result); |
336 } | 337 } |
337 } | 338 } |
338 | 339 |
339 | 340 |
340 Address MemoryAllocator::ReserveAlignedMemory(size_t size, | 341 Address MemoryAllocator::ReserveAlignedMemory(size_t size, |
341 size_t alignment, | 342 size_t alignment, |
342 VirtualMemory* controller) { | 343 VirtualMemory* controller) { |
343 VirtualMemory reservation(size, alignment); | 344 VirtualMemory reservation(size, alignment); |
344 | 345 |
345 if (!reservation.IsReserved()) return NULL; | 346 if (!reservation.IsReserved()) return NULL; |
346 size_ += reservation.size(); | 347 memory_allocator_reserved_ += reservation.size(); |
347 Address base = RoundUp(static_cast<Address>(reservation.address()), | 348 Address base = RoundUp(static_cast<Address>(reservation.address()), |
348 alignment); | 349 alignment); |
349 controller->TakeControl(&reservation); | 350 controller->TakeControl(&reservation); |
350 return base; | 351 return base; |
351 } | 352 } |
352 | 353 |
353 | 354 |
354 Address MemoryAllocator::AllocateAlignedMemory(size_t size, | 355 Address MemoryAllocator::AllocateAlignedMemory(size_t size, |
| 356 size_t reserved_size, |
355 size_t alignment, | 357 size_t alignment, |
356 Executability executable, | 358 Executability executable, |
357 VirtualMemory* controller) { | 359 VirtualMemory* controller) { |
| 360 ASSERT(RoundUp(reserved_size, OS::CommitPageSize()) >= |
| 361 RoundUp(size, OS::CommitPageSize())); |
358 VirtualMemory reservation; | 362 VirtualMemory reservation; |
359 Address base = ReserveAlignedMemory(size, alignment, &reservation); | 363 Address base = ReserveAlignedMemory(reserved_size, alignment, &reservation); |
360 if (base == NULL) return NULL; | 364 if (base == NULL) return NULL; |
361 if (!reservation.Commit(base, | 365 if (!reservation.Commit(base, |
362 size, | 366 size, |
363 executable == EXECUTABLE)) { | 367 executable == EXECUTABLE)) { |
364 return NULL; | 368 return NULL; |
365 } | 369 } |
366 controller->TakeControl(&reservation); | 370 controller->TakeControl(&reservation); |
367 return base; | 371 return base; |
368 } | 372 } |
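For context: AllocateAlignedMemory now reserves reserved_size bytes of address space but commits only the first size bytes. A self-contained sketch of that reserve-then-commit pattern on POSIX, using mmap and mprotect; this is an illustration under those assumptions, not V8's VirtualMemory implementation.

#include <sys/mman.h>
#include <cstddef>

// Reserve address space with no access rights, then commit (enable) only
// the initial part. The uncommitted tail costs address space, not memory.
void* ReserveThenCommit(size_t reserve_size, size_t commit_size) {
  void* base = mmap(NULL, reserve_size, PROT_NONE,
                    MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
  if (base == MAP_FAILED) return NULL;
  if (mprotect(base, commit_size, PROT_READ | PROT_WRITE) != 0) {
    munmap(base, reserve_size);
    return NULL;
  }
  return base;
}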
369 | 373 |
370 | 374 |
371 void Page::InitializeAsAnchor(PagedSpace* owner) { | 375 void Page::InitializeAsAnchor(PagedSpace* owner) { |
372 set_owner(owner); | 376 set_owner(owner); |
373 set_prev_page(this); | 377 set_prev_page(this); |
374 set_next_page(this); | 378 set_next_page(this); |
375 } | 379 } |
376 | 380 |
377 | 381 |
| 382 void Page::CommitMore(intptr_t space_needed) { |
| 383 intptr_t reserved_page_size = reservation_.IsReserved() ? |
| 384 reservation_.size() : |
| 385 Page::kPageSize; |
| 386 ASSERT(size() < reserved_page_size); |
| 387 intptr_t expand = Min(Max(size(), space_needed), reserved_page_size - size()); |
| 388 // At least double the page size (this also rounds to OS page size). |
| 389 expand = Min(reserved_page_size - size(), |
| 390 RoundUpToPowerOf2(size() + expand) - size()); |
| 391 ASSERT(expand <= kPageSize - size()); |
| 392 ASSERT(expand <= reserved_page_size - size()); |
| 393 Executability executable = |
| 394 IsFlagSet(IS_EXECUTABLE) ? EXECUTABLE : NOT_EXECUTABLE; |
| 395 Address old_end = ObjectAreaEnd(); |
| 396 if (!VirtualMemory::CommitRegion(old_end, expand, executable)) return; |
| 397 |
| 398 set_size(size() + expand); |
| 399 |
| 400 PagedSpace* paged_space = reinterpret_cast<PagedSpace*>(owner()); |
| 401 paged_space->heap()->isolate()->memory_allocator()->AllocationBookkeeping( |
| 402 paged_space, |
| 403 old_end, |
| 404 0, // No new memory was reserved. |
| 405 expand, // New memory committed. |
| 406 executable); |
| 407 paged_space->IncreaseCapacity(expand); |
| 408 |
| 409 // In map space we have to align the expanded area with the correct map |
| 410 // alignment. |
| 411 uintptr_t end_int = old_end - ObjectAreaStart(); |
| 412 uintptr_t aligned_end_int = |
| 413 end_int - end_int % paged_space->ObjectAlignment(); |
| 414 if (aligned_end_int < end_int) { |
| 415 aligned_end_int += paged_space->ObjectAlignment(); |
| 416 } |
| 417 Address new_area = |
| 418 reinterpret_cast<Address>(ObjectAreaStart() + aligned_end_int); |
| 419 // This will waste the space for one map per doubling of the page size until |
| 420 // the next GC. |
| 421 paged_space->AddToFreeLists(old_end, new_area - old_end); |
| 422 |
| 423 expand -= (new_area - old_end); |
| 424 |
| 425 paged_space->AddToFreeLists(new_area, expand); |
| 426 } |
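The expansion policy above grows a partially committed page by at least space_needed, at least doubles its committed size, and never exceeds the reservation. A standalone sketch of that computation; the bit-twiddling NextPowerOf2 shown is the classic variant and is an assumption about the RoundUpToPowerOf2 helper's behavior, not a copy of V8's.

#include <algorithm>
#include <stdint.h>

// Round a positive value up to the next power of two.
static uint64_t NextPowerOf2(uint64_t x) {
  x--;
  x |= x >> 1;  x |= x >> 2;  x |= x >> 4;
  x |= x >> 8;  x |= x >> 16; x |= x >> 32;
  return x + 1;
}

// Mirrors the 'expand' arithmetic in Page::CommitMore.
int64_t ComputeExpansion(int64_t size, int64_t needed, int64_t reserved) {
  int64_t expand = std::min(std::max(size, needed), reserved - size);
  expand = std::min(reserved - size,
                    static_cast<int64_t>(NextPowerOf2(size + expand)) - size);
  return expand;  // bytes to commit at the current end of the page
}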
| 427 |
| 428 |
378 NewSpacePage* NewSpacePage::Initialize(Heap* heap, | 429 NewSpacePage* NewSpacePage::Initialize(Heap* heap, |
379 Address start, | 430 Address start, |
380 SemiSpace* semi_space) { | 431 SemiSpace* semi_space) { |
381 MemoryChunk* chunk = MemoryChunk::Initialize(heap, | 432 MemoryChunk* chunk = MemoryChunk::Initialize(heap, |
382 start, | 433 start, |
383 Page::kPageSize, | 434 Page::kPageSize, |
384 NOT_EXECUTABLE, | 435 NOT_EXECUTABLE, |
385 semi_space); | 436 semi_space); |
386 chunk->set_next_chunk(NULL); | 437 chunk->set_next_chunk(NULL); |
387 chunk->set_prev_chunk(NULL); | 438 chunk->set_prev_chunk(NULL); |
(...skipping 65 matching lines...) |
453 ClearFlag(SCAN_ON_SCAVENGE); | 504 ClearFlag(SCAN_ON_SCAVENGE); |
454 } | 505 } |
455 next_chunk_->prev_chunk_ = prev_chunk_; | 506 next_chunk_->prev_chunk_ = prev_chunk_; |
456 prev_chunk_->next_chunk_ = next_chunk_; | 507 prev_chunk_->next_chunk_ = next_chunk_; |
457 prev_chunk_ = NULL; | 508 prev_chunk_ = NULL; |
458 next_chunk_ = NULL; | 509 next_chunk_ = NULL; |
459 } | 510 } |
460 | 511 |
461 | 512 |
462 MemoryChunk* MemoryAllocator::AllocateChunk(intptr_t body_size, | 513 MemoryChunk* MemoryAllocator::AllocateChunk(intptr_t body_size, |
| 514 intptr_t committed_body_size, |
463 Executability executable, | 515 Executability executable, |
464 Space* owner) { | 516 Space* owner) { |
465 size_t chunk_size = MemoryChunk::kObjectStartOffset + body_size; | 517 ASSERT(body_size >= committed_body_size); |
| 518 size_t chunk_size = RoundUp(MemoryChunk::kObjectStartOffset + body_size, |
| 519 OS::CommitPageSize()); |
| 520 intptr_t committed_chunk_size = |
| 521 committed_body_size + MemoryChunk::kObjectStartOffset; |
| 522 committed_chunk_size = RoundUp(committed_chunk_size, OS::CommitPageSize()); |
466 Heap* heap = isolate_->heap(); | 523 Heap* heap = isolate_->heap(); |
467 Address base = NULL; | 524 Address base = NULL; |
468 VirtualMemory reservation; | 525 VirtualMemory reservation; |
469 if (executable == EXECUTABLE) { | 526 if (executable == EXECUTABLE) { |
470 // Check executable memory limit. | 527 // Check executable memory limit. |
471 if (size_executable_ + chunk_size > capacity_executable_) { | 528 if (size_executable_ + chunk_size > capacity_executable_) { |
472 LOG(isolate_, | 529 LOG(isolate_, |
473 StringEvent("MemoryAllocator::AllocateRawMemory", | 530 StringEvent("MemoryAllocator::AllocateRawMemory", |
474 "V8 Executable Allocation capacity exceeded")); | 531 "V8 Executable Allocation capacity exceeded")); |
475 return NULL; | 532 return NULL; |
476 } | 533 } |
477 | 534 |
478 // Allocate executable memory either from code range or from the | 535 // Allocate executable memory either from code range or from the |
479 // OS. | 536 // OS. |
480 if (isolate_->code_range()->exists()) { | 537 if (isolate_->code_range()->exists()) { |
481 base = isolate_->code_range()->AllocateRawMemory(chunk_size, &chunk_size); | 538 base = isolate_->code_range()->AllocateRawMemory(chunk_size, &chunk_size); |
482 ASSERT(IsAligned(reinterpret_cast<intptr_t>(base), | 539 ASSERT(IsAligned(reinterpret_cast<intptr_t>(base), |
483 MemoryChunk::kAlignment)); | 540 MemoryChunk::kAlignment)); |
484 if (base == NULL) return NULL; | 541 if (base == NULL) return NULL; |
485 size_ += chunk_size; | 542 // AllocateAlignedMemory normally updates the reserved-memory count |
486 // Update executable memory size. | 543 // of the allocator, but we do not go through it when a code range |
487 size_executable_ += chunk_size; | 544 // exists, so we update the count here. |
| 545 memory_allocator_reserved_ += chunk_size; |
488 } else { | 546 } else { |
489 base = AllocateAlignedMemory(chunk_size, | 547 base = AllocateAlignedMemory(committed_chunk_size, |
| 548 chunk_size, |
490 MemoryChunk::kAlignment, | 549 MemoryChunk::kAlignment, |
491 executable, | 550 executable, |
492 &reservation); | 551 &reservation); |
493 if (base == NULL) return NULL; | 552 if (base == NULL) return NULL; |
494 // Update executable memory size. | |
495 size_executable_ += reservation.size(); | |
496 } | 553 } |
497 } else { | 554 } else { |
498 base = AllocateAlignedMemory(chunk_size, | 555 base = AllocateAlignedMemory(committed_chunk_size, |
| 556 chunk_size, |
499 MemoryChunk::kAlignment, | 557 MemoryChunk::kAlignment, |
500 executable, | 558 executable, |
501 &reservation); | 559 &reservation); |
502 | 560 |
503 if (base == NULL) return NULL; | 561 if (base == NULL) return NULL; |
504 } | 562 } |
505 | 563 |
506 #ifdef DEBUG | 564 AllocationBookkeeping( |
507 ZapBlock(base, chunk_size); | 565 owner, base, chunk_size, committed_chunk_size, executable); |
508 #endif | |
509 isolate_->counters()->memory_allocated()-> | |
510 Increment(static_cast<int>(chunk_size)); | |
511 | |
512 LOG(isolate_, NewEvent("MemoryChunk", base, chunk_size)); | |
513 if (owner != NULL) { | |
514 ObjectSpace space = static_cast<ObjectSpace>(1 << owner->identity()); | |
515 PerformAllocationCallback(space, kAllocationActionAllocate, chunk_size); | |
516 } | |
517 | 566 |
518 MemoryChunk* result = MemoryChunk::Initialize(heap, | 567 MemoryChunk* result = MemoryChunk::Initialize(heap, |
519 base, | 568 base, |
520 chunk_size, | 569 committed_chunk_size, |
521 executable, | 570 executable, |
522 owner); | 571 owner); |
523 result->set_reserved_memory(&reservation); | 572 result->set_reserved_memory(&reservation); |
524 return result; | 573 return result; |
525 } | 574 } |
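To make the two size arguments concrete: the chunk reserves room for the full body but commits only the header plus the committed body, each rounded to the OS commit granularity. A small sketch under illustrative constants (the real values come from MemoryChunk::kObjectStartOffset and OS::CommitPageSize).

#include <cstddef>

const size_t kHeaderSize = 256;          // stand-in for kObjectStartOffset
const size_t kCommitGranularity = 4096;  // stand-in for the OS page size

static size_t RoundUpSize(size_t value, size_t granularity) {
  return ((value + granularity - 1) / granularity) * granularity;
}

void ChunkSizes(size_t body_size, size_t committed_body_size,
                size_t* reserved_size, size_t* committed_size) {
  *reserved_size = RoundUpSize(kHeaderSize + body_size, kCommitGranularity);
  *committed_size =
      RoundUpSize(kHeaderSize + committed_body_size, kCommitGranularity);
}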
526 | 575 |
527 | 576 |
528 Page* MemoryAllocator::AllocatePage(PagedSpace* owner, | 577 void MemoryAllocator::AllocationBookkeeping(Space* owner, |
| 578 Address base, |
| 579 intptr_t reserved_chunk_size, |
| 580 intptr_t committed_chunk_size, |
| 581 Executability executable) { |
| 582 if (executable == EXECUTABLE) { |
| 583 // Update executable memory size. |
| 584 size_executable_ += reserved_chunk_size; |
| 585 } |
| 586 |
| 587 #ifdef DEBUG |
| 588 ZapBlock(base, committed_chunk_size); |
| 589 #endif |
| 590 isolate_->counters()->memory_allocated()-> |
| 591 Increment(static_cast<int>(committed_chunk_size)); |
| 592 |
| 593 LOG(isolate_, NewEvent("MemoryChunk", base, committed_chunk_size)); |
| 594 if (owner != NULL) { |
| 595 ObjectSpace space = static_cast<ObjectSpace>(1 << owner->identity()); |
| 596 PerformAllocationCallback( |
| 597 space, kAllocationActionAllocate, committed_chunk_size); |
| 598 } |
| 599 } |
| 600 |
| 601 |
| 602 Page* MemoryAllocator::AllocatePage(intptr_t object_area_size, |
| 603 PagedSpace* owner, |
529 Executability executable) { | 604 Executability executable) { |
530 MemoryChunk* chunk = AllocateChunk(Page::kObjectAreaSize, executable, owner); | 605 ASSERT(object_area_size <= Page::kObjectAreaSize); |
| 606 |
| 607 MemoryChunk* chunk = |
| 608 AllocateChunk(Page::kObjectAreaSize, object_area_size, executable, owner); |
531 | 609 |
532 if (chunk == NULL) return NULL; | 610 if (chunk == NULL) return NULL; |
533 | 611 |
534 return Page::Initialize(isolate_->heap(), chunk, executable, owner); | 612 return Page::Initialize(isolate_->heap(), chunk, executable, owner); |
535 } | 613 } |
536 | 614 |
537 | 615 |
538 LargePage* MemoryAllocator::AllocateLargePage(intptr_t object_size, | 616 LargePage* MemoryAllocator::AllocateLargePage(intptr_t object_size, |
539 Executability executable, | 617 Executability executable, |
540 Space* owner) { | 618 Space* owner) { |
541 MemoryChunk* chunk = AllocateChunk(object_size, executable, owner); | 619 MemoryChunk* chunk = |
| 620 AllocateChunk(object_size, object_size, executable, owner); |
542 if (chunk == NULL) return NULL; | 621 if (chunk == NULL) return NULL; |
543 return LargePage::Initialize(isolate_->heap(), chunk); | 622 return LargePage::Initialize(isolate_->heap(), chunk); |
544 } | 623 } |
545 | 624 |
546 | 625 |
547 void MemoryAllocator::Free(MemoryChunk* chunk) { | 626 void MemoryAllocator::Free(MemoryChunk* chunk) { |
548 LOG(isolate_, DeleteEvent("MemoryChunk", chunk)); | 627 LOG(isolate_, DeleteEvent("MemoryChunk", chunk)); |
549 if (chunk->owner() != NULL) { | 628 if (chunk->owner() != NULL) { |
550 ObjectSpace space = | 629 ObjectSpace space = |
551 static_cast<ObjectSpace>(1 << chunk->owner()->identity()); | 630 static_cast<ObjectSpace>(1 << chunk->owner()->identity()); |
552 PerformAllocationCallback(space, kAllocationActionFree, chunk->size()); | 631 PerformAllocationCallback(space, kAllocationActionFree, chunk->size()); |
553 } | 632 } |
554 | 633 |
555 delete chunk->slots_buffer(); | 634 delete chunk->slots_buffer(); |
556 delete chunk->skip_list(); | 635 delete chunk->skip_list(); |
557 | 636 |
558 VirtualMemory* reservation = chunk->reserved_memory(); | 637 VirtualMemory* reservation = chunk->reserved_memory(); |
559 if (reservation->IsReserved()) { | 638 if (reservation->IsReserved()) { |
560 FreeMemory(reservation, chunk->executable()); | 639 FreeMemory(reservation, chunk->executable()); |
561 } else { | 640 } else { |
| 641 // When we do not have a reservation, it is because this allocation |
| 642 // is part of the huge chunk of memory reserved for code on x64. In |
| 643 // that case the size was rounded up to the page size on allocation, |
| 644 // so we do the same now when freeing. |
562 FreeMemory(chunk->address(), | 645 FreeMemory(chunk->address(), |
563 chunk->size(), | 646 RoundUp(chunk->size(), Page::kPageSize), |
564 chunk->executable()); | 647 chunk->executable()); |
565 } | 648 } |
566 } | 649 } |
567 | 650 |
568 | 651 |
569 bool MemoryAllocator::CommitBlock(Address start, | 652 bool MemoryAllocator::CommitBlock(Address start, |
570 size_t size, | 653 size_t size, |
571 Executability executable) { | 654 Executability executable) { |
572 if (!VirtualMemory::CommitRegion(start, size, executable)) return false; | 655 if (!VirtualMemory::CommitRegion(start, size, executable)) return false; |
573 #ifdef DEBUG | 656 #ifdef DEBUG |
(...skipping 59 matching lines...) |
633 memory_allocation_callbacks_.Remove(i); | 716 memory_allocation_callbacks_.Remove(i); |
634 return; | 717 return; |
635 } | 718 } |
636 } | 719 } |
637 UNREACHABLE(); | 720 UNREACHABLE(); |
638 } | 721 } |
639 | 722 |
640 | 723 |
641 #ifdef DEBUG | 724 #ifdef DEBUG |
642 void MemoryAllocator::ReportStatistics() { | 725 void MemoryAllocator::ReportStatistics() { |
643 float pct = static_cast<float>(capacity_ - size_) / capacity_; | 726 float pct = |
| 727 static_cast<float>(capacity_ - memory_allocator_reserved_) / capacity_; |
644 PrintF(" capacity: %" V8_PTR_PREFIX "d" | 728 PrintF(" capacity: %" V8_PTR_PREFIX "d" |
645 ", used: %" V8_PTR_PREFIX "d" | 729 ", used: %" V8_PTR_PREFIX "d" |
646 ", available: %%%d\n\n", | 730 ", available: %%%d\n\n", |
647 capacity_, size_, static_cast<int>(pct*100)); | 731 capacity_, memory_allocator_reserved_, static_cast<int>(pct*100)); |
648 } | 732 } |
649 #endif | 733 #endif |
650 | 734 |
651 // ----------------------------------------------------------------------------- | 735 // ----------------------------------------------------------------------------- |
652 // PagedSpace implementation | 736 // PagedSpace implementation |
653 | 737 |
654 PagedSpace::PagedSpace(Heap* heap, | 738 PagedSpace::PagedSpace(Heap* heap, |
655 intptr_t max_capacity, | 739 intptr_t max_capacity, |
656 AllocationSpace id, | 740 AllocationSpace id, |
657 Executability executable) | 741 Executability executable) |
(...skipping 46 matching lines...) |
704 Address next = cur + obj->Size(); | 788 Address next = cur + obj->Size(); |
705 if ((cur <= addr) && (addr < next)) return obj; | 789 if ((cur <= addr) && (addr < next)) return obj; |
706 } | 790 } |
707 | 791 |
708 UNREACHABLE(); | 792 UNREACHABLE(); |
709 return Failure::Exception(); | 793 return Failure::Exception(); |
710 } | 794 } |
711 | 795 |
712 bool PagedSpace::CanExpand() { | 796 bool PagedSpace::CanExpand() { |
713 ASSERT(max_capacity_ % Page::kObjectAreaSize == 0); | 797 ASSERT(max_capacity_ % Page::kObjectAreaSize == 0); |
714 ASSERT(Capacity() % Page::kObjectAreaSize == 0); | |
715 | 798 |
716 if (Capacity() == max_capacity_) return false; | 799 if (Capacity() == max_capacity_) return false; |
717 | 800 |
718 ASSERT(Capacity() < max_capacity_); | 801 ASSERT(Capacity() < max_capacity_); |
719 | 802 |
720 // Are we going to exceed capacity for this space? | 803 // Are we going to exceed capacity for this space? |
721 if ((Capacity() + Page::kPageSize) > max_capacity_) return false; | 804 if ((Capacity() + Page::kPageSize) > max_capacity_) return false; |
722 | 805 |
723 return true; | 806 return true; |
724 } | 807 } |
725 | 808 |
726 bool PagedSpace::Expand() { | 809 bool PagedSpace::Expand(intptr_t size_in_bytes) { |
727 if (!CanExpand()) return false; | 810 if (!CanExpand()) return false; |
728 | 811 |
| 812 Page* last_page = anchor_.prev_page(); |
| 813 if (last_page != &anchor_) { |
| 814 // We have run out of linear allocation space. This may be because |
| 815 // the most recently allocated page (stored last in the list) is a |
| 816 // small one that starts on a page-aligned boundary but does not have |
| 817 // a full kPageSize of committed memory. Let's commit more for it. |
| 818 intptr_t reserved_page_size = last_page->reserved_memory()->IsReserved() ? |
| 819 last_page->reserved_memory()->size() : |
| 820 Page::kPageSize; |
| 821 if (last_page->size() < reserved_page_size && |
| 822 reserved_page_size - last_page->size() >= size_in_bytes && |
| 823 !last_page->IsEvacuationCandidate() && |
| 824 last_page->WasSwept()) { |
| 825 last_page->CommitMore(size_in_bytes); |
| 826 return true; |
| 827 } |
| 828 } |
| 829 |
| 830 // We initially commit only part of the page, but deserialization |
| 831 // of the initial snapshot assumes that it can deserialize into |
| 832 // linear memory of a certain size per space, so some of the spaces |
| 833 // need to have a little more committed memory. |
| 834 int initial = |
| 835 Max(Page::kInitiallyCommittedPartOfPage, kMinimumSpaceSizes[identity()]); |
| 836 |
| 837 ASSERT(initial <= Page::kPageSize); |
| 838 ASSERT(Page::kPageSize - Page::kInitiallyCommittedPartOfPage < |
| 839 Page::kObjectAreaSize); |
| 840 |
| 841 intptr_t expansion_size = |
| 842 Max(initial, |
| 843 RoundUpToPowerOf2(MemoryChunk::kObjectStartOffset + size_in_bytes)) - |
| 844 MemoryChunk::kObjectStartOffset; |
| 845 |
729 Page* p = heap()->isolate()->memory_allocator()-> | 846 Page* p = heap()->isolate()->memory_allocator()-> |
730 AllocatePage(this, executable()); | 847 AllocatePage(expansion_size, this, executable()); |
731 if (p == NULL) return false; | 848 if (p == NULL) return false; |
732 | 849 |
733 ASSERT(Capacity() <= max_capacity_); | 850 ASSERT(Capacity() <= max_capacity_); |
734 | 851 |
735 p->InsertAfter(anchor_.prev_page()); | 852 p->InsertAfter(anchor_.prev_page()); |
736 | 853 |
737 return true; | 854 return true; |
738 } | 855 } |
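The initial commitment computed above covers the requested allocation with a power-of-two-sized committed area, bounded below by the per-space minimum needed for snapshot deserialization. A compact sketch of that computation with invented constant names; only the shape of the arithmetic is taken from the patch.

#include <algorithm>
#include <stdint.h>

const int64_t kHeaderBytes = 256;               // stand-in for kObjectStartOffset
const int64_t kInitialCommitBytes = 16 * 1024;  // stand-in, port-specific

static int64_t NextPow2(int64_t x) {
  int64_t p = 1;
  while (p < x) p <<= 1;
  return p;
}

// Initially committed object-area size for a new page, given the pending
// allocation and the space's snapshot-driven minimum.
int64_t InitialObjectArea(int64_t size_in_bytes, int64_t space_minimum) {
  int64_t initial = std::max(kInitialCommitBytes, space_minimum);
  return std::max(initial, NextPow2(kHeaderBytes + size_in_bytes)) -
         kHeaderBytes;
}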
739 | 856 |
740 | 857 |
(...skipping 22 matching lines...) |
763 if (page->WasSwept()) { | 880 if (page->WasSwept()) { |
764 intptr_t size = free_list_.EvictFreeListItems(page); | 881 intptr_t size = free_list_.EvictFreeListItems(page); |
765 accounting_stats_.AllocateBytes(size); | 882 accounting_stats_.AllocateBytes(size); |
766 ASSERT_EQ(Page::kObjectAreaSize, static_cast<int>(size)); | 883 ASSERT_EQ(Page::kObjectAreaSize, static_cast<int>(size)); |
767 } | 884 } |
768 | 885 |
769 if (Page::FromAllocationTop(allocation_info_.top) == page) { | 886 if (Page::FromAllocationTop(allocation_info_.top) == page) { |
770 allocation_info_.top = allocation_info_.limit = NULL; | 887 allocation_info_.top = allocation_info_.limit = NULL; |
771 } | 888 } |
772 | 889 |
| 890 intptr_t size = page->ObjectAreaEnd() - page->ObjectAreaStart(); |
| 891 |
773 page->Unlink(); | 892 page->Unlink(); |
774 if (page->IsFlagSet(MemoryChunk::CONTAINS_ONLY_DATA)) { | 893 if (page->IsFlagSet(MemoryChunk::CONTAINS_ONLY_DATA)) { |
775 heap()->isolate()->memory_allocator()->Free(page); | 894 heap()->isolate()->memory_allocator()->Free(page); |
776 } else { | 895 } else { |
777 heap()->QueueMemoryChunkForFree(page); | 896 heap()->QueueMemoryChunkForFree(page); |
778 } | 897 } |
779 | 898 |
780 ASSERT(Capacity() > 0); | 899 ASSERT(Capacity() > 0); |
781 ASSERT(Capacity() % Page::kObjectAreaSize == 0); | 900 accounting_stats_.ShrinkSpace(size); |
782 accounting_stats_.ShrinkSpace(Page::kObjectAreaSize); | |
783 } | 901 } |
784 | 902 |
785 | 903 |
786 void PagedSpace::ReleaseAllUnusedPages() { | 904 void PagedSpace::ReleaseAllUnusedPages() { |
787 PageIterator it(this); | 905 PageIterator it(this); |
788 while (it.has_next()) { | 906 while (it.has_next()) { |
789 Page* page = it.next(); | 907 Page* page = it.next(); |
790 if (!page->WasSwept()) { | 908 if (!page->WasSwept()) { |
791 if (page->LiveBytes() == 0) ReleasePage(page); | 909 if (page->LiveBytes() == 0) ReleasePage(page); |
792 } else { | 910 } else { |
(...skipping 857 matching lines...) |
1650 // Free lists for old object spaces implementation | 1768 // Free lists for old object spaces implementation |
1651 | 1769 |
1652 void FreeListNode::set_size(Heap* heap, int size_in_bytes) { | 1770 void FreeListNode::set_size(Heap* heap, int size_in_bytes) { |
1653 ASSERT(size_in_bytes > 0); | 1771 ASSERT(size_in_bytes > 0); |
1654 ASSERT(IsAligned(size_in_bytes, kPointerSize)); | 1772 ASSERT(IsAligned(size_in_bytes, kPointerSize)); |
1655 | 1773 |
1656 // We write a map and possibly size information to the block. If the block | 1774 // We write a map and possibly size information to the block. If the block |
1657 // is big enough to be a FreeSpace with at least one extra word (the next | 1775 // is big enough to be a FreeSpace with at least one extra word (the next |
1658 // pointer), we set its map to be the free space map and its size to an | 1776 // pointer), we set its map to be the free space map and its size to an |
1659 // appropriate array length for the desired size from HeapObject::Size(). | 1777 // appropriate array length for the desired size from HeapObject::Size(). |
1660 // If the block is too small (eg, one or two words), to hold both a size | 1778 // If the block is too small (e.g. one or two words) to hold both a size |
1661 // field and a next pointer, we give it a filler map that gives it the | 1779 // field and a next pointer, we give it a filler map that gives it the |
1662 // correct size. | 1780 // correct size. |
1663 if (size_in_bytes > FreeSpace::kHeaderSize) { | 1781 if (size_in_bytes > FreeSpace::kHeaderSize) { |
1664 set_map_no_write_barrier(heap->raw_unchecked_free_space_map()); | 1782 set_map_no_write_barrier(heap->raw_unchecked_free_space_map()); |
1665 // Can't use FreeSpace::cast because it fails during deserialization. | 1783 // Can't use FreeSpace::cast because it fails during deserialization. |
1666 FreeSpace* this_as_free_space = reinterpret_cast<FreeSpace*>(this); | 1784 FreeSpace* this_as_free_space = reinterpret_cast<FreeSpace*>(this); |
1667 this_as_free_space->set_size(size_in_bytes); | 1785 this_as_free_space->set_size(size_in_bytes); |
1668 } else if (size_in_bytes == kPointerSize) { | 1786 } else if (size_in_bytes == kPointerSize) { |
1669 set_map_no_write_barrier(heap->raw_unchecked_one_pointer_filler_map()); | 1787 set_map_no_write_barrier(heap->raw_unchecked_one_pointer_filler_map()); |
1670 } else if (size_in_bytes == 2 * kPointerSize) { | 1788 } else if (size_in_bytes == 2 * kPointerSize) { |
(...skipping 83 matching lines...) |
1754 } else { | 1872 } else { |
1755 node->set_next(huge_list_); | 1873 node->set_next(huge_list_); |
1756 huge_list_ = node; | 1874 huge_list_ = node; |
1757 } | 1875 } |
1758 available_ += size_in_bytes; | 1876 available_ += size_in_bytes; |
1759 ASSERT(IsVeryLong() || available_ == SumFreeLists()); | 1877 ASSERT(IsVeryLong() || available_ == SumFreeLists()); |
1760 return 0; | 1878 return 0; |
1761 } | 1879 } |
1762 | 1880 |
1763 | 1881 |
1764 FreeListNode* FreeList::PickNodeFromList(FreeListNode** list, int* node_size) { | 1882 FreeListNode* FreeList::PickNodeFromList(FreeListNode** list, |
| 1883 int* node_size, |
| 1884 int minimum_size) { |
1765 FreeListNode* node = *list; | 1885 FreeListNode* node = *list; |
1766 | 1886 |
1767 if (node == NULL) return NULL; | 1887 if (node == NULL) return NULL; |
1768 | 1888 |
| 1889 ASSERT(node->map() == node->GetHeap()->raw_unchecked_free_space_map()); |
| 1890 |
1769 while (node != NULL && | 1891 while (node != NULL && |
1770 Page::FromAddress(node->address())->IsEvacuationCandidate()) { | 1892 Page::FromAddress(node->address())->IsEvacuationCandidate()) { |
1771 available_ -= node->Size(); | 1893 available_ -= node->Size(); |
1772 node = node->next(); | 1894 node = node->next(); |
1773 } | 1895 } |
1774 | 1896 |
1775 if (node != NULL) { | 1897 if (node == NULL) { |
1776 *node_size = node->Size(); | |
1777 *list = node->next(); | |
1778 } else { | |
1779 *list = NULL; | 1898 *list = NULL; |
| 1899 return NULL; |
1780 } | 1900 } |
1781 | 1901 |
| 1902 // Get the size without checking the map. When we are booting, we |
| 1903 // can have a FreeListNode before its map has been created. |
| 1904 intptr_t size = reinterpret_cast<FreeSpace*>(node)->Size(); |
| 1905 |
| 1906 // We don't search the list for one that fits, preferring to look in the |
| 1907 // list of larger nodes, but we do check the first in the list, because |
| 1908 // if we had to expand the space or page we may have placed an entry that |
| 1909 // was just long enough at the head of one of the lists. |
| 1910 if (size < minimum_size) return NULL; |
| 1911 |
| 1912 *node_size = size; |
| 1913 available_ -= size; |
| 1914 *list = node->next(); |
| 1915 |
1782 return node; | 1916 return node; |
1783 } | 1917 } |
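The new minimum_size parameter makes the pick a policy decision: take the head of the bucket if it is big enough, otherwise fall through to a larger bucket instead of walking the list. A toy sketch of that policy over an invented node type (the evacuation flag and node layout are stand-ins, not V8's FreeListNode).

#include <cstddef>

struct ToyNode {           // stand-in for FreeListNode
  size_t size;
  bool on_evacuation_candidate;
  ToyNode* next;
};

// Pop from the head, skipping nodes on evacuation candidates. If the first
// usable node is too small, return NULL so the caller tries a larger bucket.
ToyNode* PickFromList(ToyNode** list, size_t minimum_size, size_t* node_size) {
  ToyNode* node = *list;
  while (node != NULL && node->on_evacuation_candidate) node = node->next;
  if (node == NULL) { *list = NULL; return NULL; }
  if (node->size < minimum_size) return NULL;
  *node_size = node->size;
  *list = node->next;
  return node;
}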
1784 | 1918 |
1785 | 1919 |
1786 FreeListNode* FreeList::FindNodeFor(int size_in_bytes, int* node_size) { | 1920 FreeListNode* FreeList::FindAbuttingNode( |
| 1921 int size_in_bytes, int* node_size, Address limit, FreeListNode** list_head) { |
| 1922 FreeListNode* first_node = *list_head; |
| 1923 if (first_node != NULL && |
| 1924 first_node->address() == limit && |
| 1925 reinterpret_cast<FreeSpace*>(first_node)->Size() >= size_in_bytes && |
| 1926 !Page::FromAddress(first_node->address())->IsEvacuationCandidate()) { |
| 1927 FreeListNode* answer = first_node; |
| 1928 int size = reinterpret_cast<FreeSpace*>(first_node)->Size(); |
| 1929 available_ -= size; |
| 1930 *node_size = size; |
| 1931 *list_head = first_node->next(); |
| 1932 ASSERT(IsVeryLong() || available_ == SumFreeLists()); |
| 1933 return answer; |
| 1934 } |
| 1935 return NULL; |
| 1936 } |
| 1937 |
| 1938 |
| 1939 FreeListNode* FreeList::FindNodeFor(int size_in_bytes, |
| 1940 int* node_size, |
| 1941 Address limit) { |
1787 FreeListNode* node = NULL; | 1942 FreeListNode* node = NULL; |
1788 | 1943 |
1789 if (size_in_bytes <= kSmallAllocationMax) { | 1944 if (limit != NULL) { |
1790 node = PickNodeFromList(&small_list_, node_size); | 1945 // We may have a memory area at the head of the free list, which abuts the |
| 1946 // old linear allocation area. This happens if the linear allocation area |
| 1947 // has been shortened to allow an incremental marking step to be performed. |
| 1948 // In that case we prefer to return the free memory area that is contiguous |
| 1949 // with the old linear allocation area. |
| 1950 node = FindAbuttingNode(size_in_bytes, node_size, limit, &large_list_); |
| 1951 if (node != NULL) return node; |
| 1952 node = FindAbuttingNode(size_in_bytes, node_size, limit, &huge_list_); |
1791 if (node != NULL) return node; | 1953 if (node != NULL) return node; |
1792 } | 1954 } |
1793 | 1955 |
1794 if (size_in_bytes <= kMediumAllocationMax) { | 1956 node = PickNodeFromList(&small_list_, node_size, size_in_bytes); |
1795 node = PickNodeFromList(&medium_list_, node_size); | 1957 ASSERT(IsVeryLong() || available_ == SumFreeLists()); |
1796 if (node != NULL) return node; | 1958 if (node != NULL) return node; |
1797 } | |
1798 | 1959 |
1799 if (size_in_bytes <= kLargeAllocationMax) { | 1960 node = PickNodeFromList(&medium_list_, node_size, size_in_bytes); |
1800 node = PickNodeFromList(&large_list_, node_size); | 1961 ASSERT(IsVeryLong() || available_ == SumFreeLists()); |
1801 if (node != NULL) return node; | 1962 if (node != NULL) return node; |
1802 } | 1963 |
| 1964 node = PickNodeFromList(&large_list_, node_size, size_in_bytes); |
| 1965 ASSERT(IsVeryLong() || available_ == SumFreeLists()); |
| 1966 if (node != NULL) return node; |
1803 | 1967 |
1804 for (FreeListNode** cur = &huge_list_; | 1968 for (FreeListNode** cur = &huge_list_; |
1805 *cur != NULL; | 1969 *cur != NULL; |
1806 cur = (*cur)->next_address()) { | 1970 cur = (*cur)->next_address()) { |
1807 FreeListNode* cur_node = *cur; | 1971 node = PickNodeFromList(cur, node_size, size_in_bytes); |
1808 while (cur_node != NULL && | 1972 ASSERT(IsVeryLong() || available_ == SumFreeLists()); |
1809 Page::FromAddress(cur_node->address())->IsEvacuationCandidate()) { | 1973 if (node != NULL) return node; |
1810 available_ -= reinterpret_cast<FreeSpace*>(cur_node)->Size(); | |
1811 cur_node = cur_node->next(); | |
1812 } | |
1813 | |
1814 *cur = cur_node; | |
1815 if (cur_node == NULL) break; | |
1816 | |
1817 ASSERT((*cur)->map() == HEAP->raw_unchecked_free_space_map()); | |
1818 FreeSpace* cur_as_free_space = reinterpret_cast<FreeSpace*>(*cur); | |
1819 int size = cur_as_free_space->Size(); | |
1820 if (size >= size_in_bytes) { | |
1821 // Large enough node found. Unlink it from the list. | |
1822 node = *cur; | |
1823 *node_size = size; | |
1824 *cur = node->next(); | |
1825 break; | |
1826 } | |
1827 } | 1974 } |
1828 | 1975 |
1829 return node; | 1976 return node; |
1830 } | 1977 } |
1831 | 1978 |
1832 | 1979 |
1833 // Allocation on the old space free list. If it succeeds then a new linear | 1980 // Allocation on the old space free list. If it succeeds then a new linear |
1834 // allocation space has been set up with the top and limit of the space. If | 1981 // allocation space has been set up with the top and limit of the space. If |
1835 // the allocation fails then NULL is returned, and the caller can perform a GC | 1982 // the allocation fails then NULL is returned, and the caller can perform a GC |
1836 // or allocate a new page before retrying. | 1983 // or allocate a new page before retrying. |
1837 HeapObject* FreeList::Allocate(int size_in_bytes) { | 1984 HeapObject* FreeList::Allocate(int size_in_bytes) { |
1838 ASSERT(0 < size_in_bytes); | 1985 ASSERT(0 < size_in_bytes); |
1839 ASSERT(size_in_bytes <= kMaxBlockSize); | 1986 ASSERT(size_in_bytes <= kMaxBlockSize); |
1840 ASSERT(IsAligned(size_in_bytes, kPointerSize)); | 1987 ASSERT(IsAligned(size_in_bytes, kPointerSize)); |
1841 // Don't free list allocate if there is linear space available. | 1988 // Don't free list allocate if there is linear space available. |
1842 ASSERT(owner_->limit() - owner_->top() < size_in_bytes); | 1989 ASSERT(owner_->limit() - owner_->top() < size_in_bytes); |
1843 | 1990 |
1844 int new_node_size = 0; | 1991 int new_node_size = 0; |
1845 FreeListNode* new_node = FindNodeFor(size_in_bytes, &new_node_size); | 1992 FreeListNode* new_node = |
| 1993 FindNodeFor(size_in_bytes, &new_node_size, owner_->limit()); |
1846 if (new_node == NULL) return NULL; | 1994 if (new_node == NULL) return NULL; |
1847 | 1995 |
1848 available_ -= new_node_size; | 1996 if (new_node->address() == owner_->limit()) { |
| 1997 // The new freelist node we were given is an extension of the one we |
| 1998 // had last. This commonly happens when we extend a small page by |
| 1999 // committing more memory. In this case we just add the new node to the |
| 2000 // linear allocation area and recurse. |
| 2001 owner_->Allocate(new_node_size); |
| 2002 owner_->SetTop(owner_->top(), new_node->address() + new_node_size); |
| 2003 MaybeObject* allocation = owner_->AllocateRaw(size_in_bytes); |
| 2004 Object* answer; |
| 2005 if (!allocation->ToObject(&answer)) return NULL; |
| 2006 return HeapObject::cast(answer); |
| 2007 } |
| 2008 |
1849 ASSERT(IsVeryLong() || available_ == SumFreeLists()); | 2009 ASSERT(IsVeryLong() || available_ == SumFreeLists()); |
1850 | 2010 |
1851 int bytes_left = new_node_size - size_in_bytes; | 2011 int bytes_left = new_node_size - size_in_bytes; |
1852 ASSERT(bytes_left >= 0); | 2012 ASSERT(bytes_left >= 0); |
1853 | 2013 |
1854 int old_linear_size = static_cast<int>(owner_->limit() - owner_->top()); | 2014 int old_linear_size = static_cast<int>(owner_->limit() - owner_->top()); |
1855 // Mark the old linear allocation area with a free space map so it can be | 2015 // Mark the old linear allocation area with a free space map so it can be |
1856 // skipped when scanning the heap. This also puts it back in the free list | 2016 // skipped when scanning the heap. This also puts it back in the free list |
1857 // if it is big enough. | 2017 // if it is big enough. |
1858 owner_->Free(owner_->top(), old_linear_size); | 2018 if (old_linear_size != 0) { |
| 2019 owner_->AddToFreeLists(owner_->top(), old_linear_size); |
| 2020 } |
1859 | 2021 |
1860 #ifdef DEBUG | 2022 #ifdef DEBUG |
1861 for (int i = 0; i < size_in_bytes / kPointerSize; i++) { | 2023 for (int i = 0; i < size_in_bytes / kPointerSize; i++) { |
1862 reinterpret_cast<Object**>(new_node->address())[i] = Smi::FromInt(0); | 2024 reinterpret_cast<Object**>(new_node->address())[i] = Smi::FromInt(0); |
1863 } | 2025 } |
1864 #endif | 2026 #endif |
1865 | 2027 |
1866 owner_->heap()->incremental_marking()->OldSpaceStep( | 2028 owner_->heap()->incremental_marking()->OldSpaceStep( |
1867 size_in_bytes - old_linear_size); | 2029 size_in_bytes - old_linear_size); |
1868 | 2030 |
1869 // The old-space-step might have finished sweeping and restarted marking. | 2031 // The old-space-step might have finished sweeping and restarted marking. |
1870 // Verify that it did not turn the page of the new node into an evacuation | 2032 // Verify that it did not turn the page of the new node into an evacuation |
1871 // candidate. | 2033 // candidate. |
1872 ASSERT(!MarkCompactCollector::IsOnEvacuationCandidate(new_node)); | 2034 ASSERT(!MarkCompactCollector::IsOnEvacuationCandidate(new_node)); |
1873 | 2035 |
1874 const int kThreshold = IncrementalMarking::kAllocatedThreshold; | 2036 const int kThreshold = IncrementalMarking::kAllocatedThreshold; |
1875 | 2037 |
1876 // Memory in the linear allocation area is counted as allocated. We may free | 2038 // Memory in the linear allocation area is counted as allocated. We may free |
1877 // a little of this again immediately - see below. | 2039 // a little of this again immediately - see below. |
1878 owner_->Allocate(new_node_size); | 2040 owner_->Allocate(new_node_size); |
1879 | 2041 |
1880 if (bytes_left > kThreshold && | 2042 if (bytes_left > kThreshold && |
1881 owner_->heap()->incremental_marking()->IsMarkingIncomplete() && | 2043 owner_->heap()->incremental_marking()->IsMarkingIncomplete() && |
1882 FLAG_incremental_marking_steps) { | 2044 FLAG_incremental_marking_steps) { |
1883 int linear_size = owner_->RoundSizeDownToObjectAlignment(kThreshold); | 2045 int linear_size = owner_->RoundSizeDownToObjectAlignment(kThreshold); |
1884 // We don't want to give too large linear areas to the allocator while | 2046 // We don't want to give too large linear areas to the allocator while |
1885 // incremental marking is going on, because we won't check again whether | 2047 // incremental marking is going on, because we won't check again whether |
1886 // we want to do another increment until the linear area is used up. | 2048 // we want to do another increment until the linear area is used up. |
1887 owner_->Free(new_node->address() + size_in_bytes + linear_size, | 2049 owner_->AddToFreeLists(new_node->address() + size_in_bytes + linear_size, |
1888 new_node_size - size_in_bytes - linear_size); | 2050 new_node_size - size_in_bytes - linear_size); |
1889 owner_->SetTop(new_node->address() + size_in_bytes, | 2051 owner_->SetTop(new_node->address() + size_in_bytes, |
1890 new_node->address() + size_in_bytes + linear_size); | 2052 new_node->address() + size_in_bytes + linear_size); |
1891 } else if (bytes_left > 0) { | 2053 } else if (bytes_left > 0) { |
1892 // Normally we give the rest of the node to the allocator as its new | 2054 // Normally we give the rest of the node to the allocator as its new |
1893 // linear allocation area. | 2055 // linear allocation area. |
1894 owner_->SetTop(new_node->address() + size_in_bytes, | 2056 owner_->SetTop(new_node->address() + size_in_bytes, |
1895 new_node->address() + new_node_size); | 2057 new_node->address() + new_node_size); |
1896 } else { | 2058 } else { |
| 2059 ASSERT(bytes_left == 0); |
1897 // TODO(gc) Try not freeing linear allocation region when bytes_left | 2060 // TODO(gc) Try not freeing linear allocation region when bytes_left |
1898 // are zero. | 2061 // are zero. |
1899 owner_->SetTop(NULL, NULL); | 2062 owner_->SetTop(NULL, NULL); |
1900 } | 2063 } |
1901 | 2064 |
1902 return new_node; | 2065 return new_node; |
1903 } | 2066 } |
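When the node does not abut the old limit, the allocator carves it into the requested object plus a fresh linear allocation area. A minimal sketch of the carving arithmetic with invented names; accounting and the incremental-marking hooks are omitted.

#include <stdint.h>

struct LinearArea { uintptr_t top; uintptr_t limit; };

// Split a free node of node_size bytes at base: the object occupies
// [base, base + size_in_bytes) and the remainder becomes the new linear
// allocation area [top, limit).
LinearArea CarveNode(uintptr_t base, int node_size, int size_in_bytes) {
  LinearArea area;
  area.top = base + size_in_bytes;
  area.limit = base + node_size;
  return area;
}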
1904 | 2067 |
1905 | 2068 |
1906 static intptr_t CountFreeListItemsInList(FreeListNode* n, Page* p) { | 2069 static intptr_t CountFreeListItemsInList(FreeListNode* n, Page* p) { |
(...skipping 112 matching lines...) |
2019 // or because we have lowered the limit in order to get periodic incremental | 2182 // or because we have lowered the limit in order to get periodic incremental |
2020 // marking. The most reliable way to ensure that there is linear space is | 2183 // marking. The most reliable way to ensure that there is linear space is |
2021 // to do the allocation, then rewind the limit. | 2184 // to do the allocation, then rewind the limit. |
2022 ASSERT(bytes <= InitialCapacity()); | 2185 ASSERT(bytes <= InitialCapacity()); |
2023 MaybeObject* maybe = AllocateRaw(bytes); | 2186 MaybeObject* maybe = AllocateRaw(bytes); |
2024 Object* object = NULL; | 2187 Object* object = NULL; |
2025 if (!maybe->ToObject(&object)) return false; | 2188 if (!maybe->ToObject(&object)) return false; |
2026 HeapObject* allocation = HeapObject::cast(object); | 2189 HeapObject* allocation = HeapObject::cast(object); |
2027 Address top = allocation_info_.top; | 2190 Address top = allocation_info_.top; |
2028 if ((top - bytes) == allocation->address()) { | 2191 if ((top - bytes) == allocation->address()) { |
2029 allocation_info_.top = allocation->address(); | 2192 Address new_top = allocation->address(); |
| 2193 ASSERT(new_top >= Page::FromAddress(new_top - 1)->ObjectAreaStart()); |
| 2194 allocation_info_.top = new_top; |
2030 return true; | 2195 return true; |
2031 } | 2196 } |
2032 // There may be a borderline case here where the allocation succeeded, but | 2197 // There may be a borderline case here where the allocation succeeded, but |
2033 // the limit and top have moved on to a new page. In that case we try again. | 2198 // the limit and top have moved on to a new page. In that case we try again. |
2034 return ReserveSpace(bytes); | 2199 return ReserveSpace(bytes); |
2035 } | 2200 } |
2036 | 2201 |
2037 | 2202 |
2038 void PagedSpace::PrepareForMarkCompact() { | 2203 void PagedSpace::PrepareForMarkCompact() { |
2039 // We don't have a linear allocation area while sweeping. It will be restored | 2204 // We don't have a linear allocation area while sweeping. It will be restored |
2040 // on the first allocation after the sweep. | 2205 // on the first allocation after the sweep. |
2041 // Mark the old linear allocation area with a free space map so it can be | 2206 // Mark the old linear allocation area with a free space map so it can be |
2042 // skipped when scanning the heap. | 2207 // skipped when scanning the heap. |
2043 int old_linear_size = static_cast<int>(limit() - top()); | 2208 int old_linear_size = static_cast<int>(limit() - top()); |
2044 Free(top(), old_linear_size); | 2209 AddToFreeLists(top(), old_linear_size); |
2045 SetTop(NULL, NULL); | 2210 SetTop(NULL, NULL); |
2046 | 2211 |
2047 // Stop lazy sweeping and clear marking bits for unswept pages. | 2212 // Stop lazy sweeping and clear marking bits for unswept pages. |
2048 if (first_unswept_page_ != NULL) { | 2213 if (first_unswept_page_ != NULL) { |
2049 Page* p = first_unswept_page_; | 2214 Page* p = first_unswept_page_; |
2050 do { | 2215 do { |
2051 // Do not use ShouldBeSweptLazily predicate here. | 2216 // Do not use ShouldBeSweptLazily predicate here. |
2052 // New evacuation candidates were selected but they still have | 2217 // New evacuation candidates were selected but they still have |
2053 // to be swept before collection starts. | 2218 // to be swept before collection starts. |
2054 if (!p->WasSwept()) { | 2219 if (!p->WasSwept()) { |
(...skipping 21 matching lines...) Expand all Loading... |
2076 if (new_top <= allocation_info_.limit) return true; | 2241 if (new_top <= allocation_info_.limit) return true; |
2077 | 2242 |
2078 HeapObject* new_area = free_list_.Allocate(size_in_bytes); | 2243 HeapObject* new_area = free_list_.Allocate(size_in_bytes); |
2079 if (new_area == NULL) new_area = SlowAllocateRaw(size_in_bytes); | 2244 if (new_area == NULL) new_area = SlowAllocateRaw(size_in_bytes); |
2080 if (new_area == NULL) return false; | 2245 if (new_area == NULL) return false; |
2081 | 2246 |
2082 int old_linear_size = static_cast<int>(limit() - top()); | 2247 int old_linear_size = static_cast<int>(limit() - top()); |
2083 // Mark the old linear allocation area with a free space so it can be | 2248 // Mark the old linear allocation area with a free space so it can be |
2084 // skipped when scanning the heap. This also puts it back in the free list | 2249 // skipped when scanning the heap. This also puts it back in the free list |
2085 // if it is big enough. | 2250 // if it is big enough. |
2086 Free(top(), old_linear_size); | 2251 AddToFreeLists(top(), old_linear_size); |
2087 | 2252 |
2088 SetTop(new_area->address(), new_area->address() + size_in_bytes); | 2253 SetTop(new_area->address(), new_area->address() + size_in_bytes); |
2089 Allocate(size_in_bytes); | 2254 // The AddToFreeLists call above will reduce the size of the space in the |
| 2255 // allocation stats. We don't need to add this linear area to the size |
| 2256 // with an Allocate(size_in_bytes) call here, because the |
| 2257 // free_list_.Allocate() call above already accounted for this memory. |
2090 return true; | 2258 return true; |
2091 } | 2259 } |
2092 | 2260 |
2093 | 2261 |
2094 // You have to call this last, since the implementation from PagedSpace | 2262 // You have to call this last, since the implementation from PagedSpace |
2095 // doesn't know that memory was 'promised' to large object space. | 2263 // doesn't know that memory was 'promised' to large object space. |
2096 bool LargeObjectSpace::ReserveSpace(int bytes) { | 2264 bool LargeObjectSpace::ReserveSpace(int bytes) { |
2097 return heap()->OldGenerationSpaceAvailable() >= bytes; | 2265 return heap()->OldGenerationSpaceAvailable() >= bytes; |
2098 } | 2266 } |
2099 | 2267 |
(...skipping 59 matching lines...) |
2159 | 2327 |
2160 // Free list allocation failed and there is no next page. Fail if we have | 2328 // Free list allocation failed and there is no next page. Fail if we have |
2161 // hit the old generation size limit that should cause a garbage | 2329 // hit the old generation size limit that should cause a garbage |
2162 // collection. | 2330 // collection. |
2163 if (!heap()->always_allocate() && | 2331 if (!heap()->always_allocate() && |
2164 heap()->OldGenerationAllocationLimitReached()) { | 2332 heap()->OldGenerationAllocationLimitReached()) { |
2165 return NULL; | 2333 return NULL; |
2166 } | 2334 } |
2167 | 2335 |
2168 // Try to expand the space and allocate in the new next page. | 2336 // Try to expand the space and allocate in the new next page. |
2169 if (Expand()) { | 2337 if (Expand(size_in_bytes)) { |
2170 return free_list_.Allocate(size_in_bytes); | 2338 return free_list_.Allocate(size_in_bytes); |
2171 } | 2339 } |
2172 | 2340 |
2173 // Last ditch, sweep all the remaining pages to try to find space. This may | 2341 // Last ditch, sweep all the remaining pages to try to find space. This may |
2174 // cause a pause. | 2342 // cause a pause. |
2175 if (!IsSweepingComplete()) { | 2343 if (!IsSweepingComplete()) { |
2176 AdvanceSweeper(kMaxInt); | 2344 AdvanceSweeper(kMaxInt); |
2177 | 2345 |
2178 // Retry the free list allocation. | 2346 // Retry the free list allocation. |
2179 HeapObject* object = free_list_.Allocate(size_in_bytes); | 2347 HeapObject* object = free_list_.Allocate(size_in_bytes); |
(...skipping 340 matching lines...) |
2520 if (previous == NULL) { | 2688 if (previous == NULL) { |
2521 first_page_ = current; | 2689 first_page_ = current; |
2522 } else { | 2690 } else { |
2523 previous->set_next_page(current); | 2691 previous->set_next_page(current); |
2524 } | 2692 } |
2525 | 2693 |
2526 // Free the chunk. | 2694 // Free the chunk. |
2527 heap()->mark_compact_collector()->ReportDeleteIfNeeded( | 2695 heap()->mark_compact_collector()->ReportDeleteIfNeeded( |
2528 object, heap()->isolate()); | 2696 object, heap()->isolate()); |
2529 size_ -= static_cast<int>(page->size()); | 2697 size_ -= static_cast<int>(page->size()); |
| 2698 ASSERT(size_ >= 0); |
2530 objects_size_ -= object->Size(); | 2699 objects_size_ -= object->Size(); |
2531 page_count_--; | 2700 page_count_--; |
2532 | 2701 |
2533 if (is_pointer_object) { | 2702 if (is_pointer_object) { |
2534 heap()->QueueMemoryChunkForFree(page); | 2703 heap()->QueueMemoryChunkForFree(page); |
2535 } else { | 2704 } else { |
2536 heap()->isolate()->memory_allocator()->Free(page); | 2705 heap()->isolate()->memory_allocator()->Free(page); |
2537 } | 2706 } |
2538 } | 2707 } |
2539 } | 2708 } |
(...skipping 118 matching lines...) |
2658 object->ShortPrint(); | 2827 object->ShortPrint(); |
2659 PrintF("\n"); | 2828 PrintF("\n"); |
2660 } | 2829 } |
2661 printf(" --------------------------------------\n"); | 2830 printf(" --------------------------------------\n"); |
2662 printf(" Marked: %x, LiveCount: %x\n", mark_size, LiveBytes()); | 2831 printf(" Marked: %x, LiveCount: %x\n", mark_size, LiveBytes()); |
2663 } | 2832 } |
2664 | 2833 |
2665 #endif // DEBUG | 2834 #endif // DEBUG |
2666 | 2835 |
2667 } } // namespace v8::internal | 2836 } } // namespace v8::internal |