Chromium Code Reviews

Unified Diff: src/heap/spaces.cc

Issue 1862653002: Move MemoryAllocator and CodeRange into Heap (Closed)
Base URL: https://chromium.googlesource.com/v8/v8.git@master
Patch Set: Rebase (created 4 years, 8 months ago)
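The substance of this CL is mechanical: MemoryAllocator takes ownership of the CodeRange, and call sites stop reaching both through Isolate and instead go through Heap. A minimal sketch of the access-path change, using simplified stand-in classes rather than the real V8 declarations (only the accessor shape mirrors the CL):

// Hedged sketch with stand-in types; not the real V8 class definitions.
class CodeRange {};

class MemoryAllocator {
 public:
  CodeRange* code_range() { return code_range_; }  // accessor used by this CL

 private:
  CodeRange* code_range_ = nullptr;
};

class Heap {
 public:
  MemoryAllocator* memory_allocator() { return memory_allocator_; }

 private:
  MemoryAllocator* memory_allocator_ = nullptr;
};

class Isolate {
 public:
  Heap* heap() { return &heap_; }
  // Before this CL, Isolate also exposed memory_allocator() and code_range().

 private:
  Heap heap_;
};

void Example(Isolate* isolate) {
  // Old path (removed): isolate->memory_allocator(), isolate->code_range().
  // New path:
  MemoryAllocator* allocator = isolate->heap()->memory_allocator();
  CodeRange* range = allocator->code_range();
  (void)range;
}
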
 // Copyright 2011 the V8 project authors. All rights reserved.
 // Use of this source code is governed by a BSD-style license that can be
 // found in the LICENSE file.
 
 #include "src/heap/spaces.h"
 
 #include "src/base/bits.h"
 #include "src/base/platform/platform.h"
 #include "src/full-codegen/full-codegen.h"
 #include "src/heap/slot-set.h"
(...skipping 204 matching lines...)
   CHECK_LE(commit_size,
            requested_size - 2 * MemoryAllocator::CodePageGuardSize());
   FreeBlock current;
   if (!ReserveBlock(requested_size, &current)) {
     *allocated = 0;
     return NULL;
   }
   *allocated = current.size;
   DCHECK(*allocated <= current.size);
   DCHECK(IsAddressAligned(current.start, MemoryChunk::kAlignment));
-  if (!isolate_->memory_allocator()->CommitExecutableMemory(
+  if (!isolate_->heap()->memory_allocator()->CommitExecutableMemory(
           code_range_, current.start, commit_size, *allocated)) {
     *allocated = 0;
     ReleaseBlock(&current);
     return NULL;
   }
   return current.start;
 }
 
 
 bool CodeRange::CommitRawMemory(Address start, size_t length) {
-  return isolate_->memory_allocator()->CommitMemory(start, length, EXECUTABLE);
+  return isolate_->heap()->memory_allocator()->CommitMemory(start, length,
+                                                            EXECUTABLE);
 }
 
 
 bool CodeRange::UncommitRawMemory(Address start, size_t length) {
   return code_range_->Uncommit(start, length);
 }
 
 
 void CodeRange::FreeRawMemory(Address address, size_t length) {
   DCHECK(IsAddressAligned(address, MemoryChunk::kAlignment));
(...skipping 40 matching lines...)
   free_list_.Add(*block);
 }
 
 
 // -----------------------------------------------------------------------------
 // MemoryAllocator
 //
 
 MemoryAllocator::MemoryAllocator(Isolate* isolate)
     : isolate_(isolate),
+      code_range_(nullptr),
       capacity_(0),
       capacity_executable_(0),
       size_(0),
       size_executable_(0),
       lowest_ever_allocated_(reinterpret_cast<void*>(-1)),
       highest_ever_allocated_(reinterpret_cast<void*>(0)) {}
 
-
-bool MemoryAllocator::SetUp(intptr_t capacity, intptr_t capacity_executable) {
+bool MemoryAllocator::SetUp(intptr_t capacity, intptr_t capacity_executable,
+                            intptr_t code_range_size) {
   capacity_ = RoundUp(capacity, Page::kPageSize);
   capacity_executable_ = RoundUp(capacity_executable, Page::kPageSize);
   DCHECK_GE(capacity_, capacity_executable_);
 
   size_ = 0;
   size_executable_ = 0;
 
+  code_range_ = new CodeRange(isolate_);
+  if (!code_range_->SetUp(static_cast<size_t>(code_range_size))) return false;
+
   return true;
 }
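
With the new three-argument SetUp() above, the caller passes the code-range size through instead of setting up a CodeRange separately. A hedged sketch of what a call site would presumably look like; the real caller lives in heap.cc, which is outside this file's diff, and the stand-in class mirrors only the new signature:

#include <stdint.h>

// Hypothetical stand-in that mirrors only the new three-argument signature.
class MemoryAllocator {
 public:
  bool SetUp(intptr_t capacity, intptr_t capacity_executable,
             intptr_t code_range_size) {
    // Stand-in body: the real SetUp also news a CodeRange (see the hunk above).
    return capacity >= capacity_executable && code_range_size >= 0;
  }
};

// Hypothetical call site, not taken from this CL.
bool InitializeHeapMemory(MemoryAllocator* allocator, intptr_t max_reserved,
                          intptr_t max_executable, intptr_t code_range_size) {
  // One call now configures the allocator limits and reserves the code range.
  return allocator->SetUp(max_reserved, max_executable, code_range_size);
}
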
 
 
 void MemoryAllocator::TearDown() {
   for (MemoryChunk* chunk : chunk_pool_) {
     FreeMemory(reinterpret_cast<Address>(chunk), MemoryChunk::kPageSize,
                NOT_EXECUTABLE);
   }
   // Check that spaces were torn down before MemoryAllocator.
   DCHECK_EQ(size_.Value(), 0);
   // TODO(gc) this will be true again when we fix FreeMemory.
   // DCHECK(size_executable_ == 0);
   capacity_ = 0;
   capacity_executable_ = 0;
+
+  delete code_range_;
+  code_range_ = nullptr;
 }
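
Taken together, the constructor, SetUp(), and TearDown() hunks give MemoryAllocator full ownership of the CodeRange: null at construction, allocated in SetUp(), deleted in TearDown(). A simplified sketch of that lifecycle, assuming stand-in types (the real classes carry far more state):

#include <stddef.h>

// Simplified stand-ins; not the real V8 declarations.
class CodeRange {
 public:
  bool SetUp(size_t requested_size) {
    (void)requested_size;  // stand-in: the real SetUp reserves the region
    return true;
  }
};

class MemoryAllocator {
 public:
  MemoryAllocator() : code_range_(nullptr) {}  // mirrors the new initializer

  bool SetUp(size_t code_range_size) {
    code_range_ = new CodeRange();
    return code_range_->SetUp(code_range_size);
  }

  void TearDown() {
    // Spaces must already be torn down here, as the DCHECKs above require.
    delete code_range_;
    code_range_ = nullptr;
  }

 private:
  CodeRange* code_range_;
};
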
 
 bool MemoryAllocator::CommitMemory(Address base, size_t size,
                                    Executability executable) {
   if (!base::VirtualMemory::CommitRegion(base, size,
                                          executable == EXECUTABLE)) {
     return false;
   }
   UpdateAllocatedSpaceLimits(base, base + size);
   return true;
 }
 
 
 void MemoryAllocator::FreeMemory(base::VirtualMemory* reservation,
                                  Executability executable) {
   // TODO(gc) make code_range part of memory allocator?
   // Code which is part of the code-range does not have its own VirtualMemory.
-  DCHECK(isolate_->code_range() == NULL ||
-         !isolate_->code_range()->contains(
-             static_cast<Address>(reservation->address())));
-  DCHECK(executable == NOT_EXECUTABLE || isolate_->code_range() == NULL ||
-         !isolate_->code_range()->valid() ||
-         reservation->size() <= Page::kPageSize);
+  DCHECK(code_range() == NULL ||
+         !code_range()->contains(static_cast<Address>(reservation->address())));
+  DCHECK(executable == NOT_EXECUTABLE || code_range() == NULL ||
+         !code_range()->valid() || reservation->size() <= Page::kPageSize);
 
   reservation->Release();
 }
 
 
 void MemoryAllocator::FreeMemory(Address base, size_t size,
                                  Executability executable) {
   // TODO(gc) make code_range part of memory allocator?
-  if (isolate_->code_range() != NULL &&
-      isolate_->code_range()->contains(static_cast<Address>(base))) {
+  if (code_range() != NULL &&
+      code_range()->contains(static_cast<Address>(base))) {
     DCHECK(executable == EXECUTABLE);
-    isolate_->code_range()->FreeRawMemory(base, size);
+    code_range()->FreeRawMemory(base, size);
   } else {
-    DCHECK(executable == NOT_EXECUTABLE || isolate_->code_range() == NULL ||
-           !isolate_->code_range()->valid());
+    DCHECK(executable == NOT_EXECUTABLE || code_range() == NULL ||
+           !code_range()->valid());
     bool result = base::VirtualMemory::ReleaseRegion(base, size);
     USE(result);
     DCHECK(result);
   }
 }
 
-
 Address MemoryAllocator::ReserveAlignedMemory(size_t size, size_t alignment,
                                               base::VirtualMemory* controller) {
   base::VirtualMemory reservation(size, alignment);
 
   if (!reservation.IsReserved()) return NULL;
   size_.Increment(static_cast<intptr_t>(reservation.size()));
   Address base =
       RoundUp(static_cast<Address>(reservation.address()), alignment);
   controller->TakeControl(&reservation);
   return base;
 }
 
-
 Address MemoryAllocator::AllocateAlignedMemory(
     size_t reserve_size, size_t commit_size, size_t alignment,
     Executability executable, base::VirtualMemory* controller) {
   DCHECK(commit_size <= reserve_size);
   base::VirtualMemory reservation;
   Address base = ReserveAlignedMemory(reserve_size, alignment, &reservation);
   if (base == NULL) return NULL;
 
   if (executable == EXECUTABLE) {
     if (!CommitExecutableMemory(&reservation, base, commit_size,
(...skipping 93 matching lines...)
 
   if (commit_size > committed_size) {
     // Commit size should be less or equal than the reserved size.
     DCHECK(commit_size <= size() - 2 * guard_size);
     // Append the committed area.
     Address start = address() + committed_size + guard_size;
     size_t length = commit_size - committed_size;
     if (reservation_.IsReserved()) {
       Executability executable =
           IsFlagSet(IS_EXECUTABLE) ? EXECUTABLE : NOT_EXECUTABLE;
-      if (!heap()->isolate()->memory_allocator()->CommitMemory(start, length,
-                                                               executable)) {
+      if (!heap()->memory_allocator()->CommitMemory(start, length,
+                                                    executable)) {
         return false;
       }
     } else {
-      CodeRange* code_range = heap_->isolate()->code_range();
+      CodeRange* code_range = heap_->memory_allocator()->code_range();
       DCHECK(code_range != NULL && code_range->valid() &&
              IsFlagSet(IS_EXECUTABLE));
       if (!code_range->CommitRawMemory(start, length)) return false;
     }
 
     if (Heap::ShouldZapGarbage()) {
-      heap_->isolate()->memory_allocator()->ZapBlock(start, length);
+      heap_->memory_allocator()->ZapBlock(start, length);
     }
   } else if (commit_size < committed_size) {
     DCHECK(commit_size > 0);
     // Shrink the committed area.
     size_t length = committed_size - commit_size;
     Address start = address() + committed_size + guard_size - length;
     if (reservation_.IsReserved()) {
       if (!reservation_.Uncommit(start, length)) return false;
     } else {
-      CodeRange* code_range = heap_->isolate()->code_range();
+      CodeRange* code_range = heap_->memory_allocator()->code_range();
       DCHECK(code_range != NULL && code_range->valid() &&
              IsFlagSet(IS_EXECUTABLE));
       if (!code_range->UncommitRawMemory(start, length)) return false;
     }
   }
 
   area_end_ = area_start_ + requested;
   return true;
 }
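
The CommitArea() hunk above preserves an existing split: a page either owns its own VirtualMemory reservation (and commits through the allocator) or was carved out of the shared code range (and commits through CodeRange); the only change is that the CodeRange is now found via heap_->memory_allocator(). A simplified sketch of that dispatch, with stand-in types in place of the real interfaces:

#include <stddef.h>

typedef unsigned char* Address;

// Stand-ins for the two possible backing stores of a MemoryChunk.
struct Reservation {
  bool is_reserved;
  bool Commit(Address start, size_t length) {
    return start != nullptr && length > 0;  // stand-in for a real commit
  }
};

struct CodeRange {
  bool CommitRawMemory(Address start, size_t length) {
    return start != nullptr && length > 0;  // stand-in for a real commit
  }
};

// Commit `length` bytes at `start`, choosing the backing store the way
// MemoryChunk::CommitArea() does.
bool CommitArea(Reservation* reservation, CodeRange* code_range, Address start,
                size_t length) {
  if (reservation->is_reserved) {
    // The page owns its own virtual memory: commit through it.
    return reservation->Commit(start, length);
  }
  // Otherwise the page was carved out of the shared executable code range.
  return code_range->CommitRawMemory(start, length);
}
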
 
(...skipping 75 matching lines...)
   }
 
   // Size of header (not executable) plus area (executable).
   size_t commit_size = RoundUp(CodePageGuardStartOffset() + commit_area_size,
                                base::OS::CommitPageSize());
   // Allocate executable memory either from code range or from the
   // OS.
 #ifdef V8_TARGET_ARCH_MIPS64
   // Use code range only for large object space on mips64 to keep address
   // range within 256-MB memory region.
-  if (isolate_->code_range() != NULL && isolate_->code_range()->valid() &&
+  if (code_range() != NULL && code_range()->valid() &&
       reserve_area_size > CodePageAreaSize()) {
 #else
-  if (isolate_->code_range() != NULL && isolate_->code_range()->valid()) {
+  if (code_range() != NULL && code_range()->valid()) {
 #endif
-    base = isolate_->code_range()->AllocateRawMemory(chunk_size, commit_size,
-                                                     &chunk_size);
+    base =
+        code_range()->AllocateRawMemory(chunk_size, commit_size, &chunk_size);
     DCHECK(
         IsAligned(reinterpret_cast<intptr_t>(base), MemoryChunk::kAlignment));
     if (base == NULL) return NULL;
     size_.Increment(static_cast<intptr_t>(chunk_size));
     // Update executable memory size.
     size_executable_.Increment(static_cast<intptr_t>(chunk_size));
   } else {
     base = AllocateAlignedMemory(chunk_size, commit_size,
                                  MemoryChunk::kAlignment, executable,
                                  &reservation);
(...skipping 395 matching lines...)
 
 bool PagedSpace::SetUp() { return true; }
 
 
 bool PagedSpace::HasBeenSetUp() { return true; }
 
 
 void PagedSpace::TearDown() {
   PageIterator iterator(this);
   while (iterator.has_next()) {
-    heap()->isolate()->memory_allocator()->Free(iterator.next());
+    heap()->memory_allocator()->Free(iterator.next());
   }
   anchor_.set_next_page(&anchor_);
   anchor_.set_prev_page(&anchor_);
   accounting_stats_.Clear();
 }
 
 void PagedSpace::RefillFreeList() {
   // Any PagedSpace might invoke RefillFreeList. We filter all but our old
   // generation spaces out.
   if (identity() != OLD_SPACE && identity() != CODE_SPACE &&
(...skipping 115 matching lines...)
 
 
 bool PagedSpace::Expand() {
   intptr_t size = AreaSize();
   if (snapshotable() && !HasPages()) {
     size = Snapshot::SizeOfFirstPage(heap()->isolate(), identity());
   }
 
   if (!CanExpand(size)) return false;
 
-  Page* p = heap()->isolate()->memory_allocator()->AllocatePage<Page>(
-      size, this, executable());
+  Page* p =
+      heap()->memory_allocator()->AllocatePage<Page>(size, this, executable());
   if (p == NULL) return false;
 
   AccountCommitted(static_cast<intptr_t>(p->size()));
 
   // Pages created during bootstrapping may contain immortal immovable objects.
   if (!heap()->deserialization_complete()) p->MarkNeverEvacuate();
 
   // When incremental marking was activated, old generation pages are allocated
   // black.
   if (heap()->incremental_marking()->black_allocation()) {
(...skipping 488 matching lines...)
 }
 
 
 bool SemiSpace::Commit() {
   DCHECK(!is_committed());
   NewSpacePage* current = anchor();
   const int num_pages = current_capacity_ / Page::kPageSize;
   for (int i = 0; i < num_pages; i++) {
     NewSpacePage* new_page =
         heap()
-            ->isolate()
             ->memory_allocator()
             ->AllocatePage<NewSpacePage, MemoryAllocator::kPooled>(
                 NewSpacePage::kAllocatableMemory, this, executable());
     new_page->InsertAfter(current);
     current = new_page;
   }
   Reset();
   AccountCommitted(current_capacity_);
   if (age_mark_ == nullptr) {
     age_mark_ = first_page()->area_start();
   }
   committed_ = true;
   return true;
 }
 
 
 bool SemiSpace::Uncommit() {
   DCHECK(is_committed());
   NewSpacePageIterator it(this);
   while (it.has_next()) {
-    heap()->isolate()->memory_allocator()->Free<MemoryAllocator::kPooled>(
-        it.next());
+    heap()->memory_allocator()->Free<MemoryAllocator::kPooled>(it.next());
   }
   anchor()->set_next_page(anchor());
   anchor()->set_prev_page(anchor());
   AccountUncommitted(current_capacity_);
   committed_ = false;
   return true;
 }
 
 
 size_t SemiSpace::CommittedPhysicalMemory() {
(...skipping 15 matching lines...)
   DCHECK_LE(new_capacity, maximum_capacity_);
   DCHECK_GT(new_capacity, current_capacity_);
   const int delta = new_capacity - current_capacity_;
   DCHECK(IsAligned(delta, base::OS::AllocateAlignment()));
   int delta_pages = delta / NewSpacePage::kPageSize;
   NewSpacePage* last_page = anchor()->prev_page();
   DCHECK_NE(last_page, anchor());
   while (delta_pages > 0) {
     NewSpacePage* new_page =
         heap()
-            ->isolate()
             ->memory_allocator()
             ->AllocatePage<NewSpacePage, MemoryAllocator::kPooled>(
                 NewSpacePage::kAllocatableMemory, this, executable());
     new_page->InsertAfter(last_page);
     Bitmap::Clear(new_page);
     // Duplicate the flags that was set on the old page.
     new_page->SetFlags(last_page->GetFlags(),
                        NewSpacePage::kCopyOnFlipFlagsMask);
     last_page = new_page;
     delta_pages--;
(...skipping 12 matching lines...)
     const int delta = current_capacity_ - new_capacity;
     DCHECK(IsAligned(delta, base::OS::AllocateAlignment()));
     int delta_pages = delta / NewSpacePage::kPageSize;
     NewSpacePage* new_last_page;
     NewSpacePage* last_page;
     while (delta_pages > 0) {
       last_page = anchor()->prev_page();
       new_last_page = last_page->prev_page();
       new_last_page->set_next_page(anchor());
       anchor()->set_prev_page(new_last_page);
-      heap()->isolate()->memory_allocator()->Free<MemoryAllocator::kPooled>(
-          last_page);
+      heap()->memory_allocator()->Free<MemoryAllocator::kPooled>(last_page);
       delta_pages--;
     }
     AccountUncommitted(static_cast<intptr_t>(delta));
   }
   current_capacity_ = new_capacity;
   return true;
 }
 
 void SemiSpace::FixPagesFlags(intptr_t flags, intptr_t mask) {
   anchor_.set_owner(this);
(...skipping 1065 matching lines...)
 }
 
 
 void LargeObjectSpace::TearDown() {
   while (first_page_ != NULL) {
     LargePage* page = first_page_;
     first_page_ = first_page_->next_page();
     LOG(heap()->isolate(), DeleteEvent("LargeObjectChunk", page->address()));
 
     ObjectSpace space = static_cast<ObjectSpace>(1 << identity());
-    heap()->isolate()->memory_allocator()->PerformAllocationCallback(
-        space, kAllocationActionFree, page->size());
-    heap()->isolate()->memory_allocator()->Free(page);
+    heap()->memory_allocator()->PerformAllocationCallback(
+        space, kAllocationActionFree, page->size());
+    heap()->memory_allocator()->Free(page);
   }
   SetUp();
 }
 
 
 AllocationResult LargeObjectSpace::AllocateRaw(int object_size,
                                                Executability executable) {
   // Check if we want to force a GC before growing the old space further.
   // If so, fail the allocation.
   if (!heap()->CanExpandOldGeneration(object_size)) {
     return AllocationResult::Retry(identity());
   }
 
-  LargePage* page =
-      heap()->isolate()->memory_allocator()->AllocatePage<LargePage>(
-          object_size, this, executable);
+  LargePage* page = heap()->memory_allocator()->AllocatePage<LargePage>(
+      object_size, this, executable);
   if (page == NULL) return AllocationResult::Retry(identity());
   DCHECK(page->area_size() >= object_size);
 
   size_ += static_cast<int>(page->size());
   AccountCommitted(static_cast<intptr_t>(page->size()));
   objects_size_ += object_size;
   page_count_++;
   page->set_next_page(first_page_);
   first_page_ = page;
 
(...skipping 243 matching lines...)
     object->ShortPrint();
     PrintF("\n");
   }
   printf(" --------------------------------------\n");
   printf(" Marked: %x, LiveCount: %x\n", mark_size, LiveBytes());
 }
 
 #endif  // DEBUG
 }  // namespace internal
 }  // namespace v8