| OLD | NEW |
| 1 // Copyright 2006-2010 the V8 project authors. All rights reserved. | 1 // Copyright 2011 the V8 project authors. All rights reserved. |
| 2 // Redistribution and use in source and binary forms, with or without | 2 // Redistribution and use in source and binary forms, with or without |
| 3 // modification, are permitted provided that the following conditions are | 3 // modification, are permitted provided that the following conditions are |
| 4 // met: | 4 // met: |
| 5 // | 5 // |
| 6 // * Redistributions of source code must retain the above copyright | 6 // * Redistributions of source code must retain the above copyright |
| 7 // notice, this list of conditions and the following disclaimer. | 7 // notice, this list of conditions and the following disclaimer. |
| 8 // * Redistributions in binary form must reproduce the above | 8 // * Redistributions in binary form must reproduce the above |
| 9 // copyright notice, this list of conditions and the following | 9 // copyright notice, this list of conditions and the following |
| 10 // disclaimer in the documentation and/or other materials provided | 10 // disclaimer in the documentation and/or other materials provided |
| 11 // with the distribution. | 11 // with the distribution. |
| (...skipping 48 matching lines...) |
| 60 // buffer overflow) iterate intergenerational pointers without decoding heap | 60 // buffer overflow) iterate intergenerational pointers without decoding heap |
| 61 // object maps so if the page belongs to old pointer space or large object | 61 // object maps so if the page belongs to old pointer space or large object |
| 62 // space it is essential to guarantee that the page does not contain any | 62 // space it is essential to guarantee that the page does not contain any |
| 63 // garbage pointers to new space: every pointer aligned word which satisfies | 63 // garbage pointers to new space: every pointer aligned word which satisfies |
| 64 // the Heap::InNewSpace() predicate must be a pointer to a live heap object in | 64 // the Heap::InNewSpace() predicate must be a pointer to a live heap object in |
| 65 // new space. Thus objects in old pointer and large object spaces should have a | 65 // new space. Thus objects in old pointer and large object spaces should have a |
| 66 // special layout (e.g. no bare integer fields). This requirement does not | 66 // special layout (e.g. no bare integer fields). This requirement does not |
| 67 // apply to map space which is iterated in a special fashion. However we still | 67 // apply to map space which is iterated in a special fashion. However we still |
| 68 // require pointer fields of dead maps to be cleaned. | 68 // require pointer fields of dead maps to be cleaned. |
| 69 // | 69 // |
| 70 // To enable lazy cleaning of old space pages we use a notion of allocation | 70 // To enable lazy cleaning of old space pages we can mark chunks of the page |
| 71 // watermark. Every pointer under watermark is considered to be well formed. | 71 // as being garbage. Garbage sections are marked with a special map. These |
| 72 // Page allocation watermark is not necessarily equal to page allocation top but | 72 // sections are skipped when scanning the page, even if we are otherwise |
| 73 // all alive objects on page should reside under allocation watermark. | 73 // scanning without regard for object boundaries. Garbage sections are chained |
| 74 // During scavenge allocation watermark might be bumped and invalid pointers | 74 // together to form a free list after a GC. Garbage sections created outside |
| 75 // might appear below it. To avoid following them we store a valid watermark | 75 // of GCs by object truncation etc. may not be in the free list chain. Very |
| 76 // into special field in the page header and set a page WATERMARK_INVALIDATED | 76 // small free spaces are ignored; they need only be cleaned of bogus pointers |
| 77 // flag. For details see comments in the Page::SetAllocationWatermark() method | 77 // into new space. |
| 78 // body. | |
| 79 // | 78 // |
| 79 // Each page may have up to one special garbage section. The start of this |
| 80 // section is denoted by the top field in the space. The end of the section |
| 81 // is denoted by the limit field in the space. This special garbage section |
| 82 // is not marked with a free space map in the data. The point of this section |
| 83 // is to enable linear allocation without having to constantly update the byte |
| 84 // array every time the top field is updated and a new object is created. The |
| 85 // special garbage section is not in the chain of garbage sections. |
| 86 // |
| 87 // Since the top and limit fields are in the space, not the page, only one page |
| 88 // has a special garbage section, and if the top and limit are equal then there |
| 89 // is no special garbage section. |
| 80 | 90 |
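The top/limit scheme described in the comment is plain bump-pointer allocation over a reserved slice of a page, with the untouched tail acting as the one unmarked garbage section. A minimal standalone sketch of that idea (illustrative names, not the V8 classes):

```cpp
#include <cassert>
#include <cstddef>
#include <cstdint>

// Illustrative bump allocator: objects are carved from [top, limit).
// The unallocated tail [top, limit) corresponds to the "special garbage
// section" described above: it is not marked with a free-space map and is
// simply skipped until top catches up with limit.
struct LinearArea {
  uintptr_t top;
  uintptr_t limit;

  // Returns 0 when the area is exhausted and a slower path (e.g. pulling a
  // new area off a free list) would have to run instead.
  uintptr_t Allocate(size_t size_in_bytes) {
    if (limit - top < size_in_bytes) return 0;
    uintptr_t result = top;
    top += size_in_bytes;
    return result;
  }
};

int main() {
  alignas(8) static char page[256];
  LinearArea area = {reinterpret_cast<uintptr_t>(page),
                     reinterpret_cast<uintptr_t>(page) + sizeof(page)};
  uintptr_t a = area.Allocate(64);
  uintptr_t b = area.Allocate(64);
  assert(a != 0 && b == a + 64);     // objects are laid out back to back
  assert(area.Allocate(1024) == 0);  // too big: a slow path would take over
  return 0;
}
```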
| 81 // Some assertion macros used in the debugging mode. | 91 // Some assertion macros used in the debugging mode. |
| 82 | 92 |
| 83 #define ASSERT_PAGE_ALIGNED(address) \ | 93 #define ASSERT_PAGE_ALIGNED(address) \ |
| 84 ASSERT((OffsetFrom(address) & Page::kPageAlignmentMask) == 0) | 94 ASSERT((OffsetFrom(address) & Page::kPageAlignmentMask) == 0) |
| 85 | 95 |
| 86 #define ASSERT_OBJECT_ALIGNED(address) \ | 96 #define ASSERT_OBJECT_ALIGNED(address) \ |
| 87 ASSERT((OffsetFrom(address) & kObjectAlignmentMask) == 0) | 97 ASSERT((OffsetFrom(address) & kObjectAlignmentMask) == 0) |
| 88 | 98 |
| 89 #define ASSERT_MAP_ALIGNED(address) \ | 99 #define ASSERT_MAP_ALIGNED(address) \ |
| 90 ASSERT((OffsetFrom(address) & kMapAlignmentMask) == 0) | 100 ASSERT((OffsetFrom(address) & kMapAlignmentMask) == 0) |
| 91 | 101 |
| 92 #define ASSERT_OBJECT_SIZE(size) \ | 102 #define ASSERT_OBJECT_SIZE(size) \ |
| 93 ASSERT((0 < size) && (size <= Page::kMaxHeapObjectSize)) | 103 ASSERT((0 < size) && (size <= Page::kMaxHeapObjectSize)) |
| 94 | 104 |
| 95 #define ASSERT_PAGE_OFFSET(offset) \ | 105 #define ASSERT_PAGE_OFFSET(offset) \ |
| 96 ASSERT((Page::kObjectStartOffset <= offset) \ | 106 ASSERT((Page::kObjectStartOffset <= offset) \ |
| 97 && (offset <= Page::kPageSize)) | 107 && (offset <= Page::kPageSize)) |
| 98 | 108 |
| 99 #define ASSERT_MAP_PAGE_INDEX(index) \ | 109 #define ASSERT_MAP_PAGE_INDEX(index) \ |
| 100 ASSERT((0 <= index) && (index <= MapSpace::kMaxMapPageIndex)) | 110 ASSERT((0 <= index) && (index <= MapSpace::kMaxMapPageIndex)) |
| 101 | 111 |
| 102 | 112 |
| 103 class PagedSpace; | 113 class PagedSpace; |
| 104 class MemoryAllocator; | 114 class MemoryAllocator; |
| 105 class AllocationInfo; | 115 class AllocationInfo; |
| 106 class Space; | 116 class Space; |
| 117 class OldSpaceFreeList; |
| 107 | 118 |
| 108 | 119 |
| 109 // Bitmap is a sequence of cells each containing fixed number of bits. | 120 // Bitmap is a sequence of cells each containing fixed number of bits. |
| 110 template<typename StorageDescriptor> | 121 template<typename StorageDescriptor> |
| 111 class Bitmap { | 122 class Bitmap { |
| 112 public: | 123 public: |
| 113 typedef uint32_t CellType; | 124 typedef uint32_t CellType; |
| 114 static const uint32_t kBitsPerCell = 32; | 125 static const uint32_t kBitsPerCell = 32; |
| 115 static const uint32_t kBitsPerCellLog2 = 5; | 126 static const uint32_t kBitsPerCellLog2 = 5; |
| 116 static const uint32_t kBitIndexMask = kBitsPerCell - 1; | 127 static const uint32_t kBitIndexMask = kBitsPerCell - 1; |
| (...skipping 172 matching lines...) |
| 289 public: | 300 public: |
| 290 static MemoryChunk* FromAddress(Address a) { | 301 static MemoryChunk* FromAddress(Address a) { |
| 291 return reinterpret_cast<MemoryChunk*>(OffsetFrom(a) & ~kAlignmentMask); | 302 return reinterpret_cast<MemoryChunk*>(OffsetFrom(a) & ~kAlignmentMask); |
| 292 } | 303 } |
| 293 | 304 |
| 294 Address address() { return reinterpret_cast<Address>(this); } | 305 Address address() { return reinterpret_cast<Address>(this); } |
| 295 | 306 |
| 296 bool is_valid() { return address() != NULL; } | 307 bool is_valid() { return address() != NULL; } |
| 297 | 308 |
| 298 MemoryChunk* next_chunk() const { return next_chunk_; } | 309 MemoryChunk* next_chunk() const { return next_chunk_; } |
| 310 MemoryChunk* prev_chunk() const { return prev_chunk_; } |
| 299 | 311 |
| 300 void set_next_chunk(MemoryChunk* next) { next_chunk_ = next; } | 312 void set_next_chunk(MemoryChunk* next) { next_chunk_ = next; } |
| 313 void set_prev_chunk(MemoryChunk* prev) { prev_chunk_ = prev; } |
| 301 | 314 |
| 302 Space* owner() const { return owner_; } | 315 Space* owner() const { return owner_; } |
| 303 | 316 |
| 304 Address body() { return address() + kBodyOffset; } | 317 Address body() { return address() + kObjectStartOffset; } |
| 305 | 318 |
| 306 int body_size() { return size() - kBodyOffset; } | 319 int body_size() { return size() - kObjectStartOffset; } |
| 307 | 320 |
| 308 enum MemoryChunkFlags { | 321 enum MemoryChunkFlags { |
| 309 IS_EXECUTABLE, | 322 IS_EXECUTABLE, |
| 323 WAS_SWEPT_CONSERVATIVELY, |
| 310 NUM_MEMORY_CHUNK_FLAGS | 324 NUM_MEMORY_CHUNK_FLAGS |
| 311 }; | 325 }; |
| 312 | 326 |
| 313 void SetFlag(int flag) { | 327 void SetFlag(int flag) { |
| 314 flags_ |= 1 << flag; | 328 flags_ |= 1 << flag; |
| 315 } | 329 } |
| 316 | 330 |
| 317 void ClearFlag(int flag) { | 331 void ClearFlag(int flag) { |
| 318 flags_ &= ~(1 << flag); | 332 flags_ &= ~(1 << flag); |
| 319 } | 333 } |
| (...skipping 11 matching lines...) |
| 331 | 345 |
| 332 static const size_t kMarksBitmapLength = | 346 static const size_t kMarksBitmapLength = |
| 333 (1 << kPageSizeBits) >> (kPointerSizeLog2); | 347 (1 << kPageSizeBits) >> (kPointerSizeLog2); |
| 334 | 348 |
| 335 static const size_t kMarksBitmapSize = | 349 static const size_t kMarksBitmapSize = |
| 336 (1 << kPageSizeBits) >> (kPointerSizeLog2 + kBitsPerByteLog2); | 350 (1 << kPageSizeBits) >> (kPointerSizeLog2 + kBitsPerByteLog2); |
| 337 | 351 |
| 338 static const int kBodyOffset = | 352 static const int kBodyOffset = |
| 339 CODE_POINTER_ALIGN(MAP_POINTER_ALIGN(kHeaderSize + kMarksBitmapSize)); | 353 CODE_POINTER_ALIGN(MAP_POINTER_ALIGN(kHeaderSize + kMarksBitmapSize)); |
| 340 | 354 |
| 355 // The start offset of the object area in a page. Aligned to both maps and |
| 356 // code alignment to be suitable for both. Also aligned to 32 words because |
| 357 // the marking bitmap is arranged in 32 bit chunks. |
| 358 static const int kObjectStartAlignment = 32 * kPointerSize; |
| 359 static const int kObjectStartOffset = kBodyOffset - 1 + |
| 360 (kObjectStartAlignment - (kBodyOffset - 1) % kObjectStartAlignment); |
| 361 |
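The kObjectStartOffset expression is the usual round-up-to-the-next-multiple idiom; as a quick illustration with made-up numbers (this helper is hypothetical, not part of the patch):

```cpp
// Illustrative only: for any x >= 1 and alignment A,
//   (x - 1) + (A - (x - 1) % A)  ==  smallest multiple of A that is >= x.
constexpr int RoundUp(int x, int a) { return (x - 1) + (a - (x - 1) % a); }
static_assert(RoundUp(300, 128) == 384, "300 rounded up to 128 is 384");
static_assert(RoundUp(256, 128) == 256, "exact multiples stay unchanged");
```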
| 341 size_t size() const { return size_; } | 362 size_t size() const { return size_; } |
| 342 | 363 |
| 343 Executability executable() { | 364 Executability executable() { |
| 344 return IsFlagSet(IS_EXECUTABLE) ? EXECUTABLE : NOT_EXECUTABLE; | 365 return IsFlagSet(IS_EXECUTABLE) ? EXECUTABLE : NOT_EXECUTABLE; |
| 345 } | 366 } |
| 346 | 367 |
| 347 // --------------------------------------------------------------------- | 368 // --------------------------------------------------------------------- |
| 348 // Markbits support | 369 // Markbits support |
| 349 | 370 |
| 350 class BitmapStorageDescriptor { | 371 class BitmapStorageDescriptor { |
| (...skipping 20 matching lines...) |
| 371 const intptr_t offset = | 392 const intptr_t offset = |
| 372 reinterpret_cast<intptr_t>(addr) & kAlignmentMask; | 393 reinterpret_cast<intptr_t>(addr) & kAlignmentMask; |
| 373 | 394 |
| 374 return static_cast<uint32_t>(offset) >> kPointerSizeLog2; | 395 return static_cast<uint32_t>(offset) >> kPointerSizeLog2; |
| 375 } | 396 } |
| 376 | 397 |
| 377 inline Address MarkbitIndexToAddress(uint32_t index) { | 398 inline Address MarkbitIndexToAddress(uint32_t index) { |
| 378 return this->address() + (index << kPointerSizeLog2); | 399 return this->address() + (index << kPointerSizeLog2); |
| 379 } | 400 } |
| 380 | 401 |
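AddressToMarkbitIndex above yields one bit per pointer-sized word of the chunk; splitting such an index into a bitmap cell and a bit position follows from the Bitmap constants shown earlier. A self-contained illustration with hypothetical values (a 64-bit build is assumed, so kPointerSizeLog2 is 3):

```cpp
#include <cassert>
#include <cstdint>

int main() {
  const uint32_t kBitsPerCellLog2 = 5;  // 32-bit bitmap cells
  const uint32_t kBitIndexMask = (1u << kBitsPerCellLog2) - 1;

  // A word 0x1040 bytes into the chunk (hypothetical offset) gets markbit
  // index 0x1040 >> 3 = 520 on a 64-bit build.
  uint32_t markbit_index = 0x1040u >> 3;
  uint32_t cell = markbit_index >> kBitsPerCellLog2;  // which 32-bit cell
  uint32_t bit = markbit_index & kBitIndexMask;       // which bit within it
  assert(cell == 16 && bit == 8);                     // 520 == 16 * 32 + 8
  return 0;
}
```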
| 402 void InsertAfter(MemoryChunk* other); |
| 403 void Unlink(); |
| 404 |
| 381 protected: | 405 protected: |
| 382 MemoryChunk* next_chunk_; | 406 MemoryChunk* next_chunk_; |
| 407 MemoryChunk* prev_chunk_; |
| 383 size_t size_; | 408 size_t size_; |
| 384 intptr_t flags_; | 409 intptr_t flags_; |
| 385 Space* owner_; | 410 Space* owner_; |
| 386 | 411 |
| 387 private: | |
| 388 static MemoryChunk* Initialize(Address base, | 412 static MemoryChunk* Initialize(Address base, |
| 389 size_t size, | 413 size_t size, |
| 390 Executability executable, | 414 Executability executable, |
| 391 Space* owner) { | 415 Space* owner) { |
| 392 MemoryChunk* chunk = FromAddress(base); | 416 MemoryChunk* chunk = FromAddress(base); |
| 393 | 417 |
| 394 ASSERT(base == chunk->address()); | 418 ASSERT(base == chunk->address()); |
| 395 | 419 |
| 396 chunk->next_chunk_ = NULL; | |
| 397 chunk->size_ = size; | 420 chunk->size_ = size; |
| 398 chunk->flags_ = 0; | 421 chunk->flags_ = 0; |
| 399 chunk->owner_ = owner; | 422 chunk->owner_ = owner; |
| 400 chunk->markbits()->Clear(); | 423 chunk->markbits()->Clear(); |
| 401 | 424 |
| 402 if (executable == EXECUTABLE) chunk->SetFlag(IS_EXECUTABLE); | 425 if (executable == EXECUTABLE) chunk->SetFlag(IS_EXECUTABLE); |
| 403 | 426 |
| 404 return chunk; | 427 return chunk; |
| 405 } | 428 } |
| 406 | 429 |
| 407 friend class MemoryAllocator; | 430 friend class MemoryAllocator; |
| 408 }; | 431 }; |
| 409 | 432 |
| 410 STATIC_CHECK(sizeof(MemoryChunk) <= MemoryChunk::kHeaderSize); | 433 STATIC_CHECK(sizeof(MemoryChunk) <= MemoryChunk::kHeaderSize); |
| 411 | 434 |
| 412 // ----------------------------------------------------------------------------- | 435 // ----------------------------------------------------------------------------- |
| 413 // A page is a memory chunk of a size 1MB. Large object pages may be larger. | 436 // A page is a memory chunk of a size 1MB. Large object pages may be larger. |
| 414 // | 437 // |
| 415 // The only way to get a page pointer is by calling factory methods: | 438 // The only way to get a page pointer is by calling factory methods: |
| 416 // Page* p = Page::FromAddress(addr); or | 439 // Page* p = Page::FromAddress(addr); or |
| 417 // Page* p = Page::FromAllocationTop(top); | 440 // Page* p = Page::FromAllocationTop(top); |
| 418 class Page : public MemoryChunk { | 441 class Page : public MemoryChunk { |
| 419 public: | 442 public: |
| 420 // Returns the page containing a given address. The address ranges | 443 // Returns the page containing a given address. The address ranges |
| 421 // from [page_addr .. page_addr + kPageSize[ | 444 // from [page_addr .. page_addr + kPageSize[ |
| 422 // | 445 // |
| 423 // Note that this function only works for addresses in normal paged | 446 // Note that this function only works for addresses in normal paged |
| 424 // spaces and addresses in the first 8K of large object pages (i.e., | 447 // spaces and addresses in the first 1Mbyte of large object pages (i.e., |
| 425 // the start of large objects but not necessarily derived pointers | 448 // the start of large objects but not necessarily derived pointers |
| 426 // within them). | 449 // within them). |
| 427 INLINE(static Page* FromAddress(Address a)) { | 450 INLINE(static Page* FromAddress(Address a)) { |
| 428 return reinterpret_cast<Page*>(OffsetFrom(a) & ~kPageAlignmentMask); | 451 return reinterpret_cast<Page*>(OffsetFrom(a) & ~kPageAlignmentMask); |
| 429 } | 452 } |
| 430 | 453 |
| 431 // Returns the page containing an allocation top. Because an allocation | 454 // Returns the page containing an allocation top. Because an allocation |
| 432 // top address can be the upper bound of the page, we need to subtract | 455 // top address can be the upper bound of the page, we need to subtract |
| 433 // it with kPointerSize first. The address ranges from | 456 // it with kPointerSize first. The address ranges from |
| 434 // [page_addr + kObjectStartOffset .. page_addr + kPageSize]. | 457 // [page_addr + kObjectStartOffset .. page_addr + kPageSize]. |
| 435 INLINE(static Page* FromAllocationTop(Address top)) { | 458 INLINE(static Page* FromAllocationTop(Address top)) { |
| 436 Page* p = FromAddress(top - kPointerSize); | 459 Page* p = FromAddress(top - kPointerSize); |
| 437 ASSERT_PAGE_OFFSET(p->Offset(top)); | 460 ASSERT_PAGE_OFFSET(p->Offset(top)); |
| 438 return p; | 461 return p; |
| 439 } | 462 } |
| 440 | 463 |
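Why FromAllocationTop backs up by one word first: when the allocation top sits exactly at the page's exclusive end, masking it directly would name the following page. A small standalone check of that edge case (hypothetical addresses, 1 MB pages assumed):

```cpp
#include <cassert>
#include <cstdint>

int main() {
  const uintptr_t kPageSize = 1u << 20;  // hypothetical, as in kPageSizeBits == 20
  const uintptr_t kPageAlignmentMask = kPageSize - 1;
  const uintptr_t page = 0x40000000;       // some page-aligned address
  const uintptr_t top = page + kPageSize;  // allocation top at the very end

  // Masking the top directly lands on the *next* page...
  assert((top & ~kPageAlignmentMask) == page + kPageSize);
  // ...while backing up by one word first stays on the page that was
  // actually allocated from, which is what FromAllocationTop relies on.
  assert(((top - sizeof(void*)) & ~kPageAlignmentMask) == page);
  return 0;
}
```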
| 441 // Returns the next page in the chain of pages owned by a space. | 464 // Returns the next page in the chain of pages owned by a space. |
| 442 inline Page* next_page() { | 465 inline Page* next_page(); |
| 443 ASSERT(!next_chunk()->is_valid() || next_chunk()->owner() == owner()); | 466 inline Page* prev_page(); |
| 444 return static_cast<Page*>(next_chunk()); | 467 inline void set_next_page(Page* page); |
| 445 } | 468 inline void set_prev_page(Page* page); |
| 446 | 469 |
| 447 inline void set_next_page(Page* page) { | 470 PagedSpace* owner() const { return reinterpret_cast<PagedSpace*>(owner_); } |
| 448 ASSERT(!page->is_valid() || page->owner() == owner()); | |
| 449 set_next_chunk(page); | |
| 450 } | |
| 451 | |
| 452 // Return the end of allocation in this page. Undefined for unused pages. | |
| 453 inline Address AllocationTop(); | |
| 454 | |
| 455 // Return the allocation watermark for the page. | |
| 456 // For old space pages it is guaranteed that the area under the watermark | |
| 457 // does not contain any garbage pointers to new space. | |
| 458 inline Address AllocationWatermark(); | |
| 459 | |
| 460 // Return the allocation watermark offset from the beginning of the page. | |
| 461 inline uint32_t AllocationWatermarkOffset(); | |
| 462 | |
| 463 inline void SetAllocationWatermark(Address allocation_watermark); | |
| 464 | |
| 465 inline void SetCachedAllocationWatermark(Address allocation_watermark); | |
| 466 inline Address CachedAllocationWatermark(); | |
| 467 | 471 |
| 468 // Returns the start address of the object area in this page. | 472 // Returns the start address of the object area in this page. |
| 469 Address ObjectAreaStart() { return address() + kObjectStartOffset; } | 473 Address ObjectAreaStart() { return address() + kObjectStartOffset; } |
| 470 | 474 |
| 471 // Returns the end address (exclusive) of the object area in this page. | 475 // Returns the end address (exclusive) of the object area in this page. |
| 472 Address ObjectAreaEnd() { return address() + Page::kPageSize; } | 476 Address ObjectAreaEnd() { return address() + Page::kPageSize; } |
| 473 | 477 |
| 474 // Checks whether an address is page aligned. | 478 // Checks whether an address is page aligned. |
| 475 static bool IsAlignedToPageSize(Address a) { | 479 static bool IsAlignedToPageSize(Address a) { |
| 476 return 0 == (OffsetFrom(a) & kPageAlignmentMask); | 480 return 0 == (OffsetFrom(a) & kPageAlignmentMask); |
| (...skipping 13 matching lines...) |
| 490 } | 494 } |
| 491 | 495 |
| 492 // --------------------------------------------------------------------- | 496 // --------------------------------------------------------------------- |
| 493 | 497 |
| 494 // Page size in bytes. This must be a multiple of the OS page size. | 498 // Page size in bytes. This must be a multiple of the OS page size. |
| 495 static const int kPageSize = 1 << kPageSizeBits; | 499 static const int kPageSize = 1 << kPageSizeBits; |
| 496 | 500 |
| 497 // Page size mask. | 501 // Page size mask. |
| 498 static const intptr_t kPageAlignmentMask = (1 << kPageSizeBits) - 1; | 502 static const intptr_t kPageAlignmentMask = (1 << kPageSizeBits) - 1; |
| 499 | 503 |
| 500 // The start offset of the object area in a page. Aligned to both maps and | |
| 501 // code alignment to be suitable for both. | |
| 502 static const int kObjectStartOffset = kBodyOffset; | |
| 503 | |
| 504 // Object area size in bytes. | 504 // Object area size in bytes. |
| 505 static const int kObjectAreaSize = kPageSize - kObjectStartOffset; | 505 static const int kObjectAreaSize = kPageSize - kObjectStartOffset; |
| 506 | 506 |
| 507 // Maximum object size that fits in a page. | 507 // Maximum object size that fits in a page. |
| 508 static const int kMaxHeapObjectSize = kObjectAreaSize; | 508 static const int kMaxHeapObjectSize = kObjectAreaSize; |
| 509 | 509 |
| 510 static const int kFirstUsedCell = | 510 static const int kFirstUsedCell = |
| 511 (kBodyOffset/kPointerSize) >> MarkbitsBitmap::kBitsPerCellLog2; | 511 (kObjectStartOffset/kPointerSize) >> MarkbitsBitmap::kBitsPerCellLog2; |
| 512 | 512 |
| 513 static const int kLastUsedCell = | 513 static const int kLastUsedCell = |
| 514 ((kPageSize - kPointerSize)/kPointerSize) >> | 514 ((kPageSize - kPointerSize)/kPointerSize) >> |
| 515 MarkbitsBitmap::kBitsPerCellLog2; | 515 MarkbitsBitmap::kBitsPerCellLog2; |
| 516 | 516 |
| 517 | |
| 518 enum PageFlag { | |
| 519 // Page allocation watermark was bumped by preallocation during scavenge. | |
| 520 // Correct watermark can be retrieved by CachedAllocationWatermark() method | |
| 521 WATERMARK_INVALIDATED = NUM_MEMORY_CHUNK_FLAGS, | |
| 522 | |
| 523 // We say that memory region [start_addr, end_addr[ is continuous if | |
| 524 // and only if: | |
| 525 // a) start_addr coincides with the start of a valid heap object | |
| 526 // b) for any valid heap object o in this region address | |
| 527 // o->address() + o->Size() is either equal to end_addr or coincides | |
| 528 // with the start of a valid heap object. | |
| 529 // We say that a page is continuous if and only if the region | |
| 530 // [page->ObjectAreaStart(), page->AllocationTop()[ is continuous. | |
| 531 // For non-continuous pages we say that address lb is a linearity boundary | |
| 532 // if and only if [lb, page->AllocationTop()[ is either empty or continuous. | |
| 533 IS_CONTINUOUS, | |
| 534 | |
| 535 NUM_PAGE_FLAGS // Must be last | |
| 536 }; | |
| 537 | |
| 538 // To avoid an additional WATERMARK_INVALIDATED flag clearing pass during | |
| 539 // scavenge we just invalidate the watermark on each old space page after | |
| 540 // processing it. And then we flip the meaning of the WATERMARK_INVALIDATED | |
| 541 // flag at the beginning of the next scavenge and each page becomes marked as | |
| 542 // having a valid watermark. | |
| 543 // | |
| 544 // The following invariant must hold for pages in old pointer and map spaces: | |
| 545 // If page is in use then page is marked as having invalid watermark at | |
| 546 // the beginning and at the end of any GC. | |
| 547 // | |
| 548 // This invariant guarantees that after flipping flag meaning at the | |
| 549 // beginning of scavenge all pages in use will be marked as having valid | |
| 550 // watermark. | |
| 551 static inline void FlipMeaningOfInvalidatedWatermarkFlag(); | |
| 552 | |
| 553 // Returns true if the page allocation watermark was not altered during | |
| 554 // scavenge. | |
| 555 inline bool IsWatermarkValid(); | |
| 556 | |
| 557 inline void InvalidateWatermark(bool value); | |
| 558 | |
| 559 inline void ClearGCFields(); | 517 inline void ClearGCFields(); |
| 560 | 518 |
| 561 static const int kAllocationWatermarkOffsetShift = NUM_PAGE_FLAGS; | 519 static inline Page* Initialize(MemoryChunk* chunk, |
| 562 static const int kAllocationWatermarkOffsetBits = kPageSizeBits + 1; | 520 Executability executable, |
| 563 static const uint32_t kAllocationWatermarkOffsetMask = | 521 PagedSpace* owner); |
| 564 ((1 << kAllocationWatermarkOffsetBits) - 1) << | |
| 565 kAllocationWatermarkOffsetShift; | |
| 566 | 522 |
| 567 static const uint32_t kFlagsMask = | 523 void InitializeAsAnchor(PagedSpace* owner); |
| 568 ((1 << kAllocationWatermarkOffsetShift) - 1); | |
| 569 | |
| 570 STATIC_CHECK(kBitsPerInt - kAllocationWatermarkOffsetShift >= | |
| 571 kAllocationWatermarkOffsetBits); | |
| 572 | |
| 573 // This field contains the meaning of the WATERMARK_INVALIDATED flag. | |
| 574 // Instead of clearing this flag from all pages we just flip | |
| 575 // its meaning at the beginning of a scavenge. | |
| 576 static intptr_t watermark_invalidated_mark_; | |
| 577 | |
| 578 // See comments for IS_CONTINUOUS flag. | |
| 579 Address linearity_boundary() { return linearity_boundary_; } | |
| 580 void set_linearity_boundary(Address linearity_boundary) { | |
| 581 linearity_boundary_ = linearity_boundary; | |
| 582 } | |
| 583 | |
| 584 private: | |
| 585 static Page* Initialize(MemoryChunk* chunk) { | |
| 586 Page* page = static_cast<Page*>(chunk); | |
| 587 page->allocation_watermark_ = page->body(); | |
| 588 page->InvalidateWatermark(true); | |
| 589 page->SetFlag(IS_CONTINUOUS); | |
| 590 return page; | |
| 591 } | |
| 592 | |
| 593 Address allocation_watermark_; | |
| 594 | |
| 595 // See comments for IS_CONTINUOUS flag. | |
| 596 Address linearity_boundary_; | |
| 597 | 524 |
| 598 friend class MemoryAllocator; | 525 friend class MemoryAllocator; |
| 599 }; | 526 }; |
| 600 | 527 |
| 528 |
| 601 STATIC_CHECK(sizeof(Page) <= MemoryChunk::kHeaderSize); | 529 STATIC_CHECK(sizeof(Page) <= MemoryChunk::kHeaderSize); |
| 602 | 530 |
| 531 |
| 603 class LargePage : public MemoryChunk { | 532 class LargePage : public MemoryChunk { |
| 604 public: | 533 public: |
| 605 HeapObject* GetObject() { | 534 HeapObject* GetObject() { |
| 606 return HeapObject::FromAddress(body()); | 535 return HeapObject::FromAddress(body()); |
| 607 } | 536 } |
| 608 | 537 |
| 609 inline LargePage* next_page() const { | 538 inline LargePage* next_page() const { |
| 610 return static_cast<LargePage*>(next_chunk()); | 539 return static_cast<LargePage*>(next_chunk()); |
| 611 } | 540 } |
| 612 | 541 |
| (...skipping 284 matching lines...) |
| 897 virtual ~ObjectIterator() { } | 826 virtual ~ObjectIterator() { } |
| 898 | 827 |
| 899 virtual HeapObject* next_object() = 0; | 828 virtual HeapObject* next_object() = 0; |
| 900 }; | 829 }; |
| 901 | 830 |
| 902 | 831 |
| 903 // ----------------------------------------------------------------------------- | 832 // ----------------------------------------------------------------------------- |
| 904 // Heap object iterator in new/old/map spaces. | 833 // Heap object iterator in new/old/map spaces. |
| 905 // | 834 // |
| 906 // A HeapObjectIterator iterates objects from the bottom of the given space | 835 // A HeapObjectIterator iterates objects from the bottom of the given space |
| 907 // to it's top or from the bottom of the given page to it's top. | 836 // to its top or from the bottom of the given page to its top. |
| 908 // | 837 // |
| 909 // There are some caveats. | 838 // If objects are allocated in the page during iteration the iterator may |
| 910 // | 839 // or may not iterate over those objects. The caller must create a new |
| 911 // (1) If the space top changes upward during iteration (because of | 840 // iterator in order to be sure to visit these new objects. |
| 912 // allocating new objects), the iterator does not iterate objects | |
| 913 // above the original space top. The caller must create a new | |
| 914 // iterator starting from the old top in order to visit these new | |
| 915 // objects. | |
| 916 // | |
| 917 // (2) If new objects are allocated below the original allocation top | |
| 918 // (e.g., free-list allocation in paged spaces), the new objects | |
| 919 // may or may not be iterated depending on their position with | |
| 920 // respect to the current point of iteration. | |
| 921 // | |
| 922 // (3) The space top should not change downward during iteration, | |
| 923 // otherwise the iterator will return not-necessarily-valid | |
| 924 // objects. | |
| 925 | |
| 926 class HeapObjectIterator: public ObjectIterator { | 841 class HeapObjectIterator: public ObjectIterator { |
| 927 public: | 842 public: |
| 928 // Creates a new object iterator in a given space. | 843 // Creates a new object iterator in a given space. |
| 929 // If the size function is not given, the iterator calls the default | 844 // If the size function is not given, the iterator calls the default |
| 930 // Object::Size(). | 845 // Object::Size(). |
| 931 explicit HeapObjectIterator(PagedSpace* space); | 846 explicit HeapObjectIterator(PagedSpace* space); |
| 932 HeapObjectIterator(PagedSpace* space, HeapObjectCallback size_func); | 847 HeapObjectIterator(PagedSpace* space, HeapObjectCallback size_func); |
| 933 HeapObjectIterator(Page* page, HeapObjectCallback size_func); | 848 HeapObjectIterator(Page* page, HeapObjectCallback size_func); |
| 934 | 849 |
| 935 inline HeapObject* next() { | 850 // Advance to the next object, skipping free spaces and other fillers and |
| 936 return (cur_addr_ < cur_limit_) ? FromCurrentPage() : FromNextPage(); | 851 // skipping the special garbage section of which there is one per space. |
| 852 // Returns NULL when the iteration has ended. |
| 853 inline HeapObject* Next() { |
| 854 do { |
| 855 HeapObject* next_obj = FromCurrentPage(); |
| 856 if (next_obj != NULL) return next_obj; |
| 857 } while (AdvanceToNextPage()); |
| 858 return NULL; |
| 937 } | 859 } |
| 938 | 860 |
| 939 // implementation of ObjectIterator. | 861 virtual HeapObject* next_object() { |
| 940 virtual HeapObject* next_object() { return next(); } | 862 return Next(); |
| 863 } |
| 941 | 864 |
| 942 private: | 865 private: |
| 943 Address cur_addr_; // current iteration point | 866 enum PageMode { kOnePageOnly, kAllPagesInSpace }; |
| 944 Address end_addr_; // end iteration point | |
| 945 Address cur_limit_; // current page limit | |
| 946 HeapObjectCallback size_func_; // size function | |
| 947 Page* end_page_; // caches the page of the end address | |
| 948 | 867 |
| 949 HeapObject* FromCurrentPage() { | 868 Address cur_addr_; // Current iteration point. |
| 950 ASSERT(cur_addr_ < cur_limit_); | 869 Address cur_end_; // End iteration point. |
| 951 HeapObject* obj = HeapObject::FromAddress(cur_addr_); | 870 HeapObjectCallback size_func_; // Size function or NULL. |
| 871 PagedSpace* space_; |
| 872 PageMode page_mode_; |
| 952 | 873 |
| 953 Page* p = Page::FromAddress(cur_addr_); | 874 // Fast (inlined) path of next(). |
| 954 if (p->IsFlagSet(Page::IS_CONTINUOUS)) { | 875 inline HeapObject* FromCurrentPage(); |
| 955 int obj_size = (size_func_ == NULL) ? obj->Size() : size_func_(obj); | |
| 956 ASSERT_OBJECT_SIZE(obj_size); | |
| 957 | 876 |
| 958 cur_addr_ += obj_size; | 877 // Slow path of next(), goes into the next page. Returns false if the |
| 959 ASSERT(cur_addr_ <= cur_limit_); | 878 // iteration has ended. |
| 960 } else { | 879 bool AdvanceToNextPage(); |
| 961 AdvanceUsingMarkbits(); | |
| 962 } | |
| 963 | |
| 964 return obj; | |
| 965 } | |
| 966 | |
| 967 void AdvanceUsingMarkbits(); | |
| 968 | |
| 969 // Slow path of next, goes into the next page. | |
| 970 HeapObject* FromNextPage(); | |
| 971 | 880 |
| 972 // Initializes fields. | 881 // Initializes fields. |
| 973 void Initialize(Address start, Address end, HeapObjectCallback size_func); | 882 inline void Initialize(PagedSpace* owner, |
| 883 Address start, |
| 884 Address end, |
| 885 PageMode mode, |
| 886 HeapObjectCallback size_func); |
| 974 | 887 |
| 975 #ifdef DEBUG | 888 #ifdef DEBUG |
| 976 // Verifies whether fields have valid values. | 889 // Verifies whether fields have valid values. |
| 977 void Verify(); | 890 void Verify(); |
| 978 #endif | 891 #endif |
| 979 }; | 892 }; |
| 980 | 893 |
| 981 | 894 |
| 982 // ----------------------------------------------------------------------------- | 895 // ----------------------------------------------------------------------------- |
| 983 // A PageIterator iterates the pages in a paged space. | 896 // A PageIterator iterates the pages in a paged space. |
| 984 // | |
| 985 // The PageIterator class provides three modes for iterating pages in a space: | |
| 986 // PAGES_IN_USE iterates pages containing allocated objects. | |
| 987 // PAGES_USED_BY_MC iterates pages that hold relocated objects during a | |
| 988 // mark-compact collection. | |
| 989 // ALL_PAGES iterates all pages in the space. | |
| 990 // | |
| 991 // There are some caveats. | |
| 992 // | |
| 993 // (1) If the space expands during iteration, new pages will not be | |
| 994 // returned by the iterator in any mode. | |
| 995 // | |
| 996 // (2) If new objects are allocated during iteration, they will appear | |
| 997 // in pages returned by the iterator. Allocation may cause the | |
| 998 // allocation pointer or MC allocation pointer in the last page to | |
| 999 // change between constructing the iterator and iterating the last | |
| 1000 // page. | |
| 1001 // | |
| 1002 // (3) The space should not shrink during iteration, otherwise the | |
| 1003 // iterator will return deallocated pages. | |
| 1004 | 897 |
| 1005 class PageIterator BASE_EMBEDDED { | 898 class PageIterator BASE_EMBEDDED { |
| 1006 public: | 899 public: |
| 1007 enum Mode { | 900 explicit inline PageIterator(PagedSpace* space); |
| 1008 PAGES_IN_USE, | |
| 1009 ALL_PAGES | |
| 1010 }; | |
| 1011 | |
| 1012 PageIterator(PagedSpace* space, Mode mode); | |
| 1013 | 901 |
| 1014 inline bool has_next(); | 902 inline bool has_next(); |
| 1015 inline Page* next(); | 903 inline Page* next(); |
| 1016 | 904 |
| 1017 private: | 905 private: |
| 1018 PagedSpace* space_; | 906 PagedSpace* space_; |
| 1019 Page* prev_page_; // Previous page returned. | 907 Page* prev_page_; // Previous page returned. |
| 1020 Page* stop_page_; // Page to stop at (last page returned by the iterator). | 908 // Next page that will be returned. Cached here so that we can use this |
| 909 // iterator for operations that deallocate pages. |
| 910 Page* next_page_; |
| 1021 }; | 911 }; |
| 1022 | 912 |
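Caching next_page_ up front is what lets callers deallocate the page they were just handed without breaking the iteration. The same pattern in a generic, self-contained form (illustrative Node type, not the V8 classes):

```cpp
#include <cassert>

struct Node {
  Node* next;
  int value;
};

// Iterator that remembers the next node before yielding the current one, so
// the caller may delete the node it was just given without invalidating the
// iteration.
class NodeIterator {
 public:
  explicit NodeIterator(Node* head) : next_(head) {}
  bool has_next() const { return next_ != nullptr; }
  Node* next() {
    Node* current = next_;
    next_ = current->next;  // advance before the caller can free 'current'
    return current;
  }

 private:
  Node* next_;
};

int main() {
  Node* c = new Node{nullptr, 3};
  Node* b = new Node{c, 2};
  Node* a = new Node{b, 1};
  int sum = 0;
  for (NodeIterator it(a); it.has_next();) {
    Node* n = it.next();
    sum += n->value;
    delete n;  // safe: the iterator no longer looks at this node
  }
  assert(sum == 6);
  return 0;
}
```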
| 1023 | 913 |
| 1024 // ----------------------------------------------------------------------------- | 914 // ----------------------------------------------------------------------------- |
| 1025 // A space has a list of pages. The next page can be accessed via | 915 // A space has a circular list of pages. The next page can be accessed via |
| 1026 // Page::next_page() call. The next page of the last page is an | 916 // Page::next_page() call. |
| 1027 // invalid page pointer. A space can expand and shrink dynamically. | |
| 1028 | 917 |
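The circular page list is anchored by a dummy page (anchor_ in PagedSpace below), which is why InsertAfter and Unlink on MemoryChunk need no empty-list or head special cases. A hedged sketch of that sentinel idiom with a made-up Link type:

```cpp
#include <cassert>

// Sentinel-anchored circular doubly-linked list: the anchor node is always
// present, so every insertion and removal is just pointer surgery on two
// neighbours, with no null checks.
struct Link {
  Link* next;
  Link* prev;
  Link() : next(this), prev(this) {}  // a fresh node (or anchor) points at itself

  void InsertAfter(Link* other) {  // link *this right after 'other'
    next = other->next;
    prev = other;
    other->next->prev = this;
    other->next = this;
  }
  void Unlink() {  // remove *this from whatever list it is on
    next->prev = prev;
    prev->next = next;
    next = prev = this;
  }
  bool Empty() const { return next == this; }  // meaningful on the anchor
};

int main() {
  Link anchor, a, b;
  a.InsertAfter(&anchor);  // list: anchor -> a
  b.InsertAfter(&a);       // list: anchor -> a -> b
  assert(anchor.next == &a && a.next == &b && b.next == &anchor);
  a.Unlink();              // list: anchor -> b
  assert(anchor.next == &b && b.prev == &anchor);
  assert(!anchor.Empty());
  return 0;
}
```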
| 1029 // An abstraction of allocation and relocation pointers in a page-structured | 918 // An abstraction of allocation and relocation pointers in a page-structured |
| 1030 // space. | 919 // space. |
| 1031 class AllocationInfo { | 920 class AllocationInfo { |
| 1032 public: | 921 public: |
| 1033 Address top; // current allocation top | 922 Address top; // Current allocation top. |
| 1034 Address limit; // current allocation limit | 923 Address limit; // Current allocation limit. |
| 1035 | 924 |
| 1036 #ifdef DEBUG | 925 #ifdef DEBUG |
| 1037 bool VerifyPagedAllocation() { | 926 bool VerifyPagedAllocation() { |
| 1038 return (Page::FromAllocationTop(top) == Page::FromAllocationTop(limit)) | 927 return (Page::FromAllocationTop(top) == Page::FromAllocationTop(limit)) |
| 1039 && (top <= limit); | 928 && (top <= limit); |
| 1040 } | 929 } |
| 1041 #endif | 930 #endif |
| 1042 }; | 931 }; |
| 1043 | 932 |
| 1044 | 933 |
| (...skipping 11 matching lines...) |
| 1056 // functions increase or decrease one of the non-capacity stats in | 945 // functions increase or decrease one of the non-capacity stats in |
| 1057 // conjunction with capacity, or else they always balance increases and | 946 // conjunction with capacity, or else they always balance increases and |
| 1058 // decreases to the non-capacity stats. | 947 // decreases to the non-capacity stats. |
| 1059 class AllocationStats BASE_EMBEDDED { | 948 class AllocationStats BASE_EMBEDDED { |
| 1060 public: | 949 public: |
| 1061 AllocationStats() { Clear(); } | 950 AllocationStats() { Clear(); } |
| 1062 | 951 |
| 1063 // Zero out all the allocation statistics (ie, no capacity). | 952 // Zero out all the allocation statistics (ie, no capacity). |
| 1064 void Clear() { | 953 void Clear() { |
| 1065 capacity_ = 0; | 954 capacity_ = 0; |
| 1066 available_ = 0; | |
| 1067 size_ = 0; | 955 size_ = 0; |
| 1068 waste_ = 0; | 956 waste_ = 0; |
| 1069 } | 957 } |
| 1070 | 958 |
| 1071 // Reset the allocation statistics (ie, available = capacity with no | 959 // Reset the allocation statistics (ie, available = capacity with no |
| 1072 // wasted or allocated bytes). | 960 // wasted or allocated bytes). |
| 1073 void Reset() { | 961 void Reset() { |
| 1074 available_ = capacity_; | |
| 1075 size_ = 0; | 962 size_ = 0; |
| 1076 waste_ = 0; | 963 waste_ = 0; |
| 1077 } | 964 } |
| 1078 | 965 |
| 1079 // Accessors for the allocation statistics. | 966 // Accessors for the allocation statistics. |
| 1080 intptr_t Capacity() { return capacity_; } | 967 intptr_t Capacity() { return capacity_; } |
| 1081 intptr_t Available() { return available_; } | |
| 1082 intptr_t Size() { return size_; } | 968 intptr_t Size() { return size_; } |
| 1083 intptr_t Waste() { return waste_; } | 969 intptr_t Waste() { return waste_; } |
| 1084 | 970 |
| 1085 // Grow the space by adding available bytes. | 971 // Grow the space by adding available bytes. They are initially marked as |
| 972 // being in use (part of the size), but will normally be immediately freed, |
| 973 // putting them on the free list and removing them from size_. |
| 1086 void ExpandSpace(int size_in_bytes) { | 974 void ExpandSpace(int size_in_bytes) { |
| 1087 capacity_ += size_in_bytes; | 975 capacity_ += size_in_bytes; |
| 1088 available_ += size_in_bytes; | 976 size_ += size_in_bytes; |
| 1089 } | 977 ASSERT(size_ >= 0); |
| 1090 | |
| 1091 // Shrink the space by removing available bytes. | |
| 1092 void ShrinkSpace(int size_in_bytes) { | |
| 1093 capacity_ -= size_in_bytes; | |
| 1094 available_ -= size_in_bytes; | |
| 1095 } | 978 } |
| 1096 | 979 |
| 1097 // Allocate from available bytes (available -> size). | 980 // Allocate from available bytes (available -> size). |
| 1098 void AllocateBytes(intptr_t size_in_bytes) { | 981 void AllocateBytes(intptr_t size_in_bytes) { |
| 1099 available_ -= size_in_bytes; | |
| 1100 size_ += size_in_bytes; | 982 size_ += size_in_bytes; |
| 983 ASSERT(size_ >= 0); |
| 1101 } | 984 } |
| 1102 | 985 |
| 1103 // Free allocated bytes, making them available (size -> available). | 986 // Free allocated bytes, making them available (size -> available). |
| 1104 void DeallocateBytes(intptr_t size_in_bytes) { | 987 void DeallocateBytes(intptr_t size_in_bytes) { |
| 1105 size_ -= size_in_bytes; | 988 size_ -= size_in_bytes; |
| 1106 available_ += size_in_bytes; | 989 ASSERT(size_ >= 0); |
| 1107 } | 990 } |
| 1108 | 991 |
| 1109 // Waste free bytes (available -> waste). | 992 // Waste free bytes (available -> waste). |
| 1110 void WasteBytes(int size_in_bytes) { | 993 void WasteBytes(int size_in_bytes) { |
| 1111 available_ -= size_in_bytes; | 994 size_ -= size_in_bytes; |
| 1112 waste_ += size_in_bytes; | 995 waste_ += size_in_bytes; |
| 1113 } | 996 ASSERT(size_ >= 0); |
| 1114 | |
| 1115 // Consider the wasted bytes to be allocated, as they contain filler | |
| 1116 // objects (waste -> size). | |
| 1117 void FillWastedBytes(intptr_t size_in_bytes) { | |
| 1118 waste_ -= size_in_bytes; | |
| 1119 size_ += size_in_bytes; | |
| 1120 } | 997 } |
| 1121 | 998 |
| 1122 private: | 999 private: |
| 1123 intptr_t capacity_; | 1000 intptr_t capacity_; |
| 1124 intptr_t available_; | |
| 1125 intptr_t size_; | 1001 intptr_t size_; |
| 1126 intptr_t waste_; | 1002 intptr_t waste_; |
| 1127 }; | 1003 }; |
| 1128 | 1004 |
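A simplified, hedged walk-through of the accounting described above, using made-up sizes; in the real code the wasted-byte bookkeeping goes through the free list, which is only partly visible in this hunk:

```cpp
#include <cassert>
#include <cstdint>

int main() {
  intptr_t capacity = 0, size = 0;   // AllocationStats fields
  intptr_t free_list_available = 0;  // "available" now lives on the free list

  // ExpandSpace(64 KB): new memory counts as capacity and, initially, size.
  capacity += 65536;
  size += 65536;

  // The space immediately frees the block onto the free list: the free list
  // records it as available and DeallocateBytes() takes it out of size.
  free_list_available += 65536;
  size -= 65536;

  // A later 1 KB allocation from that free block (AllocateBytes()).
  free_list_available -= 1024;
  size += 1024;

  assert(capacity == 65536);
  assert(size + free_list_available == capacity);
  return 0;
}
```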
| 1129 | 1005 |
| 1006 // ----------------------------------------------------------------------------- |
| 1007 // Free lists for old object spaces |
| 1008 // |
| 1009 // Free-list nodes are free blocks in the heap. They look like heap objects |
| 1010 // (free-list node pointers have the heap object tag, and they have a map like |
| 1011 // a heap object). They have a size and a next pointer. The next pointer is |
| 1012 // the raw address of the next free list node (or NULL). |
| 1013 class FreeListNode: public HeapObject { |
| 1014 public: |
| 1015 // Obtain a free-list node from a raw address. This is not a cast because |
| 1016 // it does not check nor require that the first word at the address is a map |
| 1017 // pointer. |
| 1018 static FreeListNode* FromAddress(Address address) { |
| 1019 return reinterpret_cast<FreeListNode*>(HeapObject::FromAddress(address)); |
| 1020 } |
| 1021 |
| 1022 static inline bool IsFreeListNode(HeapObject* object); |
| 1023 |
| 1024 // Set the size in bytes, which can be read with HeapObject::Size(). This |
| 1025 // function also writes a map to the first word of the block so that it |
| 1026 // looks like a heap object to the garbage collector and heap iteration |
| 1027 // functions. |
| 1028 void set_size(int size_in_bytes); |
| 1029 |
| 1030 // Accessors for the next field. |
| 1031 inline FreeListNode* next(); |
| 1032 inline FreeListNode** next_address(); |
| 1033 inline void set_next(FreeListNode* next); |
| 1034 |
| 1035 inline void Zap(); |
| 1036 |
| 1037 private: |
| 1038 static const int kNextOffset = POINTER_SIZE_ALIGN(FreeSpace::kHeaderSize); |
| 1039 |
| 1040 DISALLOW_IMPLICIT_CONSTRUCTORS(FreeListNode); |
| 1041 }; |
| 1042 |
| 1043 |
| 1044 // The free list for the old space. The free list is organized in such a way |
| 1045 // as to encourage objects allocated around the same time to be near each |
| 1046 // other. The normal way to allocate is intended to be by bumping a 'top' |
| 1047 // pointer until it hits a 'limit' pointer. When the limit is hit we need to |
| 1048 // find a new space to allocate from. This is done with the free list, which |
| 1049 // is divided up into rough categories to cut down on waste. Having finer |
| 1050 // categories would scatter allocation more. |
| 1051 |
| 1052 // The old space free list is organized in categories. |
| 1053 // 1-31 words: Such small free areas are discarded for efficiency reasons. |
| 1054 // They can be reclaimed by the compactor. However the distance between top |
| 1055 // and limit may be this small. |
| 1056 // 32-255 words: There is a list of spaces this large. It is used for top and |
| 1057 // limit when the object we need to allocate is 1-31 words in size. These |
| 1058 // spaces are called small. |
| 1059 // 256-2047 words: There is a list of spaces this large. It is used for top and |
| 1060 // limit when the object we need to allocate is 32-255 words in size. These |
| 1061 // spaces are called medium. |
| 1062 // 2048-16383 words: There is a list of spaces this large. It is used for top |
| 1063 // and limit when the object we need to allocate is 256-2047 words in size. |
| 1064 // These spaces are called large. |
| 1065 // At least 16384 words. This list is for objects of 2048 words or larger. |
| 1066 // Empty pages are added to this list. These spaces are called huge. |
| 1067 class OldSpaceFreeList BASE_EMBEDDED { |
| 1068 public: |
| 1069 explicit OldSpaceFreeList(PagedSpace* owner); |
| 1070 |
| 1071 // Clear the free list. |
| 1072 void Reset(); |
| 1073 |
| 1074 // Return the number of bytes available on the free list. |
| 1075 intptr_t available() { return available_; } |
| 1076 |
| 1077 // Place a node on the free list. The block of size 'size_in_bytes' |
| 1078 // starting at 'start' is placed on the free list. The return value is the |
| 1079 // number of bytes that have been lost due to internal fragmentation by |
| 1080 // freeing the block. Bookkeeping information will be written to the block, |
| 1081 // ie, its contents will be destroyed. The start address should be word |
| 1082 // aligned, and the size should be a non-zero multiple of the word size. |
| 1083 int Free(Address start, int size_in_bytes); |
| 1084 |
| 1085 // Allocate a block of size 'size_in_bytes' from the free list. The block |
| 1086 // is uninitialized. A failure is returned if no block is available. The |
| 1087 // number of bytes lost to fragmentation is returned in the output parameter |
| 1088 // 'wasted_bytes'. The size should be a non-zero multiple of the word size. |
| 1089 MUST_USE_RESULT HeapObject* Allocate(int size_in_bytes); |
| 1090 |
| 1091 void MarkNodes(); |
| 1092 |
| 1093 #ifdef DEBUG |
| 1094 void Zap(); |
| 1095 static intptr_t SumFreeList(FreeListNode* node); |
| 1096 intptr_t SumFreeLists(); |
| 1097 #endif |
| 1098 |
| 1099 private: |
| 1100 // The size range of blocks, in bytes. |
| 1101 static const int kMinBlockSize = 3 * kPointerSize; |
| 1102 static const int kMaxBlockSize = Page::kMaxHeapObjectSize; |
| 1103 |
| 1104 // The identity of the owning space. |
| 1105 PagedSpace* owner_; |
| 1106 |
| 1107 // Total available bytes in all blocks on this free list. |
| 1108 int available_; |
| 1109 |
| 1110 static const int kSmallListMin = 0x20 * kPointerSize; |
| 1111 static const int kSmallListMax = 0xff * kPointerSize; |
| 1112 static const int kMediumListMax = 0x7ff * kPointerSize; |
| 1113 static const int kLargeListMax = 0x3fff * kPointerSize; |
| 1114 static const int kSmallAllocationMax = kSmallListMin - kPointerSize; |
| 1115 static const int kMediumAllocationMax = kSmallListMax; |
| 1116 static const int kLargeAllocationMax = kMediumListMax; |
| 1117 FreeListNode* small_list_; |
| 1118 FreeListNode* medium_list_; |
| 1119 FreeListNode* large_list_; |
| 1120 FreeListNode* huge_list_; |
| 1121 |
| 1122 DISALLOW_IMPLICIT_CONSTRUCTORS(OldSpaceFreeList); |
| 1123 }; |
| 1124 |
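The size classes in the comment map one-to-one onto the four list heads; as an illustration only, a hypothetical chooser using the same thresholds (a "word" being one pointer-sized slot):

```cpp
#include <cassert>

// Illustrative only: map a free block's size in words onto the categories
// described above (thresholds mirror kSmallListMin/Max, kMediumListMax and
// kLargeListMax).
enum FreeListCategory { kDiscarded, kSmall, kMedium, kLarge, kHuge };

FreeListCategory CategoryForFreeBlock(int size_in_words) {
  if (size_in_words < 0x20) return kDiscarded;  // 1-31 words
  if (size_in_words <= 0xff) return kSmall;     // 32-255 words
  if (size_in_words <= 0x7ff) return kMedium;   // 256-2047 words
  if (size_in_words <= 0x3fff) return kLarge;   // 2048-16383 words
  return kHuge;                                 // 16384 words and up
}

int main() {
  assert(CategoryForFreeBlock(31) == kDiscarded);
  assert(CategoryForFreeBlock(32) == kSmall);
  assert(CategoryForFreeBlock(2047) == kMedium);
  assert(CategoryForFreeBlock(2048) == kLarge);
  assert(CategoryForFreeBlock(16384) == kHuge);
  return 0;
}
```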
| 1125 |
| 1130 class PagedSpace : public Space { | 1126 class PagedSpace : public Space { |
| 1131 public: | 1127 public: |
| 1132 // Creates a space with a maximum capacity, and an id. | 1128 // Creates a space with a maximum capacity, and an id. |
| 1133 PagedSpace(intptr_t max_capacity, | 1129 PagedSpace(intptr_t max_capacity, |
| 1134 AllocationSpace id, | 1130 AllocationSpace id, |
| 1135 Executability executable); | 1131 Executability executable); |
| 1136 | 1132 |
| 1137 virtual ~PagedSpace() {} | 1133 virtual ~PagedSpace() {} |
| 1138 | 1134 |
| 1139 // Set up the space using the given address range of virtual memory (from | 1135 // Set up the space using the given address range of virtual memory (from |
| (...skipping 13 matching lines...) |
| 1153 // Checks whether an object/address is in this space. | 1149 // Checks whether an object/address is in this space. |
| 1154 inline bool Contains(Address a); | 1150 inline bool Contains(Address a); |
| 1155 bool Contains(HeapObject* o) { return Contains(o->address()); } | 1151 bool Contains(HeapObject* o) { return Contains(o->address()); } |
| 1156 | 1152 |
| 1157 // Given an address occupied by a live object, return that object if it is | 1153 // Given an address occupied by a live object, return that object if it is |
| 1158 // in this space, or Failure::Exception() if it is not. The implementation | 1154 // in this space, or Failure::Exception() if it is not. The implementation |
| 1159 // iterates over objects in the page containing the address, the cost is | 1155 // iterates over objects in the page containing the address, the cost is |
| 1160 // linear in the number of objects in the page. It may be slow. | 1156 // linear in the number of objects in the page. It may be slow. |
| 1161 MUST_USE_RESULT MaybeObject* FindObject(Address addr); | 1157 MUST_USE_RESULT MaybeObject* FindObject(Address addr); |
| 1162 | 1158 |
| 1163 // Checks whether page is currently in use by this space. | |
| 1164 bool IsUsed(Page* page); | |
| 1165 | |
| 1166 // Prepares for a mark-compact GC. | 1159 // Prepares for a mark-compact GC. |
| 1167 virtual void PrepareForMarkCompact(bool will_compact); | 1160 virtual void PrepareForMarkCompact(bool will_compact); |
| 1168 | 1161 |
| 1169 // The top of allocation in a page in this space. Undefined if page is unused. | 1162 // Current capacity without growing (Size() + Available()). |
| 1170 Address PageAllocationTop(Page* page) { | |
| 1171 return page == TopPageOf(allocation_info_) ? top() | |
| 1172 : PageAllocationLimit(page); | |
| 1173 } | |
| 1174 | |
| 1175 // The limit of allocation for a page in this space. | |
| 1176 virtual Address PageAllocationLimit(Page* page) = 0; | |
| 1177 | |
| 1178 void FlushTopPageWatermark() { | |
| 1179 AllocationTopPage()->SetCachedAllocationWatermark(top()); | |
| 1180 AllocationTopPage()->InvalidateWatermark(true); | |
| 1181 } | |
| 1182 | |
| 1183 // Current capacity without growing (Size() + Available() + Waste()). | |
| 1184 intptr_t Capacity() { return accounting_stats_.Capacity(); } | 1163 intptr_t Capacity() { return accounting_stats_.Capacity(); } |
| 1185 | 1164 |
| 1186 // Total amount of memory committed for this space. For paged | 1165 // Total amount of memory committed for this space. For paged |
| 1187 // spaces this equals the capacity. | 1166 // spaces this equals the capacity. |
| 1188 intptr_t CommittedMemory() { return Capacity(); } | 1167 intptr_t CommittedMemory() { return Capacity(); } |
| 1189 | 1168 |
| 1190 // Available bytes without growing. | 1169 // Sets the capacity, the available space and the wasted space to zero. |
| 1191 intptr_t Available() { return accounting_stats_.Available(); } | 1170 // The stats are rebuilt during sweeping by adding each page to the |
| 1171 // capacity and the size when it is encountered. As free spaces are |
| 1172 // discovered during the sweeping they are subtracted from the size and added |
| 1173 // to the available and wasted totals. |
| 1174 void ClearStats() { accounting_stats_.Clear(); } |
| 1192 | 1175 |
| 1193 // Allocated bytes in this space. | 1176 // Available bytes without growing. These are the bytes on the free list. |
| 1177 // The bytes in the linear allocation area are not included in this total |
| 1178 // because updating the stats would slow down allocation. New pages are |
| 1179 // immediately added to the free list so they show up here. |
| 1180 intptr_t Available() { return free_list_.available(); } |
| 1181 |
| 1182 // Allocated bytes in this space. Garbage bytes that were not found due to |
| 1183 // lazy sweeping are counted as being allocated! The bytes in the current |
| 1184 // linear allocation area (between top and limit) are also counted here. |
| 1194 virtual intptr_t Size() { return accounting_stats_.Size(); } | 1185 virtual intptr_t Size() { return accounting_stats_.Size(); } |
| 1195 | 1186 |
| 1196 // Wasted bytes due to fragmentation and not recoverable until the | 1187 // As size, but the bytes in the current linear allocation area are not |
| 1197 // next GC of this space. | 1188 // included. |
| 1198 intptr_t Waste() { return accounting_stats_.Waste(); } | 1189 virtual intptr_t SizeOfObjects() { return Size() - (limit() - top()); } |
| 1199 | 1190 |
| 1200 // Returns the address of the first object in this space. | 1191 // Wasted bytes in this space. These are just the bytes that were thrown away |
| 1201 Address bottom() { return first_page_->ObjectAreaStart(); } | 1192 // due to being too small to use for allocation. They do not include the |
| 1193 // free bytes that were not found at all due to lazy sweeping. |
| 1194 virtual intptr_t Waste() { return accounting_stats_.Waste(); } |
| 1202 | 1195 |
| 1203 // Returns the allocation pointer in this space. | 1196 // Returns the allocation pointer in this space. |
| 1204 Address top() { return allocation_info_.top; } | 1197 Address top() { return allocation_info_.top; } |
| 1198 Address limit() { return allocation_info_.limit; } |
| 1205 | 1199 |
| 1206 // Allocate the requested number of bytes in the space if possible, return a | 1200 // Allocate the requested number of bytes in the space if possible, return a |
| 1207 // failure object if not. | 1201 // failure object if not. |
| 1208 MUST_USE_RESULT inline MaybeObject* AllocateRaw(int size_in_bytes); | 1202 MUST_USE_RESULT inline MaybeObject* AllocateRaw(int size_in_bytes); |
| 1209 | 1203 |
| 1210 virtual bool ReserveSpace(int bytes); | 1204 virtual bool ReserveSpace(int bytes); |
| 1211 | 1205 |
| 1212 // Used by ReserveSpace. | 1206 // Give a block of memory to the space's free list. It might be added to |
| 1213 virtual void PutRestOfCurrentPageOnFreeList(Page* current_page) = 0; | 1207 // the free list or accounted as waste. |
| 1214 | 1208 // If add_to_freelist is false then just accounting stats are updated and |
| 1215 // Free all pages in range from prev (exclusive) to last (inclusive). | 1209 // no attempt to add area to free list is made. |
| 1216 // Freed pages are moved to the end of page list. | 1210 void Free(Address start, int size_in_bytes) { |
| 1217 void FreePages(Page* prev, Page* last); | 1211 int wasted = free_list_.Free(start, size_in_bytes); |
| 1218 | 1212 accounting_stats_.DeallocateBytes(size_in_bytes - wasted); |
| 1219 // Deallocates a block. | 1213 } |
| 1220 virtual void DeallocateBlock(Address start, | |
| 1221 int size_in_bytes, | |
| 1222 bool add_to_freelist) = 0; | |
| 1223 | 1214 |
| 1224 // Set space allocation info. | 1215 // Set space allocation info. |
| 1225 void SetTop(Address top) { | 1216 void SetTop(Address top, Address limit) { |
| 1217 ASSERT(top == limit || |
| 1218 Page::FromAddress(top) == Page::FromAddress(limit - 1)); |
| 1226 allocation_info_.top = top; | 1219 allocation_info_.top = top; |
| 1227 allocation_info_.limit = PageAllocationLimit(Page::FromAllocationTop(top)); | 1220 allocation_info_.limit = limit; |
| 1221 } |
| 1222 |
| 1223 void Allocate(int bytes) { |
| 1224 accounting_stats_.AllocateBytes(bytes); |
| 1225 } |
| 1226 |
| 1227 void IncreaseCapacity(int size) { |
| 1228 accounting_stats_.ExpandSpace(size); |
| 1228 } | 1229 } |
| 1229 | 1230 |
| 1230 // Releases half of unused pages. | 1231 // Releases half of unused pages. |
| 1231 void Shrink(); | 1232 void Shrink(); |
| 1232 | 1233 |
| 1233 // Ensures that the capacity is at least 'capacity'. Returns false on failure. | 1234 // Ensures that the capacity is at least 'capacity'. Returns false on failure. |
| 1234 bool EnsureCapacity(int capacity); | 1235 bool EnsureCapacity(int capacity); |
| 1235 | 1236 |
| 1237 // The dummy page that anchors the linked list of pages. |
| 1238 Page *anchor() { return &anchor_; } |
| 1239 |
| 1236 #ifdef ENABLE_HEAP_PROTECTION | 1240 #ifdef ENABLE_HEAP_PROTECTION |
| 1237 // Protect/unprotect the space by marking it read-only/writable. | 1241 // Protect/unprotect the space by marking it read-only/writable. |
| 1238 void Protect(); | 1242 void Protect(); |
| 1239 void Unprotect(); | 1243 void Unprotect(); |
| 1240 #endif | 1244 #endif |
| 1241 | 1245 |
| 1242 #ifdef DEBUG | 1246 #ifdef DEBUG |
| 1243 // Print meta info and objects in this space. | 1247 // Print meta info and objects in this space. |
| 1244 virtual void Print(); | 1248 virtual void Print(); |
| 1245 | 1249 |
| 1246 // Verify integrity of this space. | 1250 // Verify integrity of this space. |
| 1247 virtual void Verify(ObjectVisitor* visitor); | 1251 virtual void Verify(ObjectVisitor* visitor); |
| 1248 | 1252 |
| 1253 // Reports statistics for the space |
| 1254 void ReportStatistics(); |
| 1255 |
| 1249 // Overridden by subclasses to verify space-specific object | 1256 // Overridden by subclasses to verify space-specific object |
| 1250 // properties (e.g., only maps or free-list nodes are in map space). | 1257 // properties (e.g., only maps or free-list nodes are in map space). |
| 1251 virtual void VerifyObject(HeapObject* obj) {} | 1258 virtual void VerifyObject(HeapObject* obj) {} |
| 1252 | 1259 |
| 1253 // Report code object related statistics | 1260 // Report code object related statistics |
| 1254 void CollectCodeStatistics(); | 1261 void CollectCodeStatistics(); |
| 1255 static void ReportCodeStatistics(); | 1262 static void ReportCodeStatistics(); |
| 1256 static void ResetCodeStatistics(); | 1263 static void ResetCodeStatistics(); |
| 1257 #endif | 1264 #endif |
| 1258 | 1265 |
| 1259 // Returns the page of the allocation pointer. | 1266 bool was_swept_conservatively() { return was_swept_conservatively_; } |
| 1260 Page* AllocationTopPage() { return TopPageOf(allocation_info_); } | 1267 void set_was_swept_conservatively(bool b) { was_swept_conservatively_ = b; } |
| 1261 | 1268 |
| 1262 protected: | 1269 protected: |
| 1263 // Maximum capacity of this space. | 1270 // Maximum capacity of this space. |
| 1264 intptr_t max_capacity_; | 1271 intptr_t max_capacity_; |
| 1265 | 1272 |
| 1266 // Accounting information for this space. | 1273 // Accounting information for this space. |
| 1267 AllocationStats accounting_stats_; | 1274 AllocationStats accounting_stats_; |
| 1268 | 1275 |
| 1269 // The first page in this space. | 1276 // The dummy page that anchors the doubly linked list of pages. |
| 1270 Page* first_page_; | 1277 Page anchor_; |
| 1271 | 1278 |
| 1272 // The last page in this space. Initially set in Setup, updated in | 1279 // The space's free list. |
| 1273 // Expand and Shrink. | 1280 OldSpaceFreeList free_list_; |
| 1274 Page* last_page_; | |
| 1275 | 1281 |
| 1276 // Normal allocation information. | 1282 // Normal allocation information. |
| 1277 AllocationInfo allocation_info_; | 1283 AllocationInfo allocation_info_; |
| 1278 | 1284 |
| 1279 // Bytes of each page that cannot be allocated. Possibly non-zero | 1285 // Bytes of each page that cannot be allocated. Possibly non-zero |
| 1280 // for pages in spaces with only fixed-size objects. Always zero | 1286 // for pages in spaces with only fixed-size objects. Always zero |
| 1281 // for pages in spaces with variable sized objects (those pages are | 1287 // for pages in spaces with variable sized objects (those pages are |
| 1282 // padded with free-list nodes). | 1288 // padded with free-list nodes). |
| 1283 int page_extra_; | 1289 int page_extra_; |
| 1284 | 1290 |
| 1285 // Sets allocation pointer to a page bottom. | 1291 bool was_swept_conservatively_; |
| 1286 static void SetAllocationInfo(AllocationInfo* alloc_info, Page* p); | |
| 1287 | 1292 |
| 1288 // Returns the top page specified by an allocation info structure. | 1293 // Sets allocation pointer. If the allocation pointer already pointed to a |
| 1289 static Page* TopPageOf(AllocationInfo alloc_info) { | 1294 // non-zero-length area then that area may be returned to the free list. |
| 1290 return Page::FromAllocationTop(alloc_info.limit); | 1295 void SetAllocationInfo(Address start, Address end); |
| 1291 } | |
| 1292 | |
| 1293 int CountPagesToTop() { | |
| 1294 Page* p = Page::FromAllocationTop(allocation_info_.top); | |
| 1295 PageIterator it(this, PageIterator::ALL_PAGES); | |
| 1296 int counter = 1; | |
| 1297 while (it.has_next()) { | |
| 1298 if (it.next() == p) return counter; | |
| 1299 counter++; | |
| 1300 } | |
| 1301 UNREACHABLE(); | |
| 1302 return -1; | |
| 1303 } | |
| 1304 | 1296 |
| 1305 // Expands the space by allocating a fixed number of pages. Returns false if | 1297 // Expands the space by allocating a fixed number of pages. Returns false if |
| 1306 // it cannot allocate requested number of pages from OS. | 1298 // it cannot allocate requested number of pages from OS. |
| 1307 bool Expand(); | 1299 bool Expand(); |
| 1308 | 1300 |
| 1309 // Generic fast case allocation function that tries linear allocation in | 1301 // Generic fast case allocation function that tries linear allocation at the |
| 1310 // the top page of 'alloc_info'. Returns NULL on failure. | 1302 // address denoted by top in allocation_info_. |
| 1311 inline HeapObject* AllocateLinearly(AllocationInfo* alloc_info, | 1303 inline HeapObject* AllocateLinearly(AllocationInfo* alloc_info, |
| 1312 int size_in_bytes); | 1304 int size_in_bytes); |
| 1313 | 1305 |
| 1314 // During normal allocation or deserialization, roll to the next page in | |
| 1315 // the space (there is assumed to be one) and allocate there. This | |
| 1316 // function is space-dependent. | |
| 1317 virtual HeapObject* AllocateInNextPage(Page* current_page, | |
| 1318 int size_in_bytes) = 0; | |
| 1319 | |
| 1320 // Slow path of AllocateRaw. This function is space-dependent. | 1306 // Slow path of AllocateRaw. This function is space-dependent. |
| 1321 MUST_USE_RESULT virtual HeapObject* SlowAllocateRaw(int size_in_bytes) = 0; | 1307 MUST_USE_RESULT virtual HeapObject* SlowAllocateRaw(int size_in_bytes); |
| 1322 | 1308 |
| 1323 #ifdef DEBUG | 1309 #ifdef DEBUG |
| 1324 // Returns the number of total pages in this space. | 1310 // Returns the number of total pages in this space. |
| 1325 int CountTotalPages(); | 1311 int CountTotalPages(); |
| 1326 #endif | 1312 #endif |
| 1327 private: | 1313 |
| 1328 friend class PageIterator; | 1314 friend class PageIterator; |
| 1329 }; | 1315 }; |
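To make the fast path described above concrete, here is a minimal, hypothetical sketch of bump-pointer allocation over a top/limit pair like AllocationInfo's (illustrative code, not V8's implementation; the real AllocateLinearly falls back to SlowAllocateRaw and the free list when the linear area is exhausted). It also shows why the new SetTop(top, limit) can assert that top and limit lie on the same page unless the area is empty: everything between them is handed out as one contiguous run.

#include <stdint.h>
#include <stddef.h>

// Illustrative stand-ins for V8's Address and AllocationInfo.
typedef uintptr_t Address;

struct AllocationInfo {
  Address top;    // next free address in the linear allocation area
  Address limit;  // end of the linear allocation area
};

// Bump-pointer fast path: returns 0 when the area is exhausted and the
// caller must take the slow path (free list or a new page).
static Address AllocateLinearly(AllocationInfo* info, size_t size_in_bytes) {
  Address result = info->top;
  if (info->limit - result < size_in_bytes) return 0;
  info->top = result + size_in_bytes;
  return result;
}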
| 1330 | 1316 |
| 1331 | 1317 |
| 1332 #if defined(DEBUG) || defined(ENABLE_LOGGING_AND_PROFILING) | 1318 #if defined(DEBUG) || defined(ENABLE_LOGGING_AND_PROFILING) |
| 1333 class NumberAndSizeInfo BASE_EMBEDDED { | 1319 class NumberAndSizeInfo BASE_EMBEDDED { |
| 1334 public: | 1320 public: |
| 1335 NumberAndSizeInfo() : number_(0), bytes_(0) {} | 1321 NumberAndSizeInfo() : number_(0), bytes_(0) {} |
| 1336 | 1322 |
| 1337 int number() const { return number_; } | 1323 int number() const { return number_; } |
| (...skipping 412 matching lines...) |
| 1750 AllocationInfo* alloc_info); | 1736 AllocationInfo* alloc_info); |
| 1751 | 1737 |
| 1752 friend class SemiSpaceIterator; | 1738 friend class SemiSpaceIterator; |
| 1753 | 1739 |
| 1754 public: | 1740 public: |
| 1755 TRACK_MEMORY("NewSpace") | 1741 TRACK_MEMORY("NewSpace") |
| 1756 }; | 1742 }; |
| 1757 | 1743 |
| 1758 | 1744 |
| 1759 // ----------------------------------------------------------------------------- | 1745 // ----------------------------------------------------------------------------- |
| 1760 // Free lists for old object spaces | |
| 1761 // | |
| 1762 // Free-list nodes are free blocks in the heap. They look like heap objects | |
| 1763 // (free-list node pointers have the heap object tag, and they have a map like | |
| 1764 // a heap object). They have a size and a next pointer. The next pointer is | |
| 1765 // the raw address of the next free list node (or NULL). | |
| 1766 class FreeListNode: public HeapObject { | |
| 1767 public: | |
| 1768 // Obtain a free-list node from a raw address. This is not a cast because | |
| 1769 // it does not check nor require that the first word at the address is a map | |
| 1770 // pointer. | |
| 1771 static FreeListNode* FromAddress(Address address) { | |
| 1772 return reinterpret_cast<FreeListNode*>(HeapObject::FromAddress(address)); | |
| 1773 } | |
| 1774 | |
| 1775 static inline bool IsFreeListNode(HeapObject* object); | |
| 1776 | |
| 1777 // Set the size in bytes, which can be read with HeapObject::Size(). This | |
| 1778 // function also writes a map to the first word of the block so that it | |
| 1779 // looks like a heap object to the garbage collector and heap iteration | |
| 1780 // functions. | |
| 1781 void set_size(int size_in_bytes); | |
| 1782 | |
| 1783 // Accessors for the next field. | |
| 1784 inline Address next(); | |
| 1785 inline void set_next(Address next); | |
| 1786 | |
| 1787 inline void Zap(); | |
| 1788 | |
| 1789 private: | |
| 1790 static const int kNextOffset = POINTER_SIZE_ALIGN(ByteArray::kHeaderSize); | |
| 1791 | |
| 1792 DISALLOW_IMPLICIT_CONSTRUCTORS(FreeListNode); | |
| 1793 }; | |
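As a rough picture of the comment above, a free block can be thought of as a few machine words: a map-like word so heap iteration treats it as an object, its size, and a link to the next free block. The struct below is a hypothetical simplification; the real FreeListNode uses tagged HeapObject pointers, writes an actual map in set_size(), and keeps the next pointer at kNextOffset.

#include <stddef.h>

// Simplified free-block layout (illustrative only, not V8's encoding).
struct FreeBlock {
  void*      map;   // makes the block look like a heap object to iterators
  size_t     size;  // size of the whole block in bytes
  FreeBlock* next;  // next free block, or NULL

  // Turn a raw region of memory into a free block.
  static FreeBlock* FromRegion(void* start, size_t size_in_bytes) {
    FreeBlock* block = static_cast<FreeBlock*>(start);
    block->map = NULL;  // a real implementation writes a "free space" map here
    block->size = size_in_bytes;
    block->next = NULL;
    return block;
  }
};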
| 1794 | |
| 1795 | |
| 1796 static const uintptr_t kFreeListZapValue = 0xfeed1eaf; | |
| 1797 | |
| 1798 | |
| 1799 // The free list for the old space. | |
| 1800 class OldSpaceFreeList BASE_EMBEDDED { | |
| 1801 public: | |
| 1802 explicit OldSpaceFreeList(AllocationSpace owner); | |
| 1803 | |
| 1804 // Clear the free list. | |
| 1805 void Reset(); | |
| 1806 | |
| 1807 // Return the number of bytes available on the free list. | |
| 1808 intptr_t available() { return available_; } | |
| 1809 | |
| 1810 // Place a node on the free list. The block of size 'size_in_bytes' | |
| 1811 // starting at 'start' is placed on the free list. The return value is the | |
| 1812 // number of bytes that have been lost due to internal fragmentation by | |
| 1813 // freeing the block. Bookkeeping information will be written to the block, | |
| 1814 // ie, its contents will be destroyed. The start address should be word | |
| 1815 // aligned, and the size should be a non-zero multiple of the word size. | |
| 1816 int Free(Address start, int size_in_bytes); | |
| 1817 | |
| 1818 // Allocate a block of size 'size_in_bytes' from the free list. The block | |
| 1819 // is uninitialized. A failure is returned if no block is available. The |
| 1820 // number of bytes lost to fragmentation is returned in the output parameter | |
| 1821 // 'wasted_bytes'. The size should be a non-zero multiple of the word size. | |
| 1822 MUST_USE_RESULT MaybeObject* Allocate(int size_in_bytes, int* wasted_bytes); | |
| 1823 | |
| 1824 void MarkNodes(); | |
| 1825 | |
| 1826 #ifdef DEBUG | |
| 1827 void Zap(); | |
| 1828 #endif | |
| 1829 | |
| 1830 private: | |
| 1831 // The size range of blocks, in bytes. (Smaller allocations are allowed, but | |
| 1832 // will always result in waste.) | |
| 1833 static const int kMinBlockSize = 2 * kPointerSize; | |
| 1834 static const int kMaxBlockSize = Page::kMaxHeapObjectSize; | |
| 1835 | |
| 1836 // The identity of the owning space, for building allocation Failure | |
| 1837 // objects. | |
| 1838 AllocationSpace owner_; | |
| 1839 | |
| 1840 // Total available bytes in all blocks on this free list. | |
| 1841 int available_; | |
| 1842 | |
| 1843 // Blocks are put on exact free lists in an array, indexed by size in words. | |
| 1844 // The available sizes are kept in an increasingly ordered list. Entries | |
| 1845 // corresponding to sizes < kMinBlockSize always have an empty free list | |
| 1846 // (but index kHead is used for the head of the size list). | |
| 1847 struct SizeNode { | |
| 1848 // Address of the head FreeListNode of the implied block size or NULL. | |
| 1849 Address head_node_; | |
| 1850 // Size (words) of the next larger available size if head_node_ != NULL. | |
| 1851 int next_size_; | |
| 1852 }; | |
| 1853 static const int kFreeListsLength = kMaxBlockSize / kPointerSize + 1; | |
| 1854 SizeNode free_[kFreeListsLength]; | |
| 1855 | |
| 1856 // Sentinel elements for the size list. Real elements are in ]kHead..kEnd[. | |
| 1857 static const int kHead = kMinBlockSize / kPointerSize - 1; | |
| 1858 static const int kEnd = kMaxInt; | |
| 1859 | |
| 1860 // We keep a "finger" in the size list to speed up a common pattern: | |
| 1861 // repeated requests for the same or increasing sizes. | |
| 1862 int finger_; | |
| 1863 | |
| 1864 // Starting from *prev, find and return the smallest size >= index (words), | |
| 1865 // or kEnd. Update *prev to be the largest size < index, or kHead. | |
| 1866 int FindSize(int index, int* prev) { | |
| 1867 int cur = free_[*prev].next_size_; | |
| 1868 while (cur < index) { | |
| 1869 *prev = cur; | |
| 1870 cur = free_[cur].next_size_; | |
| 1871 } | |
| 1872 return cur; | |
| 1873 } | |
| 1874 | |
| 1875 // Remove an existing element from the size list. | |
| 1876 void RemoveSize(int index) { | |
| 1877 int prev = kHead; | |
| 1878 int cur = FindSize(index, &prev); | |
| 1879 ASSERT(cur == index); | |
| 1880 free_[prev].next_size_ = free_[cur].next_size_; | |
| 1881 finger_ = prev; | |
| 1882 } | |
| 1883 | |
| 1884 // Insert a new element into the size list. | |
| 1885 void InsertSize(int index) { | |
| 1886 int prev = kHead; | |
| 1887 int cur = FindSize(index, &prev); | |
| 1888 ASSERT(cur != index); | |
| 1889 free_[prev].next_size_ = index; | |
| 1890 free_[index].next_size_ = cur; | |
| 1891 } | |
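A small worked example of the size-list helpers above: if the sizes currently on the list are 4, 7 and 12 words, then FindSize(8, &prev) walks 4, then 7, stops at 12 (the smallest size >= 8), returns 12 and leaves *prev == 7; a following InsertSize(8) therefore links the list as 7 -> 8 -> 12, and RemoveSize(8) would undo exactly that link.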
| 1892 | |
| 1893 // The size list is not updated during a sequence of calls to Free, but is | |
| 1894 // rebuilt before the next allocation. | |
| 1895 void RebuildSizeList(); | |
| 1896 bool needs_rebuild_; | |
| 1897 | |
| 1898 #ifdef DEBUG | |
| 1899 // Does this free list contain a free block located at the address of 'node'? | |
| 1900 bool Contains(FreeListNode* node); | |
| 1901 #endif | |
| 1902 | |
| 1903 DISALLOW_COPY_AND_ASSIGN(OldSpaceFreeList); | |
| 1904 }; | |
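For readers less familiar with segregated free lists, the toy sketch below shows the general scheme the class above implements: blocks are filed under their exact size in words, and allocation scans upward from the requested size, reporting any slack as waste. All names here (ToyFreeList, kMaxWords, ...) are illustrative assumptions; V8's version additionally maintains the ordered size list with a finger shown above and returns Failure objects rather than NULL.

#include <stddef.h>

static const int kPointerSize = sizeof(void*);  // word size, illustrative
static const int kMinWords = 2;                 // smaller blocks are pure waste
static const int kMaxWords = 256;               // illustrative upper bound

struct Node { Node* next; };

class ToyFreeList {
 public:
  ToyFreeList() {
    for (int i = 0; i <= kMaxWords; i++) heads_[i] = NULL;
  }

  // Files the block under its size in words; returns the bytes wasted
  // because the block was too small (or too large) to track.
  int Free(void* start, int size_in_bytes) {
    int words = size_in_bytes / kPointerSize;
    if (words < kMinWords || words > kMaxWords) return size_in_bytes;
    Node* node = static_cast<Node*>(start);
    node->next = heads_[words];
    heads_[words] = node;
    return 0;
  }

  // Returns a block of at least size_in_bytes and reports its slack in
  // *wasted_bytes, or NULL if no suitable block is free.
  void* Allocate(int size_in_bytes, int* wasted_bytes) {
    int need = size_in_bytes / kPointerSize;
    for (int w = need; w <= kMaxWords; w++) {
      if (heads_[w] != NULL) {
        Node* node = heads_[w];
        heads_[w] = node->next;
        *wasted_bytes = (w - need) * kPointerSize;
        return node;
      }
    }
    return NULL;
  }

 private:
  Node* heads_[kMaxWords + 1];  // heads_[w] holds free blocks of exactly w words
};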
| 1905 | |
| 1906 | |
| 1907 // The free list for the map space. | |
| 1908 class FixedSizeFreeList BASE_EMBEDDED { | |
| 1909 public: | |
| 1910 FixedSizeFreeList(AllocationSpace owner, int object_size); | |
| 1911 | |
| 1912 // Clear the free list. | |
| 1913 void Reset(); | |
| 1914 | |
| 1915 // Return the number of bytes available on the free list. | |
| 1916 intptr_t available() { return available_; } | |
| 1917 | |
| 1918 // Place a node on the free list. The block starting at 'start' (assumed to | |
| 1919 // have size object_size_) is placed on the free list. Bookkeeping | |
| 1920 // information will be written to the block, ie, its contents will be | |
| 1921 // destroyed. The start address should be word aligned. | |
| 1922 void Free(Address start); | |
| 1923 | |
| 1924 // Allocate a fixed-sized block from the free list. The block is uninitialized. |
| 1925 // A failure is returned if no block is available. | |
| 1926 MUST_USE_RESULT MaybeObject* Allocate(); | |
| 1927 | |
| 1928 void MarkNodes(); | |
| 1929 | |
| 1930 #ifdef DEBUG | |
| 1931 void Zap(); | |
| 1932 #endif | |
| 1933 | |
| 1934 private: | |
| 1935 // Available bytes on the free list. | |
| 1936 intptr_t available_; | |
| 1937 | |
| 1938 // The head of the free list. | |
| 1939 Address head_; | |
| 1940 | |
| 1941 // The tail of the free list. | |
| 1942 Address tail_; | |
| 1943 | |
| 1944 // The identity of the owning space, for building allocation Failure | |
| 1945 // objects. | |
| 1946 AllocationSpace owner_; | |
| 1947 | |
| 1948 // The size of the objects in this space. | |
| 1949 int object_size_; | |
| 1950 | |
| 1951 DISALLOW_COPY_AND_ASSIGN(FixedSizeFreeList); | |
| 1952 }; | |
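Because every block on the fixed-size list has the same length, it can be kept as a simple FIFO list threaded through the blocks themselves, which is what the head_ and tail_ fields suggest. The class below is a hypothetical sketch under that assumption, not V8's code (which works on raw Addresses and integrates with the owning space's accounting).

#include <stddef.h>

// Toy FIFO free list of equally sized blocks; each free block's first word
// is reused as the link to the next free block.
class ToyFixedFreeList {
 public:
  explicit ToyFixedFreeList(int object_size)
      : object_size_(object_size), available_(0), head_(NULL), tail_(NULL) {}

  // Appends the block at 'start' to the tail of the list.
  void Free(void* start) {
    void** node = static_cast<void**>(start);
    *node = NULL;
    if (tail_ == NULL) {
      head_ = node;
    } else {
      *tail_ = node;
    }
    tail_ = node;
    available_ += object_size_;
  }

  // Pops a block from the head of the list; NULL means the caller has to
  // grow the space instead.
  void* Allocate() {
    if (head_ == NULL) return NULL;
    void** node = head_;
    head_ = static_cast<void**>(*node);
    if (head_ == NULL) tail_ = NULL;
    available_ -= object_size_;
    return node;
  }

  int available() const { return available_; }

 private:
  int object_size_;   // size of every block on this list
  int available_;     // total free bytes currently on the list
  void** head_;       // oldest free block
  void** tail_;       // newest free block
};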
| 1953 | |
| 1954 | |
| 1955 // ----------------------------------------------------------------------------- | |
| 1956 // Old object space (excluding map objects) | 1746 // Old object space (excluding map objects) |
| 1957 | 1747 |
| 1958 class OldSpace : public PagedSpace { | 1748 class OldSpace : public PagedSpace { |
| 1959 public: | 1749 public: |
| 1960 // Creates an old space object with a given maximum capacity. | 1750 // Creates an old space object with a given maximum capacity. |
| 1961 // The constructor does not allocate pages from OS. | 1751 // The constructor does not allocate pages from OS. |
| 1962 explicit OldSpace(intptr_t max_capacity, | 1752 explicit OldSpace(intptr_t max_capacity, |
| 1963 AllocationSpace id, | 1753 AllocationSpace id, |
| 1964 Executability executable) | 1754 Executability executable) |
| 1965 : PagedSpace(max_capacity, id, executable), free_list_(id) { | 1755 : PagedSpace(max_capacity, id, executable) { |
| 1966 page_extra_ = 0; | 1756 page_extra_ = 0; |
| 1967 } | 1757 } |
| 1968 | 1758 |
| 1969 // The bytes available on the free list (i.e., not above the linear allocation |
| 1970 // pointer). | |
| 1971 intptr_t AvailableFree() { return free_list_.available(); } | |
| 1972 | |
| 1973 // The limit of allocation for a page in this space. | 1759 // The limit of allocation for a page in this space. |
| 1974 virtual Address PageAllocationLimit(Page* page) { | 1760 virtual Address PageAllocationLimit(Page* page) { |
| 1975 return page->ObjectAreaEnd(); | 1761 return page->ObjectAreaEnd(); |
| 1976 } | 1762 } |
| 1977 | 1763 |
| 1978 // Give a block of memory to the space's free list. It might be added to | |
| 1979 // the free list or accounted as waste. | |
| 1980 // If add_to_freelist is false, only the accounting stats are updated and |
| 1981 // no attempt is made to add the area to the free list. |
| 1982 void Free(Address start, int size_in_bytes, bool add_to_freelist) { | |
| 1983 accounting_stats_.DeallocateBytes(size_in_bytes); | |
| 1984 | |
| 1985 if (add_to_freelist) { | |
| 1986 int wasted_bytes = free_list_.Free(start, size_in_bytes); | |
| 1987 accounting_stats_.WasteBytes(wasted_bytes); | |
| 1988 } | |
| 1989 } | |
| 1990 | |
| 1991 virtual void DeallocateBlock(Address start, | |
| 1992 int size_in_bytes, | |
| 1993 bool add_to_freelist); | |
| 1994 | |
| 1995 // Prepare for full garbage collection. Resets the relocation pointer and | 1764 // Prepare for full garbage collection. Resets the relocation pointer and |
| 1996 // clears the free list. | 1765 // clears the free list. |
| 1997 virtual void PrepareForMarkCompact(bool will_compact); | 1766 virtual void PrepareForMarkCompact(bool will_compact); |
| 1998 | 1767 |
| 1999 virtual void PutRestOfCurrentPageOnFreeList(Page* current_page); | |
| 2000 | |
| 2001 void MarkFreeListNodes() { free_list_.MarkNodes(); } | |
| 2002 | |
| 2003 #ifdef DEBUG | |
| 2004 // Reports statistics for the space | |
| 2005 void ReportStatistics(); | |
| 2006 | |
| 2007 OldSpaceFreeList* free_list() { return &free_list_; } | |
| 2008 #endif | |
| 2009 | |
| 2010 protected: | |
| 2011 // Virtual function in the superclass. Slow path of AllocateRaw. | |
| 2012 MUST_USE_RESULT HeapObject* SlowAllocateRaw(int size_in_bytes); | |
| 2013 | |
| 2014 // Virtual function in the superclass. Allocate linearly at the start of | |
| 2015 // the page after current_page (there is assumed to be one). | |
| 2016 HeapObject* AllocateInNextPage(Page* current_page, int size_in_bytes); | |
| 2017 | |
| 2018 private: | |
| 2019 // The space's free list. | |
| 2020 OldSpaceFreeList free_list_; | |
| 2021 | |
| 2022 public: | 1768 public: |
| 2023 TRACK_MEMORY("OldSpace") | 1769 TRACK_MEMORY("OldSpace") |
| 2024 }; | 1770 }; |
| 2025 | 1771 |
| 2026 | 1772 |
| 2027 // ----------------------------------------------------------------------------- | 1773 // ----------------------------------------------------------------------------- |
| 2028 // Old space for objects of a fixed size | 1774 // Old space for objects of a fixed size |
| 2029 | 1775 |
| 2030 class FixedSpace : public PagedSpace { | 1776 class FixedSpace : public PagedSpace { |
| 2031 public: | 1777 public: |
| 2032 FixedSpace(intptr_t max_capacity, | 1778 FixedSpace(intptr_t max_capacity, |
| 2033 AllocationSpace id, | 1779 AllocationSpace id, |
| 2034 int object_size_in_bytes, | 1780 int object_size_in_bytes, |
| 2035 const char* name) | 1781 const char* name) |
| 2036 : PagedSpace(max_capacity, id, NOT_EXECUTABLE), | 1782 : PagedSpace(max_capacity, id, NOT_EXECUTABLE), |
| 2037 object_size_in_bytes_(object_size_in_bytes), | 1783 object_size_in_bytes_(object_size_in_bytes), |
| 2038 name_(name), | 1784 name_(name) { |
| 2039 free_list_(id, object_size_in_bytes) { | |
| 2040 page_extra_ = Page::kObjectAreaSize % object_size_in_bytes; | 1785 page_extra_ = Page::kObjectAreaSize % object_size_in_bytes; |
| 2041 } | 1786 } |
| 2042 | 1787 |
| 2043 // The limit of allocation for a page in this space. | 1788 // The limit of allocation for a page in this space. |
| 2044 virtual Address PageAllocationLimit(Page* page) { | 1789 virtual Address PageAllocationLimit(Page* page) { |
| 2045 return page->ObjectAreaEnd() - page_extra_; | 1790 return page->ObjectAreaEnd() - page_extra_; |
| 2046 } | 1791 } |
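As a purely illustrative example of page_extra_ (the real Page::kObjectAreaSize is different): with an object area of 8128 bytes and a fixed object size of 88 bytes, page_extra_ = 8128 % 88 = 32, so the final 32 bytes of every page can never hold an object and PageAllocationLimit() returns ObjectAreaEnd() - 32.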
| 2047 | 1792 |
| 2048 int object_size_in_bytes() { return object_size_in_bytes_; } | 1793 int object_size_in_bytes() { return object_size_in_bytes_; } |
| 2049 | 1794 |
| 2050 // Give a fixed sized block of memory to the space's free list. | |
| 2051 // If add_to_freelist is false, only the accounting stats are updated and |
| 2052 // no attempt is made to add the area to the free list. |
| 2053 void Free(Address start, bool add_to_freelist) { | |
| 2054 if (add_to_freelist) { | |
| 2055 free_list_.Free(start); | |
| 2056 } | |
| 2057 accounting_stats_.DeallocateBytes(object_size_in_bytes_); | |
| 2058 } | |
| 2059 | |
| 2060 // Prepares for a mark-compact GC. | 1795 // Prepares for a mark-compact GC. |
| 2061 virtual void PrepareForMarkCompact(bool will_compact); | 1796 virtual void PrepareForMarkCompact(bool will_compact); |
| 2062 | 1797 |
| 2063 virtual void PutRestOfCurrentPageOnFreeList(Page* current_page); | |
| 2064 | |
| 2065 virtual void DeallocateBlock(Address start, | |
| 2066 int size_in_bytes, | |
| 2067 bool add_to_freelist); | |
| 2068 | |
| 2069 void MarkFreeListNodes() { free_list_.MarkNodes(); } | 1798 void MarkFreeListNodes() { free_list_.MarkNodes(); } |
| 2070 | 1799 |
| 2071 #ifdef DEBUG | |
| 2072 // Reports statistics for the space |
| 2073 void ReportStatistics(); | |
| 2074 | |
| 2075 FixedSizeFreeList* free_list() { return &free_list_; } | |
| 2076 #endif | |
| 2077 | |
| 2078 protected: | 1800 protected: |
| 2079 // Virtual function in the superclass. Slow path of AllocateRaw. | |
| 2080 MUST_USE_RESULT HeapObject* SlowAllocateRaw(int size_in_bytes); | |
| 2081 | |
| 2082 // Virtual function in the superclass. Allocate linearly at the start of | |
| 2083 // the page after current_page (there is assumed to be one). | |
| 2084 HeapObject* AllocateInNextPage(Page* current_page, int size_in_bytes); | |
| 2085 | |
| 2086 void ResetFreeList() { | 1801 void ResetFreeList() { |
| 2087 free_list_.Reset(); | 1802 free_list_.Reset(); |
| 2088 } | 1803 } |
| 2089 | 1804 |
| 2090 private: | 1805 private: |
| 2091 // The size of objects in this space. | 1806 // The size of objects in this space. |
| 2092 int object_size_in_bytes_; | 1807 int object_size_in_bytes_; |
| 2093 | 1808 |
| 2094 // The name of this space. | 1809 // The name of this space. |
| 2095 const char* name_; | 1810 const char* name_; |
| 2096 | |
| 2097 // The space's free list. | |
| 2098 FixedSizeFreeList free_list_; | |
| 2099 }; | 1811 }; |
| 2100 | 1812 |
| 2101 | 1813 |
| 2102 // ----------------------------------------------------------------------------- | 1814 // ----------------------------------------------------------------------------- |
| 2103 // Old space for all map objects | 1815 // Old space for all map objects |
| 2104 | 1816 |
| 2105 class MapSpace : public FixedSpace { | 1817 class MapSpace : public FixedSpace { |
| 2106 public: | 1818 public: |
| 2107 // Creates a map space object with a maximum capacity. | 1819 // Creates a map space object with a maximum capacity. |
| 2108 MapSpace(intptr_t max_capacity, int max_map_space_pages, AllocationSpace id) | 1820 MapSpace(intptr_t max_capacity, int max_map_space_pages, AllocationSpace id) |
| (...skipping 178 matching lines...) |
| 2287 | 1999 |
| 2288 private: | 2000 private: |
| 2289 LargePage* current_; | 2001 LargePage* current_; |
| 2290 HeapObjectCallback size_func_; | 2002 HeapObjectCallback size_func_; |
| 2291 }; | 2003 }; |
| 2292 | 2004 |
| 2293 | 2005 |
| 2294 } } // namespace v8::internal | 2006 } } // namespace v8::internal |
| 2295 | 2007 |
| 2296 #endif // V8_SPACES_H_ | 2008 #endif // V8_SPACES_H_ |