Chromium Code Reviews

| OLD | NEW |
|---|---|
| 1 // Copyright 2006-2010 the V8 project authors. All rights reserved. | 1 // Copyright 2011 the V8 project authors. All rights reserved. |
| 2 // Redistribution and use in source and binary forms, with or without | 2 // Redistribution and use in source and binary forms, with or without |
| 3 // modification, are permitted provided that the following conditions are | 3 // modification, are permitted provided that the following conditions are |
| 4 // met: | 4 // met: |
| 5 // | 5 // |
| 6 // * Redistributions of source code must retain the above copyright | 6 // * Redistributions of source code must retain the above copyright |
| 7 // notice, this list of conditions and the following disclaimer. | 7 // notice, this list of conditions and the following disclaimer. |
| 8 // * Redistributions in binary form must reproduce the above | 8 // * Redistributions in binary form must reproduce the above |
| 9 // copyright notice, this list of conditions and the following | 9 // copyright notice, this list of conditions and the following |
| 10 // disclaimer in the documentation and/or other materials provided | 10 // disclaimer in the documentation and/or other materials provided |
| 11 // with the distribution. | 11 // with the distribution. |
| (...skipping 48 matching lines...) | |
| 60 // buffer overflow) iterate intergenerational pointers without decoding heap | 60 // buffer overflow) iterate intergenerational pointers without decoding heap |
| 61 // object maps so if the page belongs to old pointer space or large object | 61 // object maps so if the page belongs to old pointer space or large object |
| 62 // space it is essential to guarantee that the page does not contain any | 62 // space it is essential to guarantee that the page does not contain any |
| 63 // garbage pointers to new space: every pointer aligned word which satisfies | 63 // garbage pointers to new space: every pointer aligned word which satisfies |
| 64 // the Heap::InNewSpace() predicate must be a pointer to a live heap object in | 64 // the Heap::InNewSpace() predicate must be a pointer to a live heap object in |
| 65 // new space. Thus objects in old pointer and large object spaces should have a | 65 // new space. Thus objects in old pointer and large object spaces should have a |
| 66 // special layout (e.g. no bare integer fields). This requirement does not | 66 // special layout (e.g. no bare integer fields). This requirement does not |
| 67 // apply to map space which is iterated in a special fashion. However we still | 67 // apply to map space which is iterated in a special fashion. However we still |
| 68 // require pointer fields of dead maps to be cleaned. | 68 // require pointer fields of dead maps to be cleaned. |
| 69 // | 69 // |
| 70 // To enable lazy cleaning of old space pages we use a notion of allocation | 70 // To enable lazy cleaning of old space pages we can mark chunks of the page |
| 71 // watermark. Every pointer under watermark is considered to be well formed. | 71 // as being garbage. Garbage sections are marked with a special map. These |
| 72 // Page allocation watermark is not necessarily equal to page allocation top but | 72 // sections are skipped when scanning the page, even if we are otherwise |
| 73 // all alive objects on page should reside under allocation watermark. | 73 // scanning without regard for object boundaries. Garbage sections are chained |
| 74 // During scavenge allocation watermark might be bumped and invalid pointers | 74 // together to form a free list after a GC. Garbage sections created outside |
| 75 // might appear below it. To avoid following them we store a valid watermark | 75 // of GCs by object truncation etc. may not be in the free list chain. Very |
| 76 // into special field in the page header and set a page WATERMARK_INVALIDATED | 76 // small free spaces are ignored, they need only be cleaned of bogus pointers |
| 77 // flag. For details see comments in the Page::SetAllocationWatermark() method | 77 // into new space. |
| 78 // body. | |
| 79 // | 78 // |
| 79 // Each page may have up to one special garbage section. The start of this | |
| 80 // section is denoted by the top field in the space. The end of the section | |
| 81 // is denoted by the limit field in the space. This special garbage section | |
| 82 // is not marked with a byte array map in the data. The point of this section | |
| 83 // is to enable linear allocation without having to constantly update the byte | |
| 84 // array every time the top field is updated and a new object is created. The | |
| 85 // special garbage section is not in the chain of garbage sections. | |
| 86 // | |
| 87 // Since the top and limit fields are in the space, not the page, only one page | |
| 88 // has a special garbage section, and if the top and limit are equal then there | |
| 89 // is no special garbage section. | |
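
The skipping scheme above lends itself to a short sketch. This is illustrative only: `IsFillerMap` is a hypothetical predicate and not part of the patch; the point is that a garbage section parses as an object whose `Size()` spans the whole hole.

```cpp
// Sketch of skipping a garbage section during a scan that otherwise
// ignores object boundaries. IsFillerMap() is hypothetical; the real
// scanner and map names live in the patch, not here.
Address AdvancePastGarbage(Address scan_ptr) {
  HeapObject* candidate = HeapObject::FromAddress(scan_ptr);
  if (IsFillerMap(candidate->map())) {
    // The section records its size, so one step jumps the whole hole.
    return scan_ptr + candidate->Size();
  }
  return scan_ptr;
}
```
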
| 80 | 90 |
| 81 // Some assertion macros used in the debugging mode. | 91 // Some assertion macros used in the debugging mode. |
| 82 | 92 |
| 83 #define ASSERT_PAGE_ALIGNED(address) \ | 93 #define ASSERT_PAGE_ALIGNED(address) \ |
| 84 ASSERT((OffsetFrom(address) & Page::kPageAlignmentMask) == 0) | 94 ASSERT((OffsetFrom(address) & Page::kPageAlignmentMask) == 0) |
| 85 | 95 |
| 86 #define ASSERT_OBJECT_ALIGNED(address) \ | 96 #define ASSERT_OBJECT_ALIGNED(address) \ |
| 87 ASSERT((OffsetFrom(address) & kObjectAlignmentMask) == 0) | 97 ASSERT((OffsetFrom(address) & kObjectAlignmentMask) == 0) |
| 88 | 98 |
| 89 #define ASSERT_MAP_ALIGNED(address) \ | 99 #define ASSERT_MAP_ALIGNED(address) \ |
| 90 ASSERT((OffsetFrom(address) & kMapAlignmentMask) == 0) | 100 ASSERT((OffsetFrom(address) & kMapAlignmentMask) == 0) |
| 91 | 101 |
| 92 #define ASSERT_OBJECT_SIZE(size) \ | 102 #define ASSERT_OBJECT_SIZE(size) \ |
| 93 ASSERT((0 < size) && (size <= Page::kMaxHeapObjectSize)) | 103 ASSERT((0 < size) && (size <= Page::kMaxHeapObjectSize)) |
| 94 | 104 |
| 95 #define ASSERT_PAGE_OFFSET(offset) \ | 105 #define ASSERT_PAGE_OFFSET(offset) \ |
| 96 ASSERT((Page::kObjectStartOffset <= offset) \ | 106 ASSERT((Page::kObjectStartOffset <= offset) \ |
| 97 && (offset <= Page::kPageSize)) | 107 && (offset <= Page::kPageSize)) |
| 98 | 108 |
| 99 #define ASSERT_MAP_PAGE_INDEX(index) \ | 109 #define ASSERT_MAP_PAGE_INDEX(index) \ |
| 100 ASSERT((0 <= index) && (index <= MapSpace::kMaxMapPageIndex)) | 110 ASSERT((0 <= index) && (index <= MapSpace::kMaxMapPageIndex)) |
| 101 | 111 |
| 102 | 112 |
| 103 class PagedSpace; | 113 class PagedSpace; |
| 104 class MemoryAllocator; | 114 class MemoryAllocator; |
| 105 class AllocationInfo; | 115 class AllocationInfo; |
| 106 class Space; | 116 class Space; |
| 117 class OldSpaceFreeList; | |
| 107 | 118 |
| 108 | 119 |
| 109 // Bitmap is a sequence of cells each containing fixed number of bits. | 120 // Bitmap is a sequence of cells each containing fixed number of bits. |
| 110 template<typename StorageDescriptor> | 121 template<typename StorageDescriptor> |
| 111 class Bitmap { | 122 class Bitmap { |
| 112 public: | 123 public: |
| 113 typedef uint32_t CellType; | 124 typedef uint32_t CellType; |
| 114 static const uint32_t kBitsPerCell = 32; | 125 static const uint32_t kBitsPerCell = 32; |
| 115 static const uint32_t kBitsPerCellLog2 = 5; | 126 static const uint32_t kBitsPerCellLog2 = 5; |
| 116 static const uint32_t kBitIndexMask = kBitsPerCell - 1; | 127 static const uint32_t kBitIndexMask = kBitsPerCell - 1; |
| (...skipping 172 matching lines...) | |
| 289 public: | 300 public: |
| 290 static MemoryChunk* FromAddress(Address a) { | 301 static MemoryChunk* FromAddress(Address a) { |
| 291 return reinterpret_cast<MemoryChunk*>(OffsetFrom(a) & ~kAlignmentMask); | 302 return reinterpret_cast<MemoryChunk*>(OffsetFrom(a) & ~kAlignmentMask); |
| 292 } | 303 } |
| 293 | 304 |
| 294 Address address() { return reinterpret_cast<Address>(this); } | 305 Address address() { return reinterpret_cast<Address>(this); } |
| 295 | 306 |
| 296 bool is_valid() { return address() != NULL; } | 307 bool is_valid() { return address() != NULL; } |
| 297 | 308 |
| 298 MemoryChunk* next_chunk() const { return next_chunk_; } | 309 MemoryChunk* next_chunk() const { return next_chunk_; } |
| 310 MemoryChunk* prev_chunk() const { return prev_chunk_; } | |
| 299 | 311 |
| 300 void set_next_chunk(MemoryChunk* next) { next_chunk_ = next; } | 312 void set_next_chunk(MemoryChunk* next) { next_chunk_ = next; } |
| 313 void set_prev_chunk(MemoryChunk* prev) { prev_chunk_ = prev; } | |
| 301 | 314 |
| 302 Space* owner() const { return owner_; } | 315 Space* owner() const { return owner_; } |
| 303 | 316 |
| 304 Address body() { return address() + kBodyOffset; } | 317 Address body() { return address() + kObjectStartOffset; } |
| 305 | 318 |
| 306 int body_size() { return size() - kBodyOffset; } | 319 int body_size() { return size() - kObjectStartOffset; } |
| 307 | 320 |
| 308 enum MemoryChunkFlags { | 321 enum MemoryChunkFlags { |
| 309 IS_EXECUTABLE, | 322 IS_EXECUTABLE, |
| 323 WAS_SWEPT_CONSERVATIVELY, | |
| 310 NUM_MEMORY_CHUNK_FLAGS | 324 NUM_MEMORY_CHUNK_FLAGS |
| 311 }; | 325 }; |
| 312 | 326 |
| 313 void SetFlag(int flag) { | 327 void SetFlag(int flag) { |
| 314 flags_ |= 1 << flag; | 328 flags_ |= 1 << flag; |
| 315 } | 329 } |
| 316 | 330 |
| 317 void ClearFlag(int flag) { | 331 void ClearFlag(int flag) { |
| 318 flags_ &= ~(1 << flag); | 332 flags_ &= ~(1 << flag); |
| 319 } | 333 } |
| (...skipping 11 matching lines...) | |
| 331 | 345 |
| 332 static const size_t kMarksBitmapLength = | 346 static const size_t kMarksBitmapLength = |
| 333 (1 << kPageSizeBits) >> (kPointerSizeLog2); | 347 (1 << kPageSizeBits) >> (kPointerSizeLog2); |
| 334 | 348 |
| 335 static const size_t kMarksBitmapSize = | 349 static const size_t kMarksBitmapSize = |
| 336 (1 << kPageSizeBits) >> (kPointerSizeLog2 + kBitsPerByteLog2); | 350 (1 << kPageSizeBits) >> (kPointerSizeLog2 + kBitsPerByteLog2); |
| 337 | 351 |
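
For concreteness, plugging in the values implied elsewhere in this file (1MB pages, so kPageSizeBits == 20; 4-byte pointers, so kPointerSizeLog2 == 2; kBitsPerByteLog2 == 3, all assumptions about the target) gives:

```cpp
// Worked numbers for the two constants above (assuming kPageSizeBits == 20,
// kPointerSizeLog2 == 2 and kBitsPerByteLog2 == 3 on a 32-bit target):
//   kMarksBitmapLength = 2^20 >> 2       == 262144 markbits (one per word)
//   kMarksBitmapSize   = 2^20 >> (2 + 3) ==  32768 bytes of bitmap storage
```
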
| 338 static const int kBodyOffset = | 352 static const int kBodyOffset = |
| 339 CODE_POINTER_ALIGN(MAP_POINTER_ALIGN(kHeaderSize + kMarksBitmapSize)); | 353 CODE_POINTER_ALIGN(MAP_POINTER_ALIGN(kHeaderSize + kMarksBitmapSize)); |
| 340 | 354 |
| 355 // The start offset of the object area in a page. Aligned to both maps and | |
| 356 // code alignment to be suitable for both. Also aligned to 32 words because | |
| 357 // the marking bitmap is arranged in 32 bit chunks. | |
| 358 static const int kObjectStartAlignment = 32 * kPointerSize; | |

Vyacheslav Egorov (Chromium), 2011/03/15 09:20:09:
Why did not you just modify body offset?

Erik Corry, 2011/03/17 13:39:17:
kObjectStartOffset already existed. I just change...

| 359 static const int kObjectStartOffset = kBodyOffset - 1 + | |
| 360 (kObjectStartAlignment - (kBodyOffset - 1) % kObjectStartAlignment); | |
| 361 | |
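
The expression above is the usual round-up-to-alignment idiom. A standalone sketch (names are ours, not the patch's) checking that it yields the smallest multiple of the alignment that is greater than or equal to the offset:

```cpp
#include <cassert>

// Hypothetical standalone check of the rounding expression used for
// kObjectStartOffset: it rounds `offset` up to a multiple of `alignment`.
constexpr int RoundUpToAlignment(int offset, int alignment) {
  return offset - 1 + (alignment - (offset - 1) % alignment);
}

int main() {
  const int kAlign = 32 * 4;  // 32 words of 4 bytes (an assumed target).
  assert(RoundUpToAlignment(kAlign, kAlign) == kAlign);          // Stays put.
  assert(RoundUpToAlignment(kAlign + 1, kAlign) == 2 * kAlign);  // Rounds up.
  assert(RoundUpToAlignment(1, kAlign) == kAlign);
  return 0;
}
```
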
| 341 size_t size() const { return size_; } | 362 size_t size() const { return size_; } |
| 342 | 363 |
| 343 Executability executable() { | 364 Executability executable() { |
| 344 return IsFlagSet(IS_EXECUTABLE) ? EXECUTABLE : NOT_EXECUTABLE; | 365 return IsFlagSet(IS_EXECUTABLE) ? EXECUTABLE : NOT_EXECUTABLE; |
| 345 } | 366 } |
| 346 | 367 |
| 347 // --------------------------------------------------------------------- | 368 // --------------------------------------------------------------------- |
| 348 // Markbits support | 369 // Markbits support |
| 349 | 370 |
| 350 class BitmapStorageDescriptor { | 371 class BitmapStorageDescriptor { |
| (...skipping 20 matching lines...) | |
| 371 const intptr_t offset = | 392 const intptr_t offset = |
| 372 reinterpret_cast<intptr_t>(addr) & kAlignmentMask; | 393 reinterpret_cast<intptr_t>(addr) & kAlignmentMask; |
| 373 | 394 |
| 374 return static_cast<uint32_t>(offset) >> kPointerSizeLog2; | 395 return static_cast<uint32_t>(offset) >> kPointerSizeLog2; |
| 375 } | 396 } |
| 376 | 397 |
| 377 inline Address MarkbitIndexToAddress(uint32_t index) { | 398 inline Address MarkbitIndexToAddress(uint32_t index) { |
| 378 return this->address() + (index << kPointerSizeLog2); | 399 return this->address() + (index << kPointerSizeLog2); |
| 379 } | 400 } |
| 380 | 401 |
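
A self-contained sketch of the address-to-markbit mapping above, with concrete constants assumed (1MB-aligned chunks and 4-byte pointers); one markbit covers one pointer-sized word:

```cpp
#include <cstdint>

// Mirrors AddressToMarkbitIndex/MarkbitIndexToAddress with assumed values:
// 1MB-aligned chunks (mask 2^20 - 1) and 4-byte pointers (shift of 2).
const uintptr_t kChunkAlignmentMask = (uintptr_t(1) << 20) - 1;
const int kWordSizeLog2 = 2;

uint32_t AddressToMarkbitIndex(uintptr_t addr) {
  return static_cast<uint32_t>(addr & kChunkAlignmentMask) >> kWordSizeLog2;
}

uintptr_t MarkbitIndexToAddress(uintptr_t chunk_base, uint32_t index) {
  return chunk_base + (static_cast<uintptr_t>(index) << kWordSizeLog2);
}
// E.g. an address 64 bytes into a chunk maps to markbit index 16 and back.
```
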
| 402 void InsertAfter(MemoryChunk* other); | |
| 403 void Unlink(); | |
| 404 | |
| 381 protected: | 405 protected: |
| 382 MemoryChunk* next_chunk_; | 406 MemoryChunk* next_chunk_; |
| 407 MemoryChunk* prev_chunk_; | |
| 383 size_t size_; | 408 size_t size_; |
| 384 intptr_t flags_; | 409 intptr_t flags_; |
| 385 Space* owner_; | 410 Space* owner_; |
| 386 | 411 |
| 387 private: | |
| 388 static MemoryChunk* Initialize(Address base, | 412 static MemoryChunk* Initialize(Address base, |
| 389 size_t size, | 413 size_t size, |
| 390 Executability executable, | 414 Executability executable, |
| 391 Space* owner) { | 415 Space* owner) { |
| 392 MemoryChunk* chunk = FromAddress(base); | 416 MemoryChunk* chunk = FromAddress(base); |
| 393 | 417 |
| 394 ASSERT(base == chunk->address()); | 418 ASSERT(base == chunk->address()); |
| 395 | 419 |
| 396 chunk->next_chunk_ = NULL; | |
| 397 chunk->size_ = size; | 420 chunk->size_ = size; |
| 398 chunk->flags_ = 0; | 421 chunk->flags_ = 0; |
| 399 chunk->owner_ = owner; | 422 chunk->owner_ = owner; |
| 400 chunk->markbits()->Clear(); | 423 chunk->markbits()->Clear(); |
| 401 | 424 |
| 402 if (executable == EXECUTABLE) chunk->SetFlag(IS_EXECUTABLE); | 425 if (executable == EXECUTABLE) chunk->SetFlag(IS_EXECUTABLE); |
| 403 | 426 |
| 404 return chunk; | 427 return chunk; |
| 405 } | 428 } |
| 406 | 429 |
| 430 private: | |

Vyacheslav Egorov (Chromium), 2011/03/15 09:20:09:
does not make much sense to me to have a private s...

Erik Corry, 2011/03/17 13:39:17:
Done.

| 407 friend class MemoryAllocator; | 431 friend class MemoryAllocator; |
| 408 }; | 432 }; |
| 409 | 433 |
| 410 STATIC_CHECK(sizeof(MemoryChunk) <= MemoryChunk::kHeaderSize); | 434 STATIC_CHECK(sizeof(MemoryChunk) <= MemoryChunk::kHeaderSize); |
| 411 | 435 |
| 412 // ----------------------------------------------------------------------------- | 436 // ----------------------------------------------------------------------------- |
| 413 // A page is a memory chunk of a size 1MB. Large object pages may be larger. | 437 // A page is a memory chunk of a size 1MB. Large object pages may be larger. |
| 414 // | 438 // |
| 415 // The only way to get a page pointer is by calling factory methods: | 439 // The only way to get a page pointer is by calling factory methods: |
| 416 // Page* p = Page::FromAddress(addr); or | 440 // Page* p = Page::FromAddress(addr); or |
| 417 // Page* p = Page::FromAllocationTop(top); | 441 // Page* p = Page::FromAllocationTop(top); |
| 418 class Page : public MemoryChunk { | 442 class Page : public MemoryChunk { |
| 419 public: | 443 public: |
| 420 // Returns the page containing a given address. The address ranges | 444 // Returns the page containing a given address. The address ranges |
| 421 // from [page_addr .. page_addr + kPageSize[ | 445 // from [page_addr .. page_addr + kPageSize[ |
| 422 // | 446 // |
| 423 // Note that this function only works for addresses in normal paged | 447 // Note that this function only works for addresses in normal paged |
| 424 // spaces and addresses in the first 8K of large object pages (i.e., | 448 // spaces and addresses in the first 1Mbyte of large object pages (i.e., |
| 425 // the start of large objects but not necessarily derived pointers | 449 // the start of large objects but not necessarily derived pointers |
| 426 // within them). | 450 // within them). |
| 427 INLINE(static Page* FromAddress(Address a)) { | 451 INLINE(static Page* FromAddress(Address a)) { |
| 428 return reinterpret_cast<Page*>(OffsetFrom(a) & ~kPageAlignmentMask); | 452 return reinterpret_cast<Page*>(OffsetFrom(a) & ~kPageAlignmentMask); |
| 429 } | 453 } |
| 430 | 454 |
| 431 // Returns the page containing an allocation top. Because an allocation | 455 // Returns the page containing an allocation top. Because an allocation |
| 432 // top address can be the upper bound of the page, we need to subtract | 456 // top address can be the upper bound of the page, we need to subtract |
| 433 // kPointerSize from it first. The address ranges from | 457 // kPointerSize from it first. The address ranges from |
| 434 // [page_addr + kObjectStartOffset .. page_addr + kPageSize]. | 458 // [page_addr + kObjectStartOffset .. page_addr + kPageSize]. |
| 435 INLINE(static Page* FromAllocationTop(Address top)) { | 459 INLINE(static Page* FromAllocationTop(Address top)) { |
| 436 Page* p = FromAddress(top - kPointerSize); | 460 Page* p = FromAddress(top - kPointerSize); |
| 437 ASSERT_PAGE_OFFSET(p->Offset(top)); | 461 ASSERT_PAGE_OFFSET(p->Offset(top)); |
| 438 return p; | 462 return p; |
| 439 } | 463 } |
| 440 | 464 |
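
A worked example of why the subtraction is needed, assuming 1MB pages as described below:

```cpp
// Worked example (1MB pages assumed): a completely full page starting at
// 0x100000 has allocation top 0x200000, which masks to the *next* page.
// Backing up one word keeps the lookup inside the owning page:
//   Page::FromAddress(0x200000)                -> page at 0x200000 (wrong)
//   Page::FromAddress(0x200000 - kPointerSize) -> page at 0x100000 (right)
```
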
| 441 // Returns the next page in the chain of pages owned by a space. | 465 // Returns the next page in the chain of pages owned by a space. |
| 442 inline Page* next_page() { | 466 inline Page* next_page(); |
| 443 ASSERT(!next_chunk()->is_valid() || next_chunk()->owner() == owner()); | 467 inline Page* prev_page(); |
| 444 return static_cast<Page*>(next_chunk()); | 468 inline void set_next_page(Page* page); |
| 445 } | 469 inline void set_prev_page(Page* page); |
| 446 | 470 |
| 447 inline void set_next_page(Page* page) { | 471 PagedSpace* owner() const { return reinterpret_cast<PagedSpace*>(owner_); } |
| 448 ASSERT(!page->is_valid() || page->owner() == owner()); | |
| 449 set_next_chunk(page); | |
| 450 } | |
| 451 | |
| 452 // Return the end of allocation in this page. Undefined for unused pages. | |
| 453 inline Address AllocationTop(); | |
| 454 | |
| 455 // Return the allocation watermark for the page. | |
| 456 // For old space pages it is guaranteed that the area under the watermark | |
| 457 // does not contain any garbage pointers to new space. | |
| 458 inline Address AllocationWatermark(); | |
| 459 | |
| 460 // Return the allocation watermark offset from the beginning of the page. | |
| 461 inline uint32_t AllocationWatermarkOffset(); | |
| 462 | |
| 463 inline void SetAllocationWatermark(Address allocation_watermark); | |
| 464 | |
| 465 inline void SetCachedAllocationWatermark(Address allocation_watermark); | |
| 466 inline Address CachedAllocationWatermark(); | |
| 467 | 472 |
| 468 // Returns the start address of the object area in this page. | 473 // Returns the start address of the object area in this page. |
| 469 Address ObjectAreaStart() { return address() + kObjectStartOffset; } | 474 Address ObjectAreaStart() { return address() + kObjectStartOffset; } |
| 470 | 475 |
| 471 // Returns the end address (exclusive) of the object area in this page. | 476 // Returns the end address (exclusive) of the object area in this page. |
| 472 Address ObjectAreaEnd() { return address() + Page::kPageSize; } | 477 Address ObjectAreaEnd() { return address() + Page::kPageSize; } |
| 473 | 478 |
| 474 // Checks whether an address is page aligned. | 479 // Checks whether an address is page aligned. |
| 475 static bool IsAlignedToPageSize(Address a) { | 480 static bool IsAlignedToPageSize(Address a) { |
| 476 return 0 == (OffsetFrom(a) & kPageAlignmentMask); | 481 return 0 == (OffsetFrom(a) & kPageAlignmentMask); |
| (...skipping 13 matching lines...) | |
| 490 } | 495 } |
| 491 | 496 |
| 492 // --------------------------------------------------------------------- | 497 // --------------------------------------------------------------------- |
| 493 | 498 |
| 494 // Page size in bytes. This must be a multiple of the OS page size. | 499 // Page size in bytes. This must be a multiple of the OS page size. |
| 495 static const int kPageSize = 1 << kPageSizeBits; | 500 static const int kPageSize = 1 << kPageSizeBits; |
| 496 | 501 |
| 497 // Page size mask. | 502 // Page size mask. |
| 498 static const intptr_t kPageAlignmentMask = (1 << kPageSizeBits) - 1; | 503 static const intptr_t kPageAlignmentMask = (1 << kPageSizeBits) - 1; |
| 499 | 504 |
| 500 // The start offset of the object area in a page. Aligned to both maps and | |
| 501 // code alignment to be suitable for both. | |
| 502 static const int kObjectStartOffset = kBodyOffset; | |
| 503 | |
| 504 // Object area size in bytes. | 505 // Object area size in bytes. |
| 505 static const int kObjectAreaSize = kPageSize - kObjectStartOffset; | 506 static const int kObjectAreaSize = kPageSize - kObjectStartOffset; |
| 506 | 507 |
| 507 // Maximum object size that fits in a page. | 508 // Maximum object size that fits in a page. |
| 508 static const int kMaxHeapObjectSize = kObjectAreaSize; | 509 static const int kMaxHeapObjectSize = kObjectAreaSize; |
| 509 | 510 |
| 510 static const int kFirstUsedCell = | 511 static const int kFirstUsedCell = |
| 511 (kBodyOffset/kPointerSize) >> MarkbitsBitmap::kBitsPerCellLog2; | 512 (kObjectStartOffset/kPointerSize) >> MarkbitsBitmap::kBitsPerCellLog2; |
| 512 | 513 |
| 513 static const int kLastUsedCell = | 514 static const int kLastUsedCell = |
| 514 ((kPageSize - kPointerSize)/kPointerSize) >> | 515 ((kPageSize - kPointerSize)/kPointerSize) >> |
| 515 MarkbitsBitmap::kBitsPerCellLog2; | 516 MarkbitsBitmap::kBitsPerCellLog2; |
| 516 | 517 |
| 517 | |
| 518 enum PageFlag { | |
| 519 // Page allocation watermark was bumped by preallocation during scavenge. | |
| 520 // Correct watermark can be retrieved by CachedAllocationWatermark() method | |
| 521 WATERMARK_INVALIDATED = NUM_MEMORY_CHUNK_FLAGS, | |
| 522 | |
| 523 // We say that memory region [start_addr, end_addr[ is continuous if | |
| 524 // and only if: | |
| 525 // a) start_addr coincides with the start of a valid heap object | |
| 526 // b) for any valid heap object o in this region address | |
| 527 // o->address() + o->Size() is either equal to end_addr or coincides | |
| 528 // with the start of a valid heap object. | |
| 529 // We say that a page is continuous if and only if the region | |
| 530 // [page->ObjectAreaStart(), page->AllocationTop()[ is continuous. | |
| 531 // For non-continuous pages we say that address lb is a linearity boundary | |
| 532 // if and only if [lb, page->AllocationTop()[ is either empty or continuous. | |
| 533 IS_CONTINUOUS, | |
| 534 | |
| 535 NUM_PAGE_FLAGS // Must be last | |
| 536 }; | |
| 537 | |
| 538 // To avoid an additional WATERMARK_INVALIDATED flag clearing pass during | |
| 539 // scavenge we just invalidate the watermark on each old space page after | |
| 540 // processing it. And then we flip the meaning of the WATERMARK_INVALIDATED | |
| 541 // flag at the beginning of the next scavenge and each page becomes marked as | |
| 542 // having a valid watermark. | |
| 543 // | |
| 544 // The following invariant must hold for pages in old pointer and map spaces: | |
| 545 // If page is in use then page is marked as having invalid watermark at | |
| 546 // the beginning and at the end of any GC. | |
| 547 // | |
| 548 // This invariant guarantees that after flipping flag meaning at the | |
| 549 // beginning of scavenge all pages in use will be marked as having valid | |
| 550 // watermark. | |
| 551 static inline void FlipMeaningOfInvalidatedWatermarkFlag(); | |
| 552 | |
| 553 // Returns true if the page allocation watermark was not altered during | |
| 554 // scavenge. | |
| 555 inline bool IsWatermarkValid(); | |
| 556 | |
| 557 inline void InvalidateWatermark(bool value); | |
| 558 | |
| 559 inline void ClearGCFields(); | 518 inline void ClearGCFields(); |
| 560 | 519 |
| 561 static const int kAllocationWatermarkOffsetShift = NUM_PAGE_FLAGS; | 520 static inline Page* Initialize(MemoryChunk* chunk, |
| 562 static const int kAllocationWatermarkOffsetBits = kPageSizeBits + 1; | 521 Executability executable, |
| 563 static const uint32_t kAllocationWatermarkOffsetMask = | 522 PagedSpace* owner); |
| 564 ((1 << kAllocationWatermarkOffsetBits) - 1) << | |
| 565 kAllocationWatermarkOffsetShift; | |
| 566 | 523 |
| 567 static const uint32_t kFlagsMask = | 524 void InitializeAsAnchor(PagedSpace* owner); |
| 568 ((1 << kAllocationWatermarkOffsetShift) - 1); | |
| 569 | |
| 570 STATIC_CHECK(kBitsPerInt - kAllocationWatermarkOffsetShift >= | |
| 571 kAllocationWatermarkOffsetBits); | |
| 572 | |
| 573 // This field contains the meaning of the WATERMARK_INVALIDATED flag. | |
| 574 // Instead of clearing this flag from all pages we just flip | |
| 575 // its meaning at the beginning of a scavenge. | |
| 576 static intptr_t watermark_invalidated_mark_; | |
| 577 | |
| 578 // See comments for IS_CONTINUOUS flag. | |
| 579 Address linearity_boundary() { return linearity_boundary_; } | |
| 580 void set_linearity_boundary(Address linearity_boundary) { | |
| 581 linearity_boundary_ = linearity_boundary; | |
| 582 } | |
| 583 | 525 |
| 584 private: | 526 private: |
| 585 static Page* Initialize(MemoryChunk* chunk) { | |
| 586 Page* page = static_cast<Page*>(chunk); | |
| 587 page->allocation_watermark_ = page->body(); | |
| 588 page->InvalidateWatermark(true); | |
| 589 page->SetFlag(IS_CONTINUOUS); | |
| 590 return page; | |
| 591 } | |
| 592 | |
| 593 Address allocation_watermark_; | |
| 594 | |
| 595 // See comments for IS_CONTINUOUS flag. | |
| 596 Address linearity_boundary_; | |
| 597 | |
| 598 friend class MemoryAllocator; | 527 friend class MemoryAllocator; |
| 599 }; | 528 }; |
| 600 | 529 |
| 530 | |
| 601 STATIC_CHECK(sizeof(Page) <= MemoryChunk::kHeaderSize); | 531 STATIC_CHECK(sizeof(Page) <= MemoryChunk::kHeaderSize); |
| 602 | 532 |
| 533 | |
| 603 class LargePage : public MemoryChunk { | 534 class LargePage : public MemoryChunk { |
| 604 public: | 535 public: |
| 605 HeapObject* GetObject() { | 536 HeapObject* GetObject() { |
| 606 return HeapObject::FromAddress(body()); | 537 return HeapObject::FromAddress(body()); |
| 607 } | 538 } |
| 608 | 539 |
| 609 inline LargePage* next_page() const { | 540 inline LargePage* next_page() const { |
| 610 return static_cast<LargePage*>(next_chunk()); | 541 return static_cast<LargePage*>(next_chunk()); |
| 611 } | 542 } |
| 612 | 543 |
| (...skipping 284 matching lines...) | |
| 897 virtual ~ObjectIterator() { } | 828 virtual ~ObjectIterator() { } |
| 898 | 829 |
| 899 virtual HeapObject* next_object() = 0; | 830 virtual HeapObject* next_object() = 0; |
| 900 }; | 831 }; |
| 901 | 832 |
| 902 | 833 |
| 903 // ----------------------------------------------------------------------------- | 834 // ----------------------------------------------------------------------------- |
| 904 // Heap object iterator in new/old/map spaces. | 835 // Heap object iterator in new/old/map spaces. |
| 905 // | 836 // |
| 906 // A HeapObjectIterator iterates objects from the bottom of the given space | 837 // A HeapObjectIterator iterates objects from the bottom of the given space |
| 907 // to it's top or from the bottom of the given page to it's top. | 838 // to its top or from the bottom of the given page to its top. |
| 908 // | 839 // |
| 909 // There are some caveats. | 840 // If objects are allocated in the page during iteration the iterator may |
| 841 // or may not iterate over those objects. The caller must create a new | |
| 842 // iterator starting from the old top in order to be sure to visit these new | |
| 843 // objects. | |

Vyacheslav Egorov (Chromium), 2011/03/15 09:20:09:
what if we allocate inside page in the hole? how s...

Erik Corry, 2011/03/17 13:39:17:
Comment clarified.

| 910 // | 844 // |
| 911 // (1) If the space top changes upward during iteration (because of | |
| 912 // allocating new objects), the iterator does not iterate objects | |
| 913 // above the original space top. The caller must create a new | |
| 914 // iterator starting from the old top in order to visit these new | |
| 915 // objects. | |
| 916 // | |
| 917 // (2) If new objects are allocated below the original allocation top | |
| 918 // (e.g., free-list allocation in paged spaces), the new objects | |
| 919 // may or may not be iterated depending on their position with | |
| 920 // respect to the current point of iteration. | |
| 921 // | |
| 922 // (3) The space top should not change downward during iteration, | |
| 923 // otherwise the iterator will return not-necessarily-valid | |
| 924 // objects. | |
| 925 | 845 |
| 926 class HeapObjectIterator: public ObjectIterator { | 846 class HeapObjectIterator: public ObjectIterator { |
| 927 public: | 847 public: |
| 928 // Creates a new object iterator in a given space. | 848 // Creates a new object iterator in a given space. |
| 929 // If the size function is not given, the iterator calls the default | 849 // If the size function is not given, the iterator calls the default |
| 930 // Object::Size(). | 850 // Object::Size(). |
| 931 explicit HeapObjectIterator(PagedSpace* space); | 851 explicit HeapObjectIterator(PagedSpace* space); |
| 932 HeapObjectIterator(PagedSpace* space, HeapObjectCallback size_func); | 852 HeapObjectIterator(PagedSpace* space, HeapObjectCallback size_func); |
| 933 HeapObjectIterator(Page* page, HeapObjectCallback size_func); | 853 HeapObjectIterator(Page* page, HeapObjectCallback size_func); |
| 934 | 854 |
| 935 inline HeapObject* next() { | 855 // Advance to the next object, skipping byte arrays and other fillers and |
| 936 return (cur_addr_ < cur_limit_) ? FromCurrentPage() : FromNextPage(); | 856 // skipping the special garbage section of which there is one per space. |
| 857 // Returns NULL when the iteration has ended. | |
| 858 inline HeapObject* Next() { | |

Vyacheslav Egorov (Chromium), 2011/03/15 09:20:09:
What if bytearray is really something important wh...

Erik Corry, 2011/03/17 13:39:17:
I have created FreeSpace, which is used where the...

| 859 do { | |
| 860 HeapObject* next_obj = FromCurrentPage(); | |
| 861 if (next_obj != NULL) return next_obj; | |
| 862 } while (AdvanceToNextPage()); | |
| 863 return NULL; | |
| 937 } | 864 } |
| 938 | 865 |
| 939 // implementation of ObjectIterator. | 866 virtual HeapObject* next_object() { |
| 940 virtual HeapObject* next_object() { return next(); } | 867 return Next(); |
| 868 } | |
| 941 | 869 |
| 942 private: | 870 private: |
| 943 Address cur_addr_; // current iteration point | 871 enum PageMode { kOnePageOnly, kAllPagesInSpace }; |
| 944 Address end_addr_; // end iteration point | |
| 945 Address cur_limit_; // current page limit | |
| 946 HeapObjectCallback size_func_; // size function | |
| 947 Page* end_page_; // caches the page of the end address | |
| 948 | 872 |
| 949 HeapObject* FromCurrentPage() { | 873 Address cur_addr_; // Current iteration point. |
| 950 ASSERT(cur_addr_ < cur_limit_); | 874 Address cur_end_; // End iteration point. |
| 951 HeapObject* obj = HeapObject::FromAddress(cur_addr_); | 875 HeapObjectCallback size_func_; // Size function or NULL. |
| 876 PagedSpace* space_; | |
| 877 PageMode page_mode_; | |
| 952 | 878 |
| 953 Page* p = Page::FromAddress(cur_addr_); | 879 // Fast (inlined) path of next(). |
| 954 if (p->IsFlagSet(Page::IS_CONTINUOUS)) { | 880 inline HeapObject* FromCurrentPage(); |
| 955 int obj_size = (size_func_ == NULL) ? obj->Size() : size_func_(obj); | |
| 956 ASSERT_OBJECT_SIZE(obj_size); | |
| 957 | 881 |
| 958 cur_addr_ += obj_size; | 882 // Slow path of next(), goes into the next page. Returns false if the |
| 959 ASSERT(cur_addr_ <= cur_limit_); | 883 // iteration has ended. |
| 960 } else { | 884 bool AdvanceToNextPage(); |
| 961 AdvanceUsingMarkbits(); | |
| 962 } | |
| 963 | |
| 964 return obj; | |
| 965 } | |
| 966 | |
| 967 void AdvanceUsingMarkbits(); | |
| 968 | |
| 969 // Slow path of next, goes into the next page. | |
| 970 HeapObject* FromNextPage(); | |
| 971 | 885 |
| 972 // Initializes fields. | 886 // Initializes fields. |
| 973 void Initialize(Address start, Address end, HeapObjectCallback size_func); | 887 inline void Initialize(PagedSpace* owner, |
| 888 Address start, | |
| 889 Address end, | |
| 890 PageMode mode, | |
| 891 HeapObjectCallback size_func); | |
| 974 | 892 |
| 975 #ifdef DEBUG | 893 #ifdef DEBUG |
| 976 // Verifies whether fields have valid values. | 894 // Verifies whether fields have valid values. |
| 977 void Verify(); | 895 void Verify(); |
| 978 #endif | 896 #endif |
| 979 }; | 897 }; |
| 980 | 898 |
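
Typical use of the new Next()/NULL protocol, as a sketch (assumes some `PagedSpace* space` is at hand):

```cpp
// Sketch: walking every live object in a paged space with the iterator.
// Fillers and the space's one special garbage section are skipped by Next().
HeapObjectIterator it(space);
for (HeapObject* obj = it.Next(); obj != NULL; obj = it.Next()) {
  // ... visit obj ...
}
```
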
| 981 | 899 |
| 982 // ----------------------------------------------------------------------------- | 900 // ----------------------------------------------------------------------------- |
| 983 // A PageIterator iterates the pages in a paged space. | 901 // A PageIterator iterates the pages in a paged space. |
| 984 // | |
| 985 // The PageIterator class provides three modes for iterating pages in a space: | |
| 986 // PAGES_IN_USE iterates pages containing allocated objects. | |
| 987 // PAGES_USED_BY_MC iterates pages that hold relocated objects during a | |
| 988 // mark-compact collection. | |
| 989 // ALL_PAGES iterates all pages in the space. | |
| 990 // | |
| 991 // There are some caveats. | |
| 992 // | |
| 993 // (1) If the space expands during iteration, new pages will not be | |
| 994 // returned by the iterator in any mode. | |
| 995 // | |
| 996 // (2) If new objects are allocated during iteration, they will appear | |
| 997 // in pages returned by the iterator. Allocation may cause the | |
| 998 // allocation pointer or MC allocation pointer in the last page to | |
| 999 // change between constructing the iterator and iterating the last | |
| 1000 // page. | |
| 1001 // | |
| 1002 // (3) The space should not shrink during iteration, otherwise the | |
| 1003 // iterator will return deallocated pages. | |
| 1004 | 902 |
| 1005 class PageIterator BASE_EMBEDDED { | 903 class PageIterator BASE_EMBEDDED { |
| 1006 public: | 904 public: |
| 1007 enum Mode { | 905 explicit inline PageIterator(PagedSpace* space); |
| 1008 PAGES_IN_USE, | |
| 1009 ALL_PAGES | |
| 1010 }; | |
| 1011 | |
| 1012 PageIterator(PagedSpace* space, Mode mode); | |
| 1013 | 906 |
| 1014 inline bool has_next(); | 907 inline bool has_next(); |
| 1015 inline Page* next(); | 908 inline Page* next(); |
| 1016 | 909 |
| 1017 private: | 910 private: |
| 1018 PagedSpace* space_; | 911 PagedSpace* space_; |
| 1019 Page* prev_page_; // Previous page returned. | 912 Page* prev_page_; // Previous page returned. |
| 1020 Page* stop_page_; // Page to stop at (last page returned by the iterator). | 913 // Next page that will be returned. Cached here so that we can use this |
| 914 // iterator for operations that deallocate pages. | |
| 915 Page* next_page_; | |
| 1021 }; | 916 }; |
| 1022 | 917 |
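
Because the next page is cached, the iterator tolerates the current page being released mid-loop, which the comment above calls out. A usage sketch (again assuming a `PagedSpace* space`):

```cpp
// Sketch: iterating the pages of a space. Because next_page_ is cached,
// the page returned by next() may safely be released inside the loop.
PageIterator it(space);
while (it.has_next()) {
  Page* p = it.next();
  // ... process (or even deallocate) p ...
}
```
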
| 1023 | 918 |
| 1024 // ----------------------------------------------------------------------------- | 919 // ----------------------------------------------------------------------------- |
| 1025 // A space has a list of pages. The next page can be accessed via | 920 // A space has a circular list of pages. The next page can be accessed via |
| 1026 // Page::next_page() call. The next page of the last page is an | 921 // Page::next_page() call. |
| 1027 // invalid page pointer. A space can expand and shrink dynamically. | |
| 1028 | 922 |
| 1029 // An abstraction of allocation and relocation pointers in a page-structured | 923 // An abstraction of allocation and relocation pointers in a page-structured |
| 1030 // space. | 924 // space. |
| 1031 class AllocationInfo { | 925 class AllocationInfo { |
| 1032 public: | 926 public: |
| 1033 Address top; // current allocation top | 927 Address top; // Current allocation top. |
| 1034 Address limit; // current allocation limit | 928 Address limit; // Current allocation limit. |
| 1035 | 929 |
| 1036 #ifdef DEBUG | 930 #ifdef DEBUG |
| 1037 bool VerifyPagedAllocation() { | 931 bool VerifyPagedAllocation() { |
| 1038 return (Page::FromAllocationTop(top) == Page::FromAllocationTop(limit)) | 932 return (Page::FromAllocationTop(top) == Page::FromAllocationTop(limit)) |
| 1039 && (top <= limit); | 933 && (top <= limit); |
| 1040 } | 934 } |
| 1041 #endif | 935 #endif |
| 1042 }; | 936 }; |
| 1043 | 937 |
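
The top/limit pair supports the bump-pointer fast path described in the free-list comments further down. A minimal sketch of that fast path (ours, not the patch's actual AllocateRaw; Address is a byte pointer in V8, so plain pointer arithmetic applies):

```cpp
// Sketch of bump-pointer allocation against an AllocationInfo. A NULL
// result means the linear area is exhausted and the free list must be
// consulted for a new top/limit pair.
static Address AllocateLinearly(AllocationInfo* info, int size_in_bytes) {
  Address result = info->top;
  if (result + size_in_bytes > info->limit) return NULL;
  info->top = result + size_in_bytes;
  return result;
}
```
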
| 1044 | 938 |
| (...skipping 11 matching lines...) | |
| 1056 // functions increase or decrease one of the non-capacity stats in | 950 // functions increase or decrease one of the non-capacity stats in |
| 1057 // conjunction with capacity, or else they always balance increases and | 951 // conjunction with capacity, or else they always balance increases and |
| 1058 // decreases to the non-capacity stats. | 952 // decreases to the non-capacity stats. |
| 1059 class AllocationStats BASE_EMBEDDED { | 953 class AllocationStats BASE_EMBEDDED { |
| 1060 public: | 954 public: |
| 1061 AllocationStats() { Clear(); } | 955 AllocationStats() { Clear(); } |
| 1062 | 956 |
| 1063 // Zero out all the allocation statistics (ie, no capacity). | 957 // Zero out all the allocation statistics (ie, no capacity). |
| 1064 void Clear() { | 958 void Clear() { |
| 1065 capacity_ = 0; | 959 capacity_ = 0; |
| 1066 available_ = 0; | |
| 1067 size_ = 0; | 960 size_ = 0; |
| 1068 waste_ = 0; | 961 waste_ = 0; |
| 1069 } | 962 } |
| 1070 | 963 |
| 1071 // Reset the allocation statistics (ie, available = capacity with no | 964 // Reset the allocation statistics (ie, available = capacity with no |
| 1072 // wasted or allocated bytes). | 965 // wasted or allocated bytes). |
| 1073 void Reset() { | 966 void Reset() { |
| 1074 available_ = capacity_; | |
| 1075 size_ = 0; | 967 size_ = 0; |
| 1076 waste_ = 0; | 968 waste_ = 0; |
| 1077 } | 969 } |
| 1078 | 970 |
| 1079 // Accessors for the allocation statistics. | 971 // Accessors for the allocation statistics. |
| 1080 intptr_t Capacity() { return capacity_; } | 972 intptr_t Capacity() { return capacity_; } |
| 1081 intptr_t Available() { return available_; } | |
| 1082 intptr_t Size() { return size_; } | 973 intptr_t Size() { return size_; } |
| 1083 intptr_t Waste() { return waste_; } | 974 intptr_t Waste() { return waste_; } |
| 1084 | 975 |
| 1085 // Grow the space by adding available bytes. | 976 // Grow the space by adding available bytes. They are initially marked as |
| 977 // being in use (part of the size), but will normally be immediately freed, | |
| 978 // putting them on the free list and removing them from size_. | |
| 1086 void ExpandSpace(int size_in_bytes) { | 979 void ExpandSpace(int size_in_bytes) { |
| 1087 capacity_ += size_in_bytes; | 980 capacity_ += size_in_bytes; |
| 1088 available_ += size_in_bytes; | 981 size_ += size_in_bytes; |
| 1089 } | |
| 1090 | |
| 1091 // Shrink the space by removing available bytes. | |
| 1092 void ShrinkSpace(int size_in_bytes) { | |
| 1093 capacity_ -= size_in_bytes; | |
| 1094 available_ -= size_in_bytes; | |
| 1095 } | 982 } |
| 1096 | 983 |
| 1097 // Allocate from available bytes (available -> size). | 984 // Allocate from available bytes (available -> size). |
| 1098 void AllocateBytes(intptr_t size_in_bytes) { | 985 void AllocateBytes(intptr_t size_in_bytes) { |
| 1099 available_ -= size_in_bytes; | |
| 1100 size_ += size_in_bytes; | 986 size_ += size_in_bytes; |
| 1101 } | 987 } |
| 1102 | 988 |
| 1103 // Free allocated bytes, making them available (size -> available). | 989 // Free allocated bytes, making them available (size -> available). |
| 1104 void DeallocateBytes(intptr_t size_in_bytes) { | 990 void DeallocateBytes(intptr_t size_in_bytes) { |
| 1105 size_ -= size_in_bytes; | 991 size_ -= size_in_bytes; |
| 1106 available_ += size_in_bytes; | 992 ASSERT(size_ >= 0); |
| 1107 } | 993 } |
| 1108 | 994 |
| 1109 // Waste free bytes (available -> waste). | 995 // Waste free bytes (available -> waste). |
| 1110 void WasteBytes(int size_in_bytes) { | 996 void WasteBytes(int size_in_bytes) { |
| 1111 available_ -= size_in_bytes; | 997 size_ -= size_in_bytes; |
| 1112 waste_ += size_in_bytes; | 998 waste_ += size_in_bytes; |
| 1113 } | 999 } |
| 1114 | 1000 |
| 1115 // Consider the wasted bytes to be allocated, as they contain filler | |
| 1116 // objects (waste -> size). | |
| 1117 void FillWastedBytes(intptr_t size_in_bytes) { | |
| 1118 waste_ -= size_in_bytes; | |
| 1119 size_ += size_in_bytes; | |
| 1120 } | |
| 1121 | |
| 1122 private: | 1001 private: |
| 1123 intptr_t capacity_; | 1002 intptr_t capacity_; |
| 1124 intptr_t available_; | |
| 1125 intptr_t size_; | 1003 intptr_t size_; |
| 1126 intptr_t waste_; | 1004 intptr_t waste_; |
| 1127 }; | 1005 }; |
| 1128 | 1006 |
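
The grow-then-free flow described in ExpandSpace()'s comment can be traced concretely. A sketch, assuming a whole fresh page of Page::kObjectAreaSize bytes is handed to the space:

```cpp
// Sketch: accounting for a newly added page. Its bytes arrive as both
// capacity and size, then freeing them moves them onto the free list,
// which is where Available() is tracked in the new scheme.
AllocationStats stats;
stats.ExpandSpace(Page::kObjectAreaSize);      // capacity_ += n; size_ += n
stats.DeallocateBytes(Page::kObjectAreaSize);  // size_ -= n; bytes now free
// Capacity() == Page::kObjectAreaSize, Size() == 0.
```
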
| 1129 | 1007 |
| 1008 // ----------------------------------------------------------------------------- | |
| 1009 // Free lists for old object spaces | |
| 1010 // | |
| 1011 // Free-list nodes are free blocks in the heap. They look like heap objects | |
| 1012 // (free-list node pointers have the heap object tag, and they have a map like | |
| 1013 // a heap object). They have a size and a next pointer. The next pointer is | |
| 1014 // the raw address of the next free list node (or NULL). | |
| 1015 class FreeListNode: public HeapObject { | |
| 1016 public: | |
| 1017 // Obtain a free-list node from a raw address. This is not a cast because | |
| 1018 // it does not check nor require that the first word at the address is a map | |
| 1019 // pointer. | |
| 1020 static FreeListNode* FromAddress(Address address) { | |
| 1021 return reinterpret_cast<FreeListNode*>(HeapObject::FromAddress(address)); | |
| 1022 } | |
| 1023 | |
| 1024 static inline bool IsFreeListNode(HeapObject* object); | |
| 1025 | |
| 1026 // Set the size in bytes, which can be read with HeapObject::Size(). This | |
| 1027 // function also writes a map to the first word of the block so that it | |
| 1028 // looks like a heap object to the garbage collector and heap iteration | |
| 1029 // functions. | |
| 1030 void set_size(int size_in_bytes); | |
| 1031 | |
| 1032 // Accessors for the next field. | |
| 1033 inline FreeListNode* next(); | |
| 1034 inline FreeListNode** next_address(); | |
| 1035 inline void set_next(FreeListNode* next); | |
| 1036 | |
| 1037 inline void Zap(); | |
| 1038 | |
| 1039 private: | |
| 1040 static const int kNextOffset = POINTER_SIZE_ALIGN(ByteArray::kHeaderSize); | |
| 1041 | |
| 1042 DISALLOW_IMPLICIT_CONSTRUCTORS(FreeListNode); | |
| 1043 }; | |
| 1044 | |
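
An illustrative layout of a free-list node under the description above (field offsets here are schematic, not the patch's exact constants):

```cpp
// Schematic free-list node layout: the first word is a map so that the
// block parses as a heap object during heap iteration; a size and the
// raw address of the next free node follow.
//
//   +-----------------------+  <- node address
//   | map (free-space-like) |
//   | size_in_bytes         |
//   | next (raw Address)    |
//   | ... unused bytes ...  |
//   +-----------------------+
```
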
| 1045 | |
| 1046 static const uintptr_t kFreeListZapValue = 0xfeed1eaf; | |

Vyacheslav Egorov (Chromium), 2011/03/15 09:20:09:
please move it to other zap values.

Erik Corry, 2011/03/17 13:39:17:
Done.

| 1047 | |
| 1048 | |
| 1049 // The free list for the old space. The free list is organized in such a way | |
| 1050 // as to encourage objects allocated around the same time to be near each | |
| 1051 // other. The normal way to allocate is intended to be by bumping a 'top' | |
| 1052 // pointer until it hits a 'limit' pointer. When the limit is hit we need to | |
| 1053 // find a new space to allocate from. This is done with the free list, which | |
| 1054 // is divided up into rough categories to cut down on waste. Having finer | |
| 1055 // categories would scatter allocation more. | |
| 1056 | |
| 1057 // The old space free list is organized in categories. | |
| 1058 // 1-31 words: Such small free areas are discarded for efficiency reasons. | |
| 1059 // They can be reclaimed by the compactor. However the distance between top | |
| 1060 // and limit may be this small. | |
| 1061 // 32-255 words: There is a list of spaces this large. It is used for top and | |
| 1062 // limit when the object we need to allocate is 1-31 words in size. These | |
| 1063 // spaces are called small. | |
| 1064 // 256-2047 words: There is a list of spaces this large. It is used for top and | |
| 1065 // limit when the object we need to allocate is 32-255 words in size. These | |
| 1066 // spaces are called medium. | |
| 1067 // 1048-16383 words: There is a list of spaces this large. It is used for top | |
| 1068 // and limit when the object we need to allocate is 256-2047 words in size. | |
| 1069 // These spaces are call large. | |
| 1070 // At least 16384 words. This list is for objects of 2048 words or larger. | |
| 1071 // Empty pages are added to this list. These spaces are called huge. | |
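
A hypothetical helper (not in the patch) mapping a free block's size in words onto the categories listed above; the thresholds mirror the kSmallListMin/kSmallListMax/kMediumListMax/kLargeListMax constants declared further down:

```cpp
// Maps a free block's size, in words, onto the free-list categories.
const char* FreeListCategory(int size_in_words) {
  if (size_in_words < 0x20) return "discarded (1-31 words)";
  if (size_in_words <= 0xff) return "small";
  if (size_in_words <= 0x7ff) return "medium";
  if (size_in_words <= 0x3fff) return "large";
  return "huge";
}
```
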
| 1072 class OldSpaceFreeList BASE_EMBEDDED { | |
| 1073 public: | |
| 1074 explicit OldSpaceFreeList(PagedSpace* owner); | |
| 1075 | |
| 1076 // Clear the free list. | |
| 1077 void Reset(); | |
| 1078 | |
| 1079 // Return the number of bytes available on the free list. | |
| 1080 intptr_t available() { return available_; } | |
| 1081 | |
| 1082 // Place a node on the free list. The block of size 'size_in_bytes' | |
| 1083 // starting at 'start' is placed on the free list. The return value is the | |
| 1084 // number of bytes that have been lost due to internal fragmentation by | |
| 1085 // freeing the block. Bookkeeping information will be written to the block, | |
| 1086 // ie, its contents will be destroyed. The start address should be word | |
| 1087 // aligned, and the size should be a non-zero multiple of the word size. | |
| 1088 int Free(Address start, int size_in_bytes); | |
| 1089 | |
| 1090 // Allocate a block of size 'size_in_bytes' from the free list. The block | |
| 1091 // is uninitialized. A failure is returned if no block is available. The | |
| 1092 // number of bytes lost to fragmentation is returned in the output parameter | |
| 1093 // 'wasted_bytes'. The size should be a non-zero multiple of the word size. | |
| 1094 MUST_USE_RESULT HeapObject* Allocate(int size_in_bytes); | |
| 1095 | |
| 1096 void MarkNodes(); | |
| 1097 | |
| 1098 #ifdef DEBUG | |
| 1099 void Zap(); | |
| 1100 static intptr_t SumFreeList(FreeListNode* node); | |
| 1101 intptr_t SumFreeLists(); | |
| 1102 #endif | |
| 1103 | |
| 1104 private: | |
| 1105 // The size range of blocks, in bytes. | |
| 1106 static const int kMinBlockSize = 3 * kPointerSize; | |
| 1107 static const int kMaxBlockSize = Page::kMaxHeapObjectSize; | |
| 1108 | |
| 1109 // The identity of the owning space. | |
| 1110 PagedSpace* owner_; | |
| 1111 | |
| 1112 // Total available bytes in all blocks on this free list. | |
| 1113 int available_; | |
| 1114 | |
| 1115 static const int kSmallListMin = 0x20 * kPointerSize; | |
| 1116 static const int kSmallListMax = 0xff * kPointerSize; | |
| 1117 static const int kMediumListMax = 0x7ff * kPointerSize; | |
| 1118 static const int kLargeListMax = 0x3fff * kPointerSize; | |
| 1119 static const int kSmallAllocationMax = kSmallListMin - kPointerSize; | |
| 1120 static const int kMediumAllocationMax = kSmallListMax; | |
| 1121 static const int kLargeAllocationMax = kMediumListMax; | |
| 1122 FreeListNode* small_list_; | |
| 1123 FreeListNode* medium_list_; | |
| 1124 FreeListNode* large_list_; | |
| 1125 FreeListNode* huge_list_; | |
| 1126 | |
| 1127 DISALLOW_IMPLICIT_CONSTRUCTORS(OldSpaceFreeList); | |
| 1128 }; | |
| 1129 | |
| 1130 | |
| 1130 class PagedSpace : public Space { | 1131 class PagedSpace : public Space { |
| 1131 public: | 1132 public: |
| 1132 // Creates a space with a maximum capacity, and an id. | 1133 // Creates a space with a maximum capacity, and an id. |
| 1133 PagedSpace(intptr_t max_capacity, | 1134 PagedSpace(intptr_t max_capacity, |
| 1134 AllocationSpace id, | 1135 AllocationSpace id, |
| 1135 Executability executable); | 1136 Executability executable); |
| 1136 | 1137 |
| 1137 virtual ~PagedSpace() {} | 1138 virtual ~PagedSpace() {} |
| 1138 | 1139 |
| 1139 // Set up the space using the given address range of virtual memory (from | 1140 // Set up the space using the given address range of virtual memory (from |
| (...skipping 13 matching lines...) | |
| 1153 // Checks whether an object/address is in this space. | 1154 // Checks whether an object/address is in this space. |
| 1154 inline bool Contains(Address a); | 1155 inline bool Contains(Address a); |
| 1155 bool Contains(HeapObject* o) { return Contains(o->address()); } | 1156 bool Contains(HeapObject* o) { return Contains(o->address()); } |
| 1156 | 1157 |
| 1157 // Given an address occupied by a live object, return that object if it is | 1158 // Given an address occupied by a live object, return that object if it is |
| 1158 // in this space, or Failure::Exception() if it is not. The implementation | 1159 // in this space, or Failure::Exception() if it is not. The implementation |
| 1159 // iterates over objects in the page containing the address, the cost is | 1160 // iterates over objects in the page containing the address, the cost is |
| 1160 // linear in the number of objects in the page. It may be slow. | 1161 // linear in the number of objects in the page. It may be slow. |
| 1161 MUST_USE_RESULT MaybeObject* FindObject(Address addr); | 1162 MUST_USE_RESULT MaybeObject* FindObject(Address addr); |
| 1162 | 1163 |
| 1163 // Checks whether page is currently in use by this space. | |
| 1164 bool IsUsed(Page* page); | |
| 1165 | |
| 1166 // Prepares for a mark-compact GC. | 1164 // Prepares for a mark-compact GC. |
| 1167 virtual void PrepareForMarkCompact(bool will_compact); | 1165 virtual void PrepareForMarkCompact(bool will_compact); |
| 1168 | 1166 |
| 1169 // The top of allocation in a page in this space. Undefined if page is unused. | 1167 // Current capacity without growing (Size() + Available()). |
| 1170 Address PageAllocationTop(Page* page) { | |
| 1171 return page == TopPageOf(allocation_info_) ? top() | |
| 1172 : PageAllocationLimit(page); | |
| 1173 } | |
| 1174 | |
| 1175 // The limit of allocation for a page in this space. | |
| 1176 virtual Address PageAllocationLimit(Page* page) = 0; | |
| 1177 | |
| 1178 void FlushTopPageWatermark() { | |
| 1179 AllocationTopPage()->SetCachedAllocationWatermark(top()); | |
| 1180 AllocationTopPage()->InvalidateWatermark(true); | |
| 1181 } | |
| 1182 | |
| 1183 // Current capacity without growing (Size() + Available() + Waste()). | |
| 1184 intptr_t Capacity() { return accounting_stats_.Capacity(); } | 1168 intptr_t Capacity() { return accounting_stats_.Capacity(); } |
| 1185 | 1169 |
| 1186 // Total amount of memory committed for this space. For paged | 1170 // Total amount of memory committed for this space. For paged |
| 1187 // spaces this equals the capacity. | 1171 // spaces this equals the capacity. |
| 1188 intptr_t CommittedMemory() { return Capacity(); } | 1172 intptr_t CommittedMemory() { return Capacity(); } |
| 1189 | 1173 |
| 1190 // Available bytes without growing. | 1174 // Sets the capacity, the available space and the wasted space to zero. |
| 1191 intptr_t Available() { return accounting_stats_.Available(); } | 1175 // The stats are rebuilt during sweeping by adding each page to the |
| 1176 // capacity and the size when it is encountered. As free spaces are | |
| 1177 // discovered during the sweeping they are subtracted from the size and added | |
| 1178 // to the available and wasted totals. | |
| 1179 void ClearStats() { accounting_stats_.Clear(); } | |
| 1192 | 1180 |
| 1193 // Allocated bytes in this space. | 1181 // Available bytes without growing. These are the bytes on the free list. |
| 1182 // The bytes in the linear allocation area are not included in this total | |
| 1183 // because updating the stats would slow down allocation. New pages are | |
| 1184 // immediately added to the free list so they show up here. | |
| 1185 intptr_t Available() { return free_list_.available(); } | |
| 1186 | |
| 1187 // Allocated bytes in this space. Garbage bytes that were not found due to | |
| 1188 // lazy sweeping are counted as being allocated! The bytes in the current | |
| 1189 // linear allocation area (between top and limit) are also counted here. | |
| 1194 virtual intptr_t Size() { return accounting_stats_.Size(); } | 1190 virtual intptr_t Size() { return accounting_stats_.Size(); } |
| 1195 | 1191 |
| 1196 // Wasted bytes due to fragmentation and not recoverable until the | 1192 // As size, but the bytes in the current linear allocation area are not |
| 1197 // next GC of this space. | 1193 // included. |
| 1198 intptr_t Waste() { return accounting_stats_.Waste(); } | 1194 virtual intptr_t SizeOfObjects() { return Size() - (limit() - top()); } |
| 1199 | 1195 |
| 1200 // Returns the address of the first object in this space. | 1196 // Wasted bytes in this space. These are just the bytes that were thrown away |
| 1201 Address bottom() { return first_page_->ObjectAreaStart(); } | 1197 // due to being too small to use for allocation. They do not include the |
| 1198 // free bytes that were not found at all due to lazy sweeping. | |
| 1199 virtual intptr_t Waste() { return accounting_stats_.Waste(); } | |
| 1202 | 1200 |
| 1203 // Returns the allocation pointer in this space. | 1201 // Returns the allocation pointer in this space. |
| 1204 Address top() { return allocation_info_.top; } | 1202 Address top() { return allocation_info_.top; } |
| 1203 Address limit() { return allocation_info_.limit; } | |
| 1205 | 1204 |
| 1206 // Allocate the requested number of bytes in the space if possible, return a | 1205 // Allocate the requested number of bytes in the space if possible, return a |
| 1207 // failure object if not. | 1206 // failure object if not. |
| 1208 MUST_USE_RESULT inline MaybeObject* AllocateRaw(int size_in_bytes); | 1207 MUST_USE_RESULT inline MaybeObject* AllocateRaw(int size_in_bytes); |
| 1209 | 1208 |
| 1210 virtual bool ReserveSpace(int bytes); | 1209 virtual bool ReserveSpace(int bytes); |
| 1211 | 1210 |
| 1212 // Used by ReserveSpace. | 1211 // Give a block of memory to the space's free list. It might be added to |
| 1213 virtual void PutRestOfCurrentPageOnFreeList(Page* current_page) = 0; | 1212 // the free list or accounted as waste. |
| 1214 | 1213 // If add_to_freelist is false then just accounting stats are updated and |
| 1215 // Free all pages in range from prev (exclusive) to last (inclusive). | 1214 // no attempt to add area to free list is made. |
| 1216 // Freed pages are moved to the end of page list. | 1215 void Free(Address start, int size_in_bytes) { |
| 1217 void FreePages(Page* prev, Page* last); | 1216 int wasted = free_list_.Free(start, size_in_bytes); |
| 1218 | 1217 accounting_stats_.DeallocateBytes(size_in_bytes - wasted); |
| 1219 // Deallocates a block. | 1218 } |
| 1220 virtual void DeallocateBlock(Address start, | |
| 1221 int size_in_bytes, | |
| 1222 bool add_to_freelist) = 0; | |
| 1223 | 1219 |
| 1224 // Set space allocation info. | 1220 // Set space allocation info. |
| 1225 void SetTop(Address top) { | 1221 void SetTop(Address top, Address limit) { |
| 1222 ASSERT(top == limit || | |
| 1223 Page::FromAddress(top) == Page::FromAddress(limit - 1)); | |
| 1226 allocation_info_.top = top; | 1224 allocation_info_.top = top; |
| 1227 allocation_info_.limit = PageAllocationLimit(Page::FromAllocationTop(top)); | 1225 allocation_info_.limit = limit; |
| 1226 accounting_stats_.AllocateBytes(limit - top); | |
| 1227 } | |
| 1228 | |
| 1229 void IncreaseCapacity(int size) { | |
| 1230 accounting_stats_.ExpandSpace(size); | |
| 1228 } | 1231 } |
| 1229 | 1232 |
| 1230 // Releases half of unused pages. | 1233 // Releases half of unused pages. |
| 1231 void Shrink(); | 1234 void Shrink(); |
| 1232 | 1235 |
| 1233 // Ensures that the capacity is at least 'capacity'. Returns false on failure. | 1236 // Ensures that the capacity is at least 'capacity'. Returns false on failure. |
| 1234 bool EnsureCapacity(int capacity); | 1237 bool EnsureCapacity(int capacity); |
| 1235 | 1238 |
| 1239 // The dummy page that anchors the linked list of pages. | |
| 1240 Page* anchor() { return &anchor_; } | |
| 1241 | |
| 1236 #ifdef ENABLE_HEAP_PROTECTION | 1242 #ifdef ENABLE_HEAP_PROTECTION |
| 1237 // Protect/unprotect the space by marking it read-only/writable. | 1243 // Protect/unprotect the space by marking it read-only/writable. |
| 1238 void Protect(); | 1244 void Protect(); |
| 1239 void Unprotect(); | 1245 void Unprotect(); |
| 1240 #endif | 1246 #endif |
| 1241 | 1247 |
| 1242 #ifdef DEBUG | 1248 #ifdef DEBUG |
| 1243 // Print meta info and objects in this space. | 1249 // Print meta info and objects in this space. |
| 1244 virtual void Print(); | 1250 virtual void Print(); |
| 1245 | 1251 |
| 1246 // Verify integrity of this space. | 1252 // Verify integrity of this space. |
| 1247 virtual void Verify(ObjectVisitor* visitor); | 1253 virtual void Verify(ObjectVisitor* visitor); |
| 1248 | 1254 |
| 1255 // Reports statistics for the space | |
| 1256 void ReportStatistics(); | |
| 1257 | |
| 1249 // Overridden by subclasses to verify space-specific object | 1258 // Overridden by subclasses to verify space-specific object |
| 1250 // properties (e.g., only maps or free-list nodes are in map space). | 1259 // properties (e.g., only maps or free-list nodes are in map space). |
| 1251 virtual void VerifyObject(HeapObject* obj) {} | 1260 virtual void VerifyObject(HeapObject* obj) {} |
| 1252 | 1261 |
| 1253 // Report code object related statistics | 1262 // Report code object related statistics |
| 1254 void CollectCodeStatistics(); | 1263 void CollectCodeStatistics(); |
| 1255 static void ReportCodeStatistics(); | 1264 static void ReportCodeStatistics(); |
| 1256 static void ResetCodeStatistics(); | 1265 static void ResetCodeStatistics(); |
| 1257 #endif | 1266 #endif |
| 1258 | 1267 |
| 1259 // Returns the page of the allocation pointer. | 1268 bool was_swept_conservatively() { return was_swept_conservatively_; } |
| 1260 Page* AllocationTopPage() { return TopPageOf(allocation_info_); } | 1269 void set_was_swept_conservatively(bool b) { was_swept_conservatively_ = b; } |
| 1261 | 1270 |
| 1262 protected: | 1271 protected: |
| 1263 // Maximum capacity of this space. | 1272 // Maximum capacity of this space. |
| 1264 intptr_t max_capacity_; | 1273 intptr_t max_capacity_; |
| 1265 | 1274 |
| 1266 // Accounting information for this space. | 1275 // Accounting information for this space. |
| 1267 AllocationStats accounting_stats_; | 1276 AllocationStats accounting_stats_; |
| 1268 | 1277 |
| 1269 // The first page in this space. | 1278 // The dummy page that anchors the double linked list of pages. |
| 1270 Page* first_page_; | 1279 Page anchor_; |
| 1271 | 1280 |
| 1272 // The last page in this space. Initially set in Setup, updated in | 1281 // The space's free list. |
| 1273 // Expand and Shrink. | 1282 OldSpaceFreeList free_list_; |
| 1274 Page* last_page_; | |
| 1275 | 1283 |
| 1276 // Normal allocation information. | 1284 // Normal allocation information. |
| 1277 AllocationInfo allocation_info_; | 1285 AllocationInfo allocation_info_; |
| 1278 | 1286 |
| 1279 // Bytes of each page that cannot be allocated. Possibly non-zero | 1287 // Bytes of each page that cannot be allocated. Possibly non-zero |
| 1280 // for pages in spaces with only fixed-size objects. Always zero | 1288 // for pages in spaces with only fixed-size objects. Always zero |
| 1281 // for pages in spaces with variable sized objects (those pages are | 1289 // for pages in spaces with variable sized objects (those pages are |
| 1282 // padded with free-list nodes). | 1290 // padded with free-list nodes). |
| 1283 int page_extra_; | 1291 int page_extra_; |
| 1284 | 1292 |
| 1285 // Sets allocation pointer to a page bottom. | 1293 bool was_swept_conservatively_; |
| 1286 static void SetAllocationInfo(AllocationInfo* alloc_info, Page* p); | |
| 1287 | 1294 |
| 1288 // Returns the top page specified by an allocation info structure. | 1295 // Sets allocation pointer. If the allocation pointer already pointed to a |
| 1289 static Page* TopPageOf(AllocationInfo alloc_info) { | 1296 // non-zero-length area then that area may be returned to the free list. |
| 1290 return Page::FromAllocationTop(alloc_info.limit); | 1297 void SetAllocationInfo(Address start, Address end); |
| 1291 } | |
| 1292 | |
| 1293 int CountPagesToTop() { | |
| 1294 Page* p = Page::FromAllocationTop(allocation_info_.top); | |
| 1295 PageIterator it(this, PageIterator::ALL_PAGES); | |
| 1296 int counter = 1; | |
| 1297 while (it.has_next()) { | |
| 1298 if (it.next() == p) return counter; | |
| 1299 counter++; | |
| 1300 } | |
| 1301 UNREACHABLE(); | |
| 1302 return -1; | |
| 1303 } | |
| 1304 | 1298 |
| 1305 // Expands the space by allocating a fixed number of pages. Returns false if | 1299 // Expands the space by allocating a fixed number of pages. Returns false if |
| 1306 // it cannot allocate requested number of pages from OS. | 1300 // it cannot allocate requested number of pages from OS. |
| 1307 bool Expand(); | 1301 bool Expand(); |
| 1308 | 1302 |
| 1309 // Generic fast case allocation function that tries linear allocation in | 1303 // Generic fast case allocation function that tries linear allocation at the |
| 1310 // the top page of 'alloc_info'. Returns NULL on failure. | 1304 // address denoted by top in allocation_info_. |
| 1311 inline HeapObject* AllocateLinearly(AllocationInfo* alloc_info, | 1305 inline HeapObject* AllocateLinearly(AllocationInfo* alloc_info, |
| 1312 int size_in_bytes); | 1306 int size_in_bytes); |
| 1313 | 1307 |
| 1314 // During normal allocation or deserialization, roll to the next page in | |
| 1315 // the space (there is assumed to be one) and allocate there. This | |
| 1316 // function is space-dependent. | |
| 1317 virtual HeapObject* AllocateInNextPage(Page* current_page, | |
| 1318 int size_in_bytes) = 0; | |
| 1319 | |
| 1320 // Slow path of AllocateRaw. This function is space-dependent. | 1308 // Slow path of AllocateRaw. This function is space-dependent. |
| 1321 MUST_USE_RESULT virtual HeapObject* SlowAllocateRaw(int size_in_bytes) = 0; | 1309 MUST_USE_RESULT virtual HeapObject* SlowAllocateRaw(int size_in_bytes); |
| 1322 | 1310 |
| 1323 #ifdef DEBUG | 1311 #ifdef DEBUG |
| 1324 // Returns the number of total pages in this space. | 1312 // Returns the number of total pages in this space. |
| 1325 int CountTotalPages(); | 1313 int CountTotalPages(); |
| 1326 #endif | 1314 #endif |
| 1327 private: | 1315 private: |
| 1328 friend class PageIterator; | 1316 friend class PageIterator; |
| 1329 }; | 1317 }; |
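
`AllocateLinearly` is the classic bump-pointer fast path over `allocation_info_`: advance `top` by the request size if it still fits under `limit`, otherwise fall through to `SlowAllocateRaw`. A self-contained sketch under simplified, hypothetical types (the real function deals in `HeapObject*` and tagged addresses):

```cpp
#include <cstdint>
#include <cstdlib>

// Bump-pointer sketch of the fast path AllocateLinearly implements over
// allocation_info_. Names here are simplified stand-ins, not the V8
// declarations.
struct AllocationInfo {
  uintptr_t top;
  uintptr_t limit;
};

void* AllocateLinearly(AllocationInfo* info, int size_in_bytes) {
  uintptr_t new_top = info->top + size_in_bytes;
  if (new_top > info->limit) return NULL;  // Caller takes the slow path.
  void* result = reinterpret_cast<void*>(info->top);
  info->top = new_top;
  return result;
}

int main() {
  char buffer[256];
  AllocationInfo info = { reinterpret_cast<uintptr_t>(buffer),
                          reinterpret_cast<uintptr_t>(buffer) + sizeof(buffer) };
  void* a = AllocateLinearly(&info, 64);   // Succeeds.
  void* b = AllocateLinearly(&info, 256);  // Fails: only 192 bytes left.
  return (a != NULL && b == NULL) ? 0 : 1;
}
```
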
| 1330 | 1318 |
| 1331 | 1319 |
| (...skipping 418 matching lines...) | |
| 1750 AllocationInfo* alloc_info); | 1738 AllocationInfo* alloc_info); |
| 1751 | 1739 |
| 1752 friend class SemiSpaceIterator; | 1740 friend class SemiSpaceIterator; |
| 1753 | 1741 |
| 1754 public: | 1742 public: |
| 1755 TRACK_MEMORY("NewSpace") | 1743 TRACK_MEMORY("NewSpace") |
| 1756 }; | 1744 }; |
| 1757 | 1745 |
| 1758 | 1746 |
| 1759 // ----------------------------------------------------------------------------- | 1747 // ----------------------------------------------------------------------------- |
| 1760 // Free lists for old object spaces | |
| 1761 // | |
| 1762 // Free-list nodes are free blocks in the heap. They look like heap objects | |
| 1763 // (free-list node pointers have the heap object tag, and they have a map like | |
| 1764 // a heap object). They have a size and a next pointer. The next pointer is | |
| 1765 // the raw address of the next free list node (or NULL). | |
| 1766 class FreeListNode: public HeapObject { | |
| 1767 public: | |
| 1768 // Obtain a free-list node from a raw address. This is not a cast because | |
| 1769 // it neither checks nor requires that the first word at the address is a map | |
| 1770 // pointer. | |
| 1771 static FreeListNode* FromAddress(Address address) { | |
| 1772 return reinterpret_cast<FreeListNode*>(HeapObject::FromAddress(address)); | |
| 1773 } | |
| 1774 | |
| 1775 static inline bool IsFreeListNode(HeapObject* object); | |
| 1776 | |
| 1777 // Set the size in bytes, which can be read with HeapObject::Size(). This | |
| 1778 // function also writes a map to the first word of the block so that it | |
| 1779 // looks like a heap object to the garbage collector and heap iteration | |
| 1780 // functions. | |
| 1781 void set_size(int size_in_bytes); | |
| 1782 | |
| 1783 // Accessors for the next field. | |
| 1784 inline Address next(); | |
| 1785 inline void set_next(Address next); | |
| 1786 | |
| 1787 inline void Zap(); | |
| 1788 | |
| 1789 private: | |
| 1790 static const int kNextOffset = POINTER_SIZE_ALIGN(ByteArray::kHeaderSize); | |
| 1791 | |
| 1792 DISALLOW_IMPLICIT_CONSTRUCTORS(FreeListNode); | |
| 1793 }; | |
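
The comment above describes the key disguise: a free block is stamped with a map and a size so heap iteration can step over it like any object, and one of its body words doubles as the link to the next free block. A standalone sketch of that layout, with illustrative offsets and tag values (the real node places `next` at `kNextOffset` and writes an actual map pointer):

```cpp
#include <cstdint>

// Sketch of a free block masquerading as a heap object: first word a
// map-like tag, second a size readable by generic iteration, third the
// raw link to the next free block. Offsets are illustrative, not the
// real ByteArray-based layout.
typedef uintptr_t Address;

const Address kFreeSpaceMapTag = 0xdead0001;  // Stand-in for a real map.

struct FreeListNode {
  Address map;    // Makes the block look like a heap object.
  Address size;   // Read via the generic object-size path.
  Address next;   // Raw address of the next free node, or 0.

  static FreeListNode* FromAddress(Address a) {
    return reinterpret_cast<FreeListNode*>(a);
  }
  void set_size(int size_in_bytes) {
    map = kFreeSpaceMapTag;
    size = size_in_bytes;
  }
};

int main() {
  Address block[8] = { 0 };  // A dead 64-byte area on an 8-byte-word build.
  FreeListNode* node =
      FreeListNode::FromAddress(reinterpret_cast<Address>(block));
  node->set_size(sizeof(block));
  node->next = 0;
  return node->map == kFreeSpaceMapTag ? 0 : 1;
}
```
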
| 1794 | |
| 1795 | |
| 1796 static const uintptr_t kFreeListZapValue = 0xfeed1eaf; | |
| 1797 | |
| 1798 | |
| 1799 // The free list for the old space. | |
| 1800 class OldSpaceFreeList BASE_EMBEDDED { | |
| 1801 public: | |
| 1802 explicit OldSpaceFreeList(AllocationSpace owner); | |
| 1803 | |
| 1804 // Clear the free list. | |
| 1805 void Reset(); | |
| 1806 | |
| 1807 // Return the number of bytes available on the free list. | |
| 1808 intptr_t available() { return available_; } | |
| 1809 | |
| 1810 // Place a node on the free list. The block of size 'size_in_bytes' | |
| 1811 // starting at 'start' is placed on the free list. The return value is the | |
| 1812 // number of bytes that have been lost due to internal fragmentation by | |
| 1813 // freeing the block. Bookkeeping information will be written to the block, | |
| 1814 // ie, its contents will be destroyed. The start address should be word | |
| 1815 // aligned, and the size should be a non-zero multiple of the word size. | |
| 1816 int Free(Address start, int size_in_bytes); | |
| 1817 | |
| 1818 // Allocate a block of size 'size_in_bytes' from the free list. The block | |
| 1819 // is unitialized. A failure is returned if no block is available. The | |
| 1820 // number of bytes lost to fragmentation is returned in the output parameter | |
| 1821 // 'wasted_bytes'. The size should be a non-zero multiple of the word size. | |
| 1822 MUST_USE_RESULT MaybeObject* Allocate(int size_in_bytes, int* wasted_bytes); | |
| 1823 | |
| 1824 void MarkNodes(); | |
| 1825 | |
| 1826 #ifdef DEBUG | |
| 1827 void Zap(); | |
| 1828 #endif | |
| 1829 | |
| 1830 private: | |
| 1831 // The size range of blocks, in bytes. (Smaller allocations are allowed, but | |
| 1832 // will always result in waste.) | |
| 1833 static const int kMinBlockSize = 2 * kPointerSize; | |
| 1834 static const int kMaxBlockSize = Page::kMaxHeapObjectSize; | |
| 1835 | |
| 1836 // The identity of the owning space, for building allocation Failure | |
| 1837 // objects. | |
| 1838 AllocationSpace owner_; | |
| 1839 | |
| 1840 // Total available bytes in all blocks on this free list. | |
| 1841 int available_; | |
| 1842 | |
| 1843 // Blocks are put on exact free lists in an array, indexed by size in words. | |
| 1844 // The available sizes are kept in an increasingly ordered list. Entries | |
| 1845 // corresponding to sizes < kMinBlockSize always have an empty free list | |
| 1846 // (but index kHead is used for the head of the size list). | |
| 1847 struct SizeNode { | |
| 1848 // Address of the head FreeListNode of the implied block size or NULL. | |
| 1849 Address head_node_; | |
| 1850 // Size (words) of the next larger available size if head_node_ != NULL. | |
| 1851 int next_size_; | |
| 1852 }; | |
| 1853 static const int kFreeListsLength = kMaxBlockSize / kPointerSize + 1; | |
| 1854 SizeNode free_[kFreeListsLength]; | |
| 1855 | |
| 1856 // Sentinel elements for the size list. Real elements lie strictly between | |
| // kHead and kEnd. | |
| 1857 static const int kHead = kMinBlockSize / kPointerSize - 1; | |
| 1858 static const int kEnd = kMaxInt; | |
| 1859 | |
| 1860 // We keep a "finger" in the size list to speed up a common pattern: | |
| 1861 // repeated requests for the same or increasing sizes. | |
| 1862 int finger_; | |
| 1863 | |
| 1864 // Starting from *prev, find and return the smallest size >= index (words), | |
| 1865 // or kEnd. Update *prev to be the largest size < index, or kHead. | |
| 1866 int FindSize(int index, int* prev) { | |
| 1867 int cur = free_[*prev].next_size_; | |
| 1868 while (cur < index) { | |
| 1869 *prev = cur; | |
| 1870 cur = free_[cur].next_size_; | |
| 1871 } | |
| 1872 return cur; | |
| 1873 } | |
| 1874 | |
| 1875 // Remove an existing element from the size list. | |
| 1876 void RemoveSize(int index) { | |
| 1877 int prev = kHead; | |
| 1878 int cur = FindSize(index, &prev); | |
| 1879 ASSERT(cur == index); | |
| 1880 free_[prev].next_size_ = free_[cur].next_size_; | |
| 1881 finger_ = prev; | |
| 1882 } | |
| 1883 | |
| 1884 // Insert a new element into the size list. | |
| 1885 void InsertSize(int index) { | |
| 1886 int prev = kHead; | |
| 1887 int cur = FindSize(index, &prev); | |
| 1888 ASSERT(cur != index); | |
| 1889 free_[prev].next_size_ = index; | |
| 1890 free_[index].next_size_ = cur; | |
| 1891 } | |
| 1892 | |
| 1893 // The size list is not updated during a sequence of calls to Free, but is | |
| 1894 // rebuilt before the next allocation. | |
| 1895 void RebuildSizeList(); | |
| 1896 bool needs_rebuild_; | |
| 1897 | |
| 1898 #ifdef DEBUG | |
| 1899 // Does this free list contain a free block located at the address of 'node'? | |
| 1900 bool Contains(FreeListNode* node); | |
| 1901 #endif | |
| 1902 | |
| 1903 DISALLOW_COPY_AND_ASSIGN(OldSpaceFreeList); | |
| 1904 }; | |
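
The size list threaded through `free_[]` is what keeps best-fit lookups cheap: only non-empty sizes appear, in increasing order, and `FindSize` walks forward from `kHead` (or from the finger left by a previous lookup). A toy model of that walk, with made-up sentinel values and sizes expressed in words as in the class above:

```cpp
#include <cstdio>

// Toy model of the ordered size list: each non-empty size records the
// next larger non-empty size, so finding the smallest size >= a request
// is a short forward walk.
const int kHead = 1;       // Sentinel before the smallest real size.
const int kEnd = 1 << 30;  // Sentinel after the largest.

struct SizeNode { int next_size_; };
SizeNode free_[128];

int FindSize(int index, int* prev) {
  int cur = free_[*prev].next_size_;
  while (cur < index) {
    *prev = cur;
    cur = free_[cur].next_size_;
  }
  return cur;
}

int main() {
  // Build a size list containing blocks of 4, 16 and 50 words.
  free_[kHead].next_size_ = 4;
  free_[4].next_size_ = 16;
  free_[16].next_size_ = 50;
  free_[50].next_size_ = kEnd;

  int prev = kHead;
  int fit = FindSize(10, &prev);  // Smallest available size >= 10 words.
  std::printf("best fit for 10 words: %d (prev=%d)\n", fit, prev);  // 16, 4
  return 0;
}
```
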
| 1905 | |
| 1906 | |
| 1907 // The free list for the map space. | |
| 1908 class FixedSizeFreeList BASE_EMBEDDED { | |
| 1909 public: | |
| 1910 FixedSizeFreeList(AllocationSpace owner, int object_size); | |
| 1911 | |
| 1912 // Clear the free list. | |
| 1913 void Reset(); | |
| 1914 | |
| 1915 // Return the number of bytes available on the free list. | |
| 1916 intptr_t available() { return available_; } | |
| 1917 | |
| 1918 // Place a node on the free list. The block starting at 'start' (assumed to | |
| 1919 // have size object_size_) is placed on the free list. Bookkeeping | |
| 1920 // information will be written to the block, i.e., its contents will be | |
| 1921 // destroyed. The start address should be word aligned. | |
| 1922 void Free(Address start); | |
| 1923 | |
| 1924 // Allocate a fixed-size block from the free list. The block is uninitialized. | |
| 1925 // A failure is returned if no block is available. | |
| 1926 MUST_USE_RESULT MaybeObject* Allocate(); | |
| 1927 | |
| 1928 void MarkNodes(); | |
| 1929 | |
| 1930 #ifdef DEBUG | |
| 1931 void Zap(); | |
| 1932 #endif | |
| 1933 | |
| 1934 private: | |
| 1935 // Available bytes on the free list. | |
| 1936 intptr_t available_; | |
| 1937 | |
| 1938 // The head of the free list. | |
| 1939 Address head_; | |
| 1940 | |
| 1941 // The tail of the free list. | |
| 1942 Address tail_; | |
| 1943 | |
| 1944 // The identity of the owning space, for building allocation Failure | |
| 1945 // objects. | |
| 1946 AllocationSpace owner_; | |
| 1947 | |
| 1948 // The size of the objects in this space. | |
| 1949 int object_size_; | |
| 1950 | |
| 1951 DISALLOW_COPY_AND_ASSIGN(FixedSizeFreeList); | |
| 1952 }; | |
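
Because every block in a fixed-size space has the same size, the map-space free list needs none of the size-list machinery above; it is a plain queue of blocks, appended at the tail and popped from the head. A minimal sketch in that spirit (hypothetical `kObjectSize`; the real class also stamps blocks with maps and records an owning space):

```cpp
#include <cstddef>

// Minimal fixed-size free list in the spirit of FixedSizeFreeList:
// no best-fit search, just a FIFO chain of equally sized blocks.
struct Node { Node* next; };

class FixedFreeList {
 public:
  FixedFreeList() : head_(NULL), tail_(NULL), available_(0) {}

  void Free(void* start) {  // Block assumed to be kObjectSize bytes.
    Node* node = static_cast<Node*>(start);
    node->next = NULL;
    if (tail_ == NULL) head_ = node; else tail_->next = node;
    tail_ = node;
    available_ += kObjectSize;
  }

  void* Allocate() {  // Returns NULL when the list is empty.
    if (head_ == NULL) return NULL;
    Node* node = head_;
    head_ = node->next;
    if (head_ == NULL) tail_ = NULL;
    available_ -= kObjectSize;
    return node;
  }

 private:
  static const int kObjectSize = 32;  // Hypothetical fixed object size.
  Node* head_;
  Node* tail_;
  int available_;
};

int main() {
  Node a[4], b[4];  // Two 32-byte blocks on a 64-bit build.
  FixedFreeList list;
  list.Free(a);
  list.Free(b);
  void* first = list.Allocate();  // FIFO: a's block comes back first.
  return first == a ? 0 : 1;
}
```
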
| 1953 | |
| 1954 | |
| 1955 // ----------------------------------------------------------------------------- | |
| 1956 // Old object space (excluding map objects) | 1748 // Old object space (excluding map objects) |
| 1957 | 1749 |
| 1958 class OldSpace : public PagedSpace { | 1750 class OldSpace : public PagedSpace { |
| 1959 public: | 1751 public: |
| 1960 // Creates an old space object with a given maximum capacity. | 1752 // Creates an old space object with a given maximum capacity. |
| 1961 // The constructor does not allocate pages from OS. | 1753 // The constructor does not allocate pages from OS. |
| 1962 explicit OldSpace(intptr_t max_capacity, | 1754 explicit OldSpace(intptr_t max_capacity, |
| 1963 AllocationSpace id, | 1755 AllocationSpace id, |
| 1964 Executability executable) | 1756 Executability executable) |
| 1965 : PagedSpace(max_capacity, id, executable), free_list_(id) { | 1757 : PagedSpace(max_capacity, id, executable) { |
| 1966 page_extra_ = 0; | 1758 page_extra_ = 0; |
| 1967 } | 1759 } |
| 1968 | 1760 |
| 1969 // The bytes available on the free list (ie, not above the linear allocation | |
| 1970 // pointer). | |
| 1971 intptr_t AvailableFree() { return free_list_.available(); } | |
| 1972 | |
| 1973 // The limit of allocation for a page in this space. | 1761 // The limit of allocation for a page in this space. |
| 1974 virtual Address PageAllocationLimit(Page* page) { | 1762 virtual Address PageAllocationLimit(Page* page) { |
| 1975 return page->ObjectAreaEnd(); | 1763 return page->ObjectAreaEnd(); |
| 1976 } | 1764 } |
| 1977 | 1765 |
| 1978 // Give a block of memory to the space's free list. It might be added to | |
| 1979 // the free list or accounted as waste. | |
| 1980 // If add_to_freelist is false then just accounting stats are updated and | |
| 1981 // no attempt to add area to free list is made. | |
| 1982 void Free(Address start, int size_in_bytes, bool add_to_freelist) { | |
| 1983 accounting_stats_.DeallocateBytes(size_in_bytes); | |
| 1984 | |
| 1985 if (add_to_freelist) { | |
| 1986 int wasted_bytes = free_list_.Free(start, size_in_bytes); | |
| 1987 accounting_stats_.WasteBytes(wasted_bytes); | |
| 1988 } | |
| 1989 } | |
| 1990 | |
| 1991 virtual void DeallocateBlock(Address start, | |
| 1992 int size_in_bytes, | |
| 1993 bool add_to_freelist); | |
| 1994 | |
| 1995 // Prepare for full garbage collection. Resets the relocation pointer and | 1766 // Prepare for full garbage collection. Resets the relocation pointer and |
| 1996 // clears the free list. | 1767 // clears the free list. |
| 1997 virtual void PrepareForMarkCompact(bool will_compact); | 1768 virtual void PrepareForMarkCompact(bool will_compact); |
| 1998 | 1769 |
| 1999 virtual void PutRestOfCurrentPageOnFreeList(Page* current_page); | |
| 2000 | |
| 2001 void MarkFreeListNodes() { free_list_.MarkNodes(); } | |
| 2002 | |
| 2003 #ifdef DEBUG | |
| 2004 // Reports statistics for the space | |
| 2005 void ReportStatistics(); | |
| 2006 | |
| 2007 OldSpaceFreeList* free_list() { return &free_list_; } | |
| 2008 #endif | |
| 2009 | |
| 2010 protected: | |
| 2011 // Virtual function in the superclass. Slow path of AllocateRaw. | |
| 2012 MUST_USE_RESULT HeapObject* SlowAllocateRaw(int size_in_bytes); | |
| 2013 | |
| 2014 // Virtual function in the superclass. Allocate linearly at the start of | |
| 2015 // the page after current_page (there is assumed to be one). | |
| 2016 HeapObject* AllocateInNextPage(Page* current_page, int size_in_bytes); | |
| 2017 | |
| 2018 private: | |
| 2019 // The space's free list. | |
| 2020 OldSpaceFreeList free_list_; | |
| 2021 | |
| 2022 public: | 1770 public: |
| 2023 TRACK_MEMORY("OldSpace") | 1771 TRACK_MEMORY("OldSpace") |
| 2024 }; | 1772 }; |
| 2025 | 1773 |
| 2026 | 1774 |
| 2027 // ----------------------------------------------------------------------------- | 1775 // ----------------------------------------------------------------------------- |
| 2028 // Old space for objects of a fixed size | 1776 // Old space for objects of a fixed size |
| 2029 | 1777 |
| 2030 class FixedSpace : public PagedSpace { | 1778 class FixedSpace : public PagedSpace { |
| 2031 public: | 1779 public: |
| 2032 FixedSpace(intptr_t max_capacity, | 1780 FixedSpace(intptr_t max_capacity, |
| 2033 AllocationSpace id, | 1781 AllocationSpace id, |
| 2034 int object_size_in_bytes, | 1782 int object_size_in_bytes, |
| 2035 const char* name) | 1783 const char* name) |
| 2036 : PagedSpace(max_capacity, id, NOT_EXECUTABLE), | 1784 : PagedSpace(max_capacity, id, NOT_EXECUTABLE), |
| 2037 object_size_in_bytes_(object_size_in_bytes), | 1785 object_size_in_bytes_(object_size_in_bytes), |
| 2038 name_(name), | 1786 name_(name) { |
| 2039 free_list_(id, object_size_in_bytes) { | |
| 2040 page_extra_ = Page::kObjectAreaSize % object_size_in_bytes; | 1787 page_extra_ = Page::kObjectAreaSize % object_size_in_bytes; |
| 2041 } | 1788 } |
| 2042 | 1789 |
| 2043 // The limit of allocation for a page in this space. | 1790 // The limit of allocation for a page in this space. |
| 2044 virtual Address PageAllocationLimit(Page* page) { | 1791 virtual Address PageAllocationLimit(Page* page) { |
| 2045 return page->ObjectAreaEnd() - page_extra_; | 1792 return page->ObjectAreaEnd() - page_extra_; |
| 2046 } | 1793 } |
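
`page_extra_` is the tail of each page that cannot hold one more whole object, and `PageAllocationLimit` simply stops allocation short of it. A worked arithmetic example under an assumed 8 KB object area (the real `Page::kObjectAreaSize` depends on build configuration):

```cpp
#include <cstdio>

// Worked example of the page_extra_ computation, under an assumed 8 KB
// object area per page. With 88-byte objects, the last few bytes of
// every page can never hold a whole object, so they are excluded from
// the allocation limit rather than padded with free-list nodes.
int main() {
  const int kObjectAreaSize = 8 * 1024;  // Hypothetical.
  const int object_size_in_bytes = 88;   // e.g. a Map-sized object.
  int page_extra = kObjectAreaSize % object_size_in_bytes;
  std::printf("objects per page: %d, unusable tail: %d bytes\n",
              kObjectAreaSize / object_size_in_bytes, page_extra);
  // objects per page: 93, unusable tail: 8 bytes
  return 0;
}
```
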
| 2047 | 1794 |
| 2048 int object_size_in_bytes() { return object_size_in_bytes_; } | 1795 int object_size_in_bytes() { return object_size_in_bytes_; } |
| 2049 | 1796 |
| 2050 // Give a fixed sized block of memory to the space's free list. | |
| 2051 // If add_to_freelist is false then just accounting stats are updated and | |
| 2052 // no attempt to add area to free list is made. | |
| 2053 void Free(Address start, bool add_to_freelist) { | |
| 2054 if (add_to_freelist) { | |
| 2055 free_list_.Free(start); | |
| 2056 } | |
| 2057 accounting_stats_.DeallocateBytes(object_size_in_bytes_); | |
| 2058 } | |
| 2059 | |
| 2060 // Prepares for a mark-compact GC. | 1797 // Prepares for a mark-compact GC. |
| 2061 virtual void PrepareForMarkCompact(bool will_compact); | 1798 virtual void PrepareForMarkCompact(bool will_compact); |
| 2062 | 1799 |
| 2063 virtual void PutRestOfCurrentPageOnFreeList(Page* current_page); | |
| 2064 | |
| 2065 virtual void DeallocateBlock(Address start, | |
| 2066 int size_in_bytes, | |
| 2067 bool add_to_freelist); | |
| 2068 | |
| 2069 void MarkFreeListNodes() { free_list_.MarkNodes(); } | 1800 void MarkFreeListNodes() { free_list_.MarkNodes(); } |
| 2070 | 1801 |
| 2071 #ifdef DEBUG | |
| 2072 // Reports statistic info of the space | |
| 2073 void ReportStatistics(); | |
| 2074 | |
| 2075 FixedSizeFreeList* free_list() { return &free_list_; } | |
| 2076 #endif | |
| 2077 | |
| 2078 protected: | 1802 protected: |
| 2079 // Virtual function in the superclass. Slow path of AllocateRaw. | |
| 2080 MUST_USE_RESULT HeapObject* SlowAllocateRaw(int size_in_bytes); | |
| 2081 | |
| 2082 // Virtual function in the superclass. Allocate linearly at the start of | |
| 2083 // the page after current_page (there is assumed to be one). | |
| 2084 HeapObject* AllocateInNextPage(Page* current_page, int size_in_bytes); | |
| 2085 | |
| 2086 void ResetFreeList() { | 1803 void ResetFreeList() { |
| 2087 free_list_.Reset(); | 1804 free_list_.Reset(); |
| 2088 } | 1805 } |
| 2089 | 1806 |
| 2090 private: | 1807 private: |
| 2091 // The size of objects in this space. | 1808 // The size of objects in this space. |
| 2092 int object_size_in_bytes_; | 1809 int object_size_in_bytes_; |
| 2093 | 1810 |
| 2094 // The name of this space. | 1811 // The name of this space. |
| 2095 const char* name_; | 1812 const char* name_; |
| 2096 | |
| 2097 // The space's free list. | |
| 2098 FixedSizeFreeList free_list_; | |
| 2099 }; | 1813 }; |
| 2100 | 1814 |
| 2101 | 1815 |
| 2102 // ----------------------------------------------------------------------------- | 1816 // ----------------------------------------------------------------------------- |
| 2103 // Old space for all map objects | 1817 // Old space for all map objects |
| 2104 | 1818 |
| 2105 class MapSpace : public FixedSpace { | 1819 class MapSpace : public FixedSpace { |
| 2106 public: | 1820 public: |
| 2107 // Creates a map space object with a maximum capacity. | 1821 // Creates a map space object with a maximum capacity. |
| 2108 MapSpace(intptr_t max_capacity, int max_map_space_pages, AllocationSpace id) | 1822 MapSpace(intptr_t max_capacity, int max_map_space_pages, AllocationSpace id) |
| (...skipping 178 matching lines...) | |
| 2287 | 2001 |
| 2288 private: | 2002 private: |
| 2289 LargePage* current_; | 2003 LargePage* current_; |
| 2290 HeapObjectCallback size_func_; | 2004 HeapObjectCallback size_func_; |
| 2291 }; | 2005 }; |
| 2292 | 2006 |
| 2293 | 2007 |
| 2294 } } // namespace v8::internal | 2008 } } // namespace v8::internal |
| 2295 | 2009 |
| 2296 #endif // V8_SPACES_H_ | 2010 #endif // V8_SPACES_H_ |