| OLD | NEW |
| 1 // Copyright 2011 the V8 project authors. All rights reserved. | 1 // Copyright 2011 the V8 project authors. All rights reserved. |
| 2 // Redistribution and use in source and binary forms, with or without | 2 // Redistribution and use in source and binary forms, with or without |
| 3 // modification, are permitted provided that the following conditions are | 3 // modification, are permitted provided that the following conditions are |
| 4 // met: | 4 // met: |
| 5 // | 5 // |
| 6 // * Redistributions of source code must retain the above copyright | 6 // * Redistributions of source code must retain the above copyright |
| 7 // notice, this list of conditions and the following disclaimer. | 7 // notice, this list of conditions and the following disclaimer. |
| 8 // * Redistributions in binary form must reproduce the above | 8 // * Redistributions in binary form must reproduce the above |
| 9 // copyright notice, this list of conditions and the following | 9 // copyright notice, this list of conditions and the following |
| 10 // disclaimer in the documentation and/or other materials provided | 10 // disclaimer in the documentation and/or other materials provided |
| (...skipping 31 matching lines...) |
| 42 // | 42 // |
| 43 // A JS heap consists of a young generation, an old generation, and a large | 43 // A JS heap consists of a young generation, an old generation, and a large |
| 44 // object space. The young generation is divided into two semispaces. A | 44 // object space. The young generation is divided into two semispaces. A |
| 45 // scavenger implements Cheney's copying algorithm. The old generation is | 45 // scavenger implements Cheney's copying algorithm. The old generation is |
| 46 // separated into a map space and an old object space. The map space contains | 46 // separated into a map space and an old object space. The map space contains |
| 47 // all (and only) map objects, the rest of old objects go into the old space. | 47 // all (and only) map objects, the rest of old objects go into the old space. |
| 48 // The old generation is collected by a mark-sweep-compact collector. | 48 // The old generation is collected by a mark-sweep-compact collector. |
| 49 // | 49 // |
| 50 // The semispaces of the young generation are contiguous. The old and map | 50 // The semispaces of the young generation are contiguous. The old and map |
| 51 // spaces consists of a list of pages. A page has a page header and an object | 51 // spaces consists of a list of pages. A page has a page header and an object |
| 52 // area. A page size is deliberately chosen as 8K bytes. | 52 // area. |
| 53 // The first word of a page is an opaque page header that has the | |
| 54 // address of the next page and its ownership information. The second word may | |
| 55 // have the allocation top address of this page. Heap objects are aligned to the | |
| 56 // pointer size. | |
| 57 // | 53 // |
| 58 // There is a separate large object space for objects larger than | 54 // There is a separate large object space for objects larger than |
| 59 // Page::kMaxHeapObjectSize, so that they do not have to move during | 55 // Page::kMaxHeapObjectSize, so that they do not have to move during |
| 60 // collection. The large object space is paged. Pages in large object space | 56 // collection. The large object space is paged. Pages in large object space |
| 61 // may be larger than 8K. | 57 // may be larger than the page size. |
| 62 // | 58 // |
| 63 // A card marking write barrier is used to keep track of intergenerational | 59 // A store-buffer based write barrier is used to keep track of intergenerational |
| 64 // references. Old space pages are divided into regions of Page::kRegionSize | 60 // references. See store-buffer.h. |
| 65 // size. Each region has a corresponding dirty bit in the page header which is | |
| 66 // set if the region might contain pointers to new space. For details about | |
| 67 // dirty bits encoding see comments in the Page::GetRegionNumberForAddress() | |
| 68 // method body. | |
| 69 // | 61 // |
| 70 // During scavenges and mark-sweep collections we iterate intergenerational | 62 // During scavenges and mark-sweep collections we sometimes (after a store |
| 71 // pointers without decoding heap object maps so if the page belongs to old | 63 // buffer overflow) iterate intergenerational pointers without decoding heap |
| 72 // pointer space or large object space it is essential to guarantee that | 64 // object maps so if the page belongs to old pointer space or large object |
| 73 // the page does not contain any garbage pointers to new space: every pointer | 65 // space it is essential to guarantee that the page does not contain any |
| 74 // aligned word which satisfies the Heap::InNewSpace() predicate must be a | 66 // garbage pointers to new space: every pointer aligned word which satisfies |
| 75 // pointer to a live heap object in new space. Thus objects in old pointer | 67 // the Heap::InNewSpace() predicate must be a pointer to a live heap object in |
| 76 // and large object spaces should have a special layout (e.g. no bare integer | 68 // new space. Thus objects in old pointer and large object spaces should have a |
| 77 // fields). This requirement does not apply to map space which is iterated in | 69 // special layout (e.g. no bare integer fields). This requirement does not |
| 78 // a special fashion. However we still require pointer fields of dead maps to | 70 // apply to map space which is iterated in a special fashion. However we still |
| 79 // be cleaned. | 71 // require pointer fields of dead maps to be cleaned. |
| 80 // | 72 // |
| 81 // To enable lazy cleaning of old space pages we use a notion of allocation | 73 // To enable lazy cleaning of old space pages we can mark chunks of the page |
| 82 // watermark. Every pointer under watermark is considered to be well formed. | 74 // as being garbage. Garbage sections are marked with a special map. These |
| 83 // Page allocation watermark is not necessarily equal to page allocation top but | 75 // sections are skipped when scanning the page, even if we are otherwise |
| 84 // all alive objects on page should reside under allocation watermark. | 76 // scanning without regard for object boundaries. Garbage sections are chained |
| 85 // During scavenge allocation watermark might be bumped and invalid pointers | 77 // together to form a free list after a GC. Garbage sections created outside |
| 86 // might appear below it. To avoid following them we store a valid watermark | 78 // of GCs by object truncation etc. may not be in the free list chain. Very |
| 87 // into special field in the page header and set a page WATERMARK_INVALIDATED | 79 // small free spaces are ignored, they need only be cleaned of bogus pointers |
| 88 // flag. For details see comments in the Page::SetAllocationWatermark() method | 80 // into new space. |
| 89 // body. | |
| 90 // | 81 // |
| 82 // Each page may have up to one special garbage section. The start of this |
| 83 // section is denoted by the top field in the space. The end of the section |
| 84 // is denoted by the limit field in the space. This special garbage section |
| 85 // is not marked with a free space map in the data. The point of this section |
| 86 // is to enable linear allocation without having to constantly update the byte |
| 87 // array every time the top field is updated and a new object is created. The |
| 88 // special garbage section is not in the chain of garbage sections. |
| 89 // |
| 90 // Since the top and limit fields are in the space, not the page, only one page |
| 91 // has a special garbage section, and if the top and limit are equal then there |
| 92 // is no special garbage section. |
| 91 | 93 |
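
A small standalone sketch of the linear-allocation scheme the new comment block describes: the space keeps a top/limit pair for one special garbage section and bumps top on every allocation instead of writing a free-space map each time. This is not V8 code; the struct and helper names below are hypothetical and only mirror the idea.

```cpp
#include <cstddef>

// Hypothetical stand-in for the space's linear allocation area: the region
// [top, limit) is the "special garbage section" described above.
struct LinearAllocationArea {
  char* top;    // next free byte
  char* limit;  // end of the current linear area
};

// Bump-pointer allocation: only the top pointer moves; no free-space map is
// written for the remaining gap.  Returns NULL when the area is exhausted and
// a new linear area (or a GC) is needed.
char* AllocateLinearly(LinearAllocationArea* area, size_t size_in_bytes) {
  if (static_cast<size_t>(area->limit - area->top) < size_in_bytes) return NULL;
  char* result = area->top;
  area->top += size_in_bytes;
  return result;
}
```
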
| 92 // Some assertion macros used in the debugging mode. | 94 // Some assertion macros used in the debugging mode. |
| 93 | 95 |
| 94 #define ASSERT_PAGE_ALIGNED(address) \ | 96 #define ASSERT_PAGE_ALIGNED(address) \ |
| 95 ASSERT((OffsetFrom(address) & Page::kPageAlignmentMask) == 0) | 97 ASSERT((OffsetFrom(address) & Page::kPageAlignmentMask) == 0) |
| 96 | 98 |
| 97 #define ASSERT_OBJECT_ALIGNED(address) \ | 99 #define ASSERT_OBJECT_ALIGNED(address) \ |
| 98 ASSERT((OffsetFrom(address) & kObjectAlignmentMask) == 0) | 100 ASSERT((OffsetFrom(address) & kObjectAlignmentMask) == 0) |
| 99 | 101 |
| 100 #define ASSERT_MAP_ALIGNED(address) \ | 102 #define ASSERT_MAP_ALIGNED(address) \ |
| 101 ASSERT((OffsetFrom(address) & kMapAlignmentMask) == 0) | 103 ASSERT((OffsetFrom(address) & kMapAlignmentMask) == 0) |
| 102 | 104 |
| 103 #define ASSERT_OBJECT_SIZE(size) \ | 105 #define ASSERT_OBJECT_SIZE(size) \ |
| 104 ASSERT((0 < size) && (size <= Page::kMaxHeapObjectSize)) | 106 ASSERT((0 < size) && (size <= Page::kMaxHeapObjectSize)) |
| 105 | 107 |
| 106 #define ASSERT_PAGE_OFFSET(offset) \ | 108 #define ASSERT_PAGE_OFFSET(offset) \ |
| 107 ASSERT((Page::kObjectStartOffset <= offset) \ | 109 ASSERT((Page::kObjectStartOffset <= offset) \ |
| 108 && (offset <= Page::kPageSize)) | 110 && (offset <= Page::kPageSize)) |
| 109 | 111 |
| 110 #define ASSERT_MAP_PAGE_INDEX(index) \ | 112 #define ASSERT_MAP_PAGE_INDEX(index) \ |
| 111 ASSERT((0 <= index) && (index <= MapSpace::kMaxMapPageIndex)) | 113 ASSERT((0 <= index) && (index <= MapSpace::kMaxMapPageIndex)) |
| 112 | 114 |
| 113 | 115 |
| 114 class PagedSpace; | 116 class PagedSpace; |
| 115 class MemoryAllocator; | 117 class MemoryAllocator; |
| 116 class AllocationInfo; | 118 class AllocationInfo; |
| 119 class Space; |
| 120 class FreeList; |
| 121 class MemoryChunk; |
| 122 |
| 123 class MarkBit { |
| 124 public: |
| 125 typedef uint32_t CellType; |
| 126 |
| 127 inline MarkBit(CellType* cell, CellType mask, bool data_only) |
| 128 : cell_(cell), mask_(mask), data_only_(data_only) { } |
| 129 |
| 130 inline CellType* cell() { return cell_; } |
| 131 inline CellType mask() { return mask_; } |
| 132 |
| 133 #ifdef DEBUG |
| 134 bool operator==(const MarkBit& other) { |
| 135 return cell_ == other.cell_ && mask_ == other.mask_; |
| 136 } |
| 137 #endif |
| 138 |
| 139 inline void Set() { *cell_ |= mask_; } |
| 140 inline bool Get() { return (*cell_ & mask_) != 0; } |
| 141 inline void Clear() { *cell_ &= ~mask_; } |
| 142 |
| 143 inline bool data_only() { return data_only_; } |
| 144 |
| 145 inline MarkBit Next() { |
| 146 CellType new_mask = mask_ << 1; |
| 147 if (new_mask == 0) { |
| 148 return MarkBit(cell_ + 1, 1, data_only_); |
| 149 } else { |
| 150 return MarkBit(cell_, new_mask, data_only_); |
| 151 } |
| 152 } |
| 153 |
| 154 private: |
| 155 CellType* cell_; |
| 156 CellType mask_; |
| 157 // This boolean indicates that the object is in a data-only space with no |
| 158 // pointers. This enables some optimizations when marking. |
| 159 // It is expected that this field is inlined and turned into control flow |
| 160 // at the place where the MarkBit object is created. |
| 161 bool data_only_; |
| 162 }; |
| 163 |
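
A minimal usage sketch for the MarkBit class above, assuming only the declarations from this header (the concrete mask values are illustrative): Next() shifts the mask within the current 32-bit cell and rolls over to bit 0 of the following cell when the shift overflows.

```cpp
// Illustrative only: walk a mark bit across a cell boundary.
void MarkBitExample() {
  MarkBit::CellType cells[2] = { 0, 0 };

  MarkBit last_bit_of_cell0(&cells[0], 1u << 31, false);
  last_bit_of_cell0.Set();                          // cells[0] == 0x80000000
  ASSERT(last_bit_of_cell0.Get());

  MarkBit first_bit_of_cell1 = last_bit_of_cell0.Next();
  first_bit_of_cell1.Set();                         // cells[1] == 0x00000001
  first_bit_of_cell1.Clear();                       // cells[1] == 0x00000000
}
```
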
| 164 |
| 165 // Bitmap is a sequence of cells each containing fixed number of bits. |
| 166 class Bitmap { |
| 167 public: |
| 168 static const uint32_t kBitsPerCell = 32; |
| 169 static const uint32_t kBitsPerCellLog2 = 5; |
| 170 static const uint32_t kBitIndexMask = kBitsPerCell - 1; |
| 171 static const uint32_t kBytesPerCell = kBitsPerCell / kBitsPerByte; |
| 172 static const uint32_t kBytesPerCellLog2 = kBitsPerCellLog2 - kBitsPerByteLog2; |
| 173 |
| 174 static const size_t kLength = |
| 175 (1 << kPageSizeBits) >> (kPointerSizeLog2); |
| 176 |
| 177 static const size_t kSize = |
| 178 (1 << kPageSizeBits) >> (kPointerSizeLog2 + kBitsPerByteLog2); |
| 179 |
| 180 |
| 181 static int CellsForLength(int length) { |
| 182 return (length + kBitsPerCell - 1) >> kBitsPerCellLog2; |
| 183 } |
| 184 |
| 185 int CellsCount() { |
| 186 return CellsForLength(kLength); |
| 187 } |
| 188 |
| 189 static int SizeFor(int cells_count) { |
| 190 return sizeof(MarkBit::CellType) * cells_count; |
| 191 } |
| 192 |
| 193 INLINE(static uint32_t IndexToCell(uint32_t index)) { |
| 194 return index >> kBitsPerCellLog2; |
| 195 } |
| 196 |
| 197 INLINE(static uint32_t CellToIndex(uint32_t index)) { |
| 198 return index << kBitsPerCellLog2; |
| 199 } |
| 200 |
| 201 INLINE(static uint32_t CellAlignIndex(uint32_t index)) { |
| 202 return (index + kBitIndexMask) & ~kBitIndexMask; |
| 203 } |
| 204 |
| 205 INLINE(MarkBit::CellType* cells()) { |
| 206 return reinterpret_cast<MarkBit::CellType*>(this); |
| 207 } |
| 208 |
| 209 INLINE(Address address()) { |
| 210 return reinterpret_cast<Address>(this); |
| 211 } |
| 212 |
| 213 INLINE(static Bitmap* FromAddress(Address addr)) { |
| 214 return reinterpret_cast<Bitmap*>(addr); |
| 215 } |
| 216 |
| 217 inline MarkBit MarkBitFromIndex(uint32_t index, bool data_only = false) { |
| 218 MarkBit::CellType mask = 1 << (index & kBitIndexMask); |
| 219 MarkBit::CellType* cell = this->cells() + (index >> kBitsPerCellLog2); |
| 220 return MarkBit(cell, mask, data_only); |
| 221 } |
| 222 |
| 223 static inline void Clear(MemoryChunk* chunk); |
| 224 |
| 225 static void PrintWord(uint32_t word, uint32_t himask = 0) { |
| 226 for (uint32_t mask = 1; mask != 0; mask <<= 1) { |
| 227 if ((mask & himask) != 0) PrintF("["); |
| 228 PrintF((mask & word) ? "1" : "0"); |
| 229 if ((mask & himask) != 0) PrintF("]"); |
| 230 } |
| 231 } |
| 232 |
| 233 class CellPrinter { |
| 234 public: |
| 235 CellPrinter() : seq_start(0), seq_type(0), seq_length(0) { } |
| 236 |
| 237 void Print(uint32_t pos, uint32_t cell) { |
| 238 if (cell == seq_type) { |
| 239 seq_length++; |
| 240 return; |
| 241 } |
| 242 |
| 243 Flush(); |
| 244 |
| 245 if (IsSeq(cell)) { |
| 246 seq_start = pos; |
| 247 seq_length = 0; |
| 248 seq_type = cell; |
| 249 return; |
| 250 } |
| 251 |
| 252 PrintF("%d: ", pos); |
| 253 PrintWord(cell); |
| 254 PrintF("\n"); |
| 255 } |
| 256 |
| 257 void Flush() { |
| 258 if (seq_length > 0) { |
| 259 PrintF("%d: %dx%d\n", |
| 260 seq_start, |
| 261 seq_type == 0 ? 0 : 1, |
| 262 seq_length * kBitsPerCell); |
| 263 seq_length = 0; |
| 264 } |
| 265 } |
| 266 |
| 267 static bool IsSeq(uint32_t cell) { return cell == 0 || cell == 0xFFFFFFFF; } |
| 268 |
| 269 private: |
| 270 uint32_t seq_start; |
| 271 uint32_t seq_type; |
| 272 uint32_t seq_length; |
| 273 }; |
| 274 |
| 275 void Print() { |
| 276 CellPrinter printer; |
| 277 for (int i = 0; i < CellsCount(); i++) { |
| 278 printer.Print(i, cells()[i]); |
| 279 } |
| 280 printer.Flush(); |
| 281 PrintF("\n"); |
| 282 } |
| 283 |
| 284 bool IsClean() { |
| 285 for (int i = 0; i < CellsCount(); i++) { |
| 286 if (cells()[i] != 0) return false; |
| 287 } |
| 288 return true; |
| 289 } |
| 290 }; |
| 291 |
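
The Bitmap above keeps one mark bit per pointer-sized word of the chunk. A short sketch of the index arithmetic using the static helpers declared above (the concrete index is illustrative):

```cpp
// Illustrative only: how a word index inside a chunk maps onto bitmap cells.
uint32_t BitmapIndexExample() {
  const uint32_t index = 70;                              // 70th word in the chunk

  uint32_t cell = Bitmap::IndexToCell(index);             // 70 >> 5 == 2
  uint32_t bit_in_cell = index & Bitmap::kBitIndexMask;   // 70 & 31 == 6

  // MarkBitFromIndex(index) returns a MarkBit addressing cells()[cell] with
  // mask 1 << bit_in_cell, i.e. cell 2, bit 6 for this index.
  return cell * Bitmap::kBitsPerCell + bit_in_cell;       // round-trips back to 70
}
```
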
| 292 |
| 293 class SkipList; |
| 294 class SlotsBuffer; |
| 295 |
| 296 // MemoryChunk represents a memory region owned by a specific space. |
| 297 // It is divided into the header and the body. Chunk start is always |
| 298 // 1MB aligned. Start of the body is aligned so it can accommodate |
| 299 // any heap object. |
| 300 class MemoryChunk { |
| 301 public: |
| 302 // Only works if the pointer is in the first kPageSize of the MemoryChunk. |
| 303 static MemoryChunk* FromAddress(Address a) { |
| 304 return reinterpret_cast<MemoryChunk*>(OffsetFrom(a) & ~kAlignmentMask); |
| 305 } |
| 306 |
| 307 // Only works for addresses in pointer spaces, not data or code spaces. |
| 308 static inline MemoryChunk* FromAnyPointerAddress(Address addr); |
| 309 |
| 310 Address address() { return reinterpret_cast<Address>(this); } |
| 311 |
| 312 bool is_valid() { return address() != NULL; } |
| 313 |
| 314 MemoryChunk* next_chunk() const { return next_chunk_; } |
| 315 MemoryChunk* prev_chunk() const { return prev_chunk_; } |
| 316 |
| 317 void set_next_chunk(MemoryChunk* next) { next_chunk_ = next; } |
| 318 void set_prev_chunk(MemoryChunk* prev) { prev_chunk_ = prev; } |
| 319 |
| 320 Space* owner() const { |
| 321 if ((reinterpret_cast<intptr_t>(owner_) & kFailureTagMask) == |
| 322 kFailureTag) { |
| 323 return reinterpret_cast<Space*>(owner_ - kFailureTag); |
| 324 } else { |
| 325 return NULL; |
| 326 } |
| 327 } |
| 328 |
| 329 void set_owner(Space* space) { |
| 330 ASSERT((reinterpret_cast<intptr_t>(space) & kFailureTagMask) == 0); |
| 331 owner_ = reinterpret_cast<Address>(space) + kFailureTag; |
| 332 ASSERT((reinterpret_cast<intptr_t>(owner_) & kFailureTagMask) == |
| 333 kFailureTag); |
| 334 } |
| 335 |
| 336 VirtualMemory* reserved_memory() { |
| 337 return &reservation_; |
| 338 } |
| 339 |
| 340 void InitializeReservedMemory() { |
| 341 reservation_.Reset(); |
| 342 } |
| 343 |
| 344 void set_reserved_memory(VirtualMemory* reservation) { |
| 345 ASSERT_NOT_NULL(reservation); |
| 346 reservation_.TakeControl(reservation); |
| 347 } |
| 348 |
| 349 bool scan_on_scavenge() { return IsFlagSet(SCAN_ON_SCAVENGE); } |
| 350 void initialize_scan_on_scavenge(bool scan) { |
| 351 if (scan) { |
| 352 SetFlag(SCAN_ON_SCAVENGE); |
| 353 } else { |
| 354 ClearFlag(SCAN_ON_SCAVENGE); |
| 355 } |
| 356 } |
| 357 inline void set_scan_on_scavenge(bool scan); |
| 358 |
| 359 int store_buffer_counter() { return store_buffer_counter_; } |
| 360 void set_store_buffer_counter(int counter) { |
| 361 store_buffer_counter_ = counter; |
| 362 } |
| 363 |
| 364 Address body() { return address() + kObjectStartOffset; } |
| 365 |
| 366 Address body_limit() { return address() + size(); } |
| 367 |
| 368 int body_size() { return static_cast<int>(size() - kObjectStartOffset); } |
| 369 |
| 370 bool Contains(Address addr) { |
| 371 return addr >= body() && addr < address() + size(); |
| 372 } |
| 373 |
| 374 // Checks whether addr can be a limit of addresses in this page. |
| 375 // It's a limit if it's in the page, or if it's just after the |
| 376 // last byte of the page. |
| 377 bool ContainsLimit(Address addr) { |
| 378 return addr >= body() && addr <= address() + size(); |
| 379 } |
| 380 |
| 381 enum MemoryChunkFlags { |
| 382 IS_EXECUTABLE, |
| 383 ABOUT_TO_BE_FREED, |
| 384 POINTERS_TO_HERE_ARE_INTERESTING, |
| 385 POINTERS_FROM_HERE_ARE_INTERESTING, |
| 386 SCAN_ON_SCAVENGE, |
| 387 IN_FROM_SPACE, // Mutually exclusive with IN_TO_SPACE. |
| 388 IN_TO_SPACE, // All pages in new space have one of these two set. |
| 389 NEW_SPACE_BELOW_AGE_MARK, |
| 390 CONTAINS_ONLY_DATA, |
| 391 EVACUATION_CANDIDATE, |
| 392 RESCAN_ON_EVACUATION, |
| 393 |
| 394 // Pages swept precisely can be iterated, hitting only the live objects. |
| 395 // Whereas those swept conservatively cannot be iterated over. Both flags |
| 396 // indicate that marking bits have been cleared by the sweeper, otherwise |
| 397 // marking bits are still intact. |
| 398 WAS_SWEPT_PRECISELY, |
| 399 WAS_SWEPT_CONSERVATIVELY, |
| 400 |
| 401 // Last flag, keep at bottom. |
| 402 NUM_MEMORY_CHUNK_FLAGS |
| 403 }; |
| 404 |
| 405 |
| 406 static const int kPointersToHereAreInterestingMask = |
| 407 1 << POINTERS_TO_HERE_ARE_INTERESTING; |
| 408 |
| 409 static const int kPointersFromHereAreInterestingMask = |
| 410 1 << POINTERS_FROM_HERE_ARE_INTERESTING; |
| 411 |
| 412 static const int kEvacuationCandidateMask = |
| 413 1 << EVACUATION_CANDIDATE; |
| 414 |
| 415 static const int kSkipEvacuationSlotsRecordingMask = |
| 416 (1 << EVACUATION_CANDIDATE) | |
| 417 (1 << RESCAN_ON_EVACUATION) | |
| 418 (1 << IN_FROM_SPACE) | |
| 419 (1 << IN_TO_SPACE); |
| 420 |
| 421 |
| 422 void SetFlag(int flag) { |
| 423 flags_ |= static_cast<uintptr_t>(1) << flag; |
| 424 } |
| 425 |
| 426 void ClearFlag(int flag) { |
| 427 flags_ &= ~(static_cast<uintptr_t>(1) << flag); |
| 428 } |
| 429 |
| 430 void SetFlagTo(int flag, bool value) { |
| 431 if (value) { |
| 432 SetFlag(flag); |
| 433 } else { |
| 434 ClearFlag(flag); |
| 435 } |
| 436 } |
| 437 |
| 438 bool IsFlagSet(int flag) { |
| 439 return (flags_ & (static_cast<uintptr_t>(1) << flag)) != 0; |
| 440 } |
| 441 |
| 442 // Set or clear multiple flags at a time. The flags in the mask |
| 443 // are set to the value in "flags", the rest retain the current value |
| 444 // in flags_. |
| 445 void SetFlags(intptr_t flags, intptr_t mask) { |
| 446 flags_ = (flags_ & ~mask) | (flags & mask); |
| 447 } |
| 448 |
| 449 // Return all current flags. |
| 450 intptr_t GetFlags() { return flags_; } |
| 451 |
| 452 // Manage live byte count (count of bytes known to be live, |
| 453 // because they are marked black). |
| 454 void ResetLiveBytes() { |
| 455 if (FLAG_trace_live_byte_count) { |
| 456 PrintF("ResetLiveBytes:%p:%x->0\n", |
| 457 static_cast<void*>(this), live_byte_count_); |
| 458 } |
| 459 live_byte_count_ = 0; |
| 460 } |
| 461 void IncrementLiveBytes(int by) { |
| 462 ASSERT_LE(static_cast<unsigned>(live_byte_count_), size_); |
| 463 if (FLAG_trace_live_byte_count) { |
| 464 printf("UpdateLiveBytes:%p:%x%c=%x->%x\n", |
| 465 static_cast<void*>(this), live_byte_count_, |
| 466 ((by < 0) ? '-' : '+'), ((by < 0) ? -by : by), |
| 467 live_byte_count_ + by); |
| 468 } |
| 469 live_byte_count_ += by; |
| 470 ASSERT_LE(static_cast<unsigned>(live_byte_count_), size_); |
| 471 } |
| 472 int LiveBytes() { |
| 473 ASSERT(static_cast<unsigned>(live_byte_count_) <= size_); |
| 474 return live_byte_count_; |
| 475 } |
| 476 static void IncrementLiveBytes(Address address, int by) { |
| 477 MemoryChunk::FromAddress(address)->IncrementLiveBytes(by); |
| 478 } |
| 479 |
| 480 static const intptr_t kAlignment = |
| 481 (static_cast<uintptr_t>(1) << kPageSizeBits); |
| 482 |
| 483 static const intptr_t kAlignmentMask = kAlignment - 1; |
| 484 |
| 485 static const intptr_t kSizeOffset = kPointerSize + kPointerSize; |
| 486 |
| 487 static const intptr_t kLiveBytesOffset = |
| 488 kSizeOffset + kPointerSize + kPointerSize + kPointerSize + |
| 489 kPointerSize + kPointerSize + kPointerSize + kIntSize; |
| 490 |
| 491 static const size_t kSlotsBufferOffset = kLiveBytesOffset + kIntSize; |
| 492 |
| 493 static const size_t kHeaderSize = |
| 494 kSlotsBufferOffset + kPointerSize + kPointerSize; |
| 495 |
| 496 static const int kBodyOffset = |
| 497 CODE_POINTER_ALIGN(MAP_POINTER_ALIGN(kHeaderSize + Bitmap::kSize)); |
| 498 |
| 499 // The start offset of the object area in a page. Aligned to both maps and |
| 500 // code alignment to be suitable for both. Also aligned to 32 words because |
| 501 // the marking bitmap is arranged in 32 bit chunks. |
| 502 static const int kObjectStartAlignment = 32 * kPointerSize; |
| 503 static const int kObjectStartOffset = kBodyOffset - 1 + |
| 504 (kObjectStartAlignment - (kBodyOffset - 1) % kObjectStartAlignment); |
| 505 |
| 506 size_t size() const { return size_; } |
| 507 |
| 508 Executability executable() { |
| 509 return IsFlagSet(IS_EXECUTABLE) ? EXECUTABLE : NOT_EXECUTABLE; |
| 510 } |
| 511 |
| 512 bool ContainsOnlyData() { |
| 513 return IsFlagSet(CONTAINS_ONLY_DATA); |
| 514 } |
| 515 |
| 516 bool InNewSpace() { |
| 517 return (flags_ & ((1 << IN_FROM_SPACE) | (1 << IN_TO_SPACE))) != 0; |
| 518 } |
| 519 |
| 520 bool InToSpace() { |
| 521 return IsFlagSet(IN_TO_SPACE); |
| 522 } |
| 523 |
| 524 bool InFromSpace() { |
| 525 return IsFlagSet(IN_FROM_SPACE); |
| 526 } |
| 527 |
| 528 // --------------------------------------------------------------------- |
| 529 // Markbits support |
| 530 |
| 531 inline Bitmap* markbits() { |
| 532 return Bitmap::FromAddress(address() + kHeaderSize); |
| 533 } |
| 534 |
| 535 void PrintMarkbits() { markbits()->Print(); } |
| 536 |
| 537 inline uint32_t AddressToMarkbitIndex(Address addr) { |
| 538 return static_cast<uint32_t>(addr - this->address()) >> kPointerSizeLog2; |
| 539 } |
| 540 |
| 541 inline static uint32_t FastAddressToMarkbitIndex(Address addr) { |
| 542 const intptr_t offset = |
| 543 reinterpret_cast<intptr_t>(addr) & kAlignmentMask; |
| 544 |
| 545 return static_cast<uint32_t>(offset) >> kPointerSizeLog2; |
| 546 } |
| 547 |
| 548 inline Address MarkbitIndexToAddress(uint32_t index) { |
| 549 return this->address() + (index << kPointerSizeLog2); |
| 550 } |
| 551 |
| 552 void InsertAfter(MemoryChunk* other); |
| 553 void Unlink(); |
| 554 |
| 555 inline Heap* heap() { return heap_; } |
| 556 |
| 557 static const int kFlagsOffset = kPointerSize * 3; |
| 558 |
| 559 bool IsEvacuationCandidate() { return IsFlagSet(EVACUATION_CANDIDATE); } |
| 560 |
| 561 bool ShouldSkipEvacuationSlotRecording() { |
| 562 return (flags_ & kSkipEvacuationSlotsRecordingMask) != 0; |
| 563 } |
| 564 |
| 565 inline SkipList* skip_list() { |
| 566 return skip_list_; |
| 567 } |
| 568 |
| 569 inline void set_skip_list(SkipList* skip_list) { |
| 570 skip_list_ = skip_list; |
| 571 } |
| 572 |
| 573 inline SlotsBuffer* slots_buffer() { |
| 574 return slots_buffer_; |
| 575 } |
| 576 |
| 577 inline SlotsBuffer** slots_buffer_address() { |
| 578 return &slots_buffer_; |
| 579 } |
| 580 |
| 581 void MarkEvacuationCandidate() { |
| 582 ASSERT(slots_buffer_ == NULL); |
| 583 SetFlag(EVACUATION_CANDIDATE); |
| 584 } |
| 585 |
| 586 void ClearEvacuationCandidate() { |
| 587 ASSERT(slots_buffer_ == NULL); |
| 588 ClearFlag(EVACUATION_CANDIDATE); |
| 589 } |
| 590 |
| 591 |
| 592 protected: |
| 593 MemoryChunk* next_chunk_; |
| 594 MemoryChunk* prev_chunk_; |
| 595 size_t size_; |
| 596 intptr_t flags_; |
| 597 // If the chunk needs to remember its memory reservation, it is stored here. |
| 598 VirtualMemory reservation_; |
| 599 // The identity of the owning space. This is tagged as a failure pointer, but |
| 600 // no failure can be in an object, so this can be distinguished from any entry |
| 601 // in a fixed array. |
| 602 Address owner_; |
| 603 Heap* heap_; |
| 604 // Used by the store buffer to keep track of which pages to mark scan-on- |
| 605 // scavenge. |
| 606 int store_buffer_counter_; |
| 607 // Count of bytes marked black on page. |
| 608 int live_byte_count_; |
| 609 SlotsBuffer* slots_buffer_; |
| 610 SkipList* skip_list_; |
| 611 |
| 612 static MemoryChunk* Initialize(Heap* heap, |
| 613 Address base, |
| 614 size_t size, |
| 615 Executability executable, |
| 616 Space* owner); |
| 617 |
| 618 friend class MemoryAllocator; |
| 619 }; |
| 620 |
| 621 STATIC_CHECK(sizeof(MemoryChunk) <= MemoryChunk::kHeaderSize); |
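
Because every MemoryChunk starts on a kAlignment boundary, FromAddress() is a single mask operation, and the mark-bit index of any interior address is just its word offset inside the chunk. A sketch assuming only the declarations above (the assertions restate relationships that hold by construction):

```cpp
// Illustrative only: from an interior address back to its chunk and mark bit.
void MemoryChunkExample(Address addr) {
  MemoryChunk* chunk = MemoryChunk::FromAddress(addr);

  // Offset inside the chunk measured in words; the "fast" form computes the
  // same value directly from the low bits of the address.
  uint32_t index = chunk->AddressToMarkbitIndex(addr);
  ASSERT(index == MemoryChunk::FastAddressToMarkbitIndex(addr));

  // Set and test the corresponding bit in the chunk's marking bitmap.
  MarkBit mark_bit = chunk->markbits()->MarkBitFromIndex(index);
  mark_bit.Set();
  ASSERT(mark_bit.Get());
  mark_bit.Clear();
}
```
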
| 117 | 622 |
| 118 // ----------------------------------------------------------------------------- | 623 // ----------------------------------------------------------------------------- |
| 119 // A page normally has 8K bytes. Large object pages may be larger. A page | 624 // A page is a memory chunk of size 1MB. Large object pages may be larger. |
| 120 // address is always aligned to the 8K page size. | |
| 121 // | |
| 122 // Each page starts with a header of Page::kPageHeaderSize size which contains | |
| 123 // bookkeeping data. | |
| 124 // | |
| 125 // The mark-compact collector transforms a map pointer into a page index and a | |
| 126 // page offset. The exact encoding is described in the comments for | |
| 127 // class MapWord in objects.h. | |
| 128 // | 625 // |
| 129 // The only way to get a page pointer is by calling factory methods: | 626 // The only way to get a page pointer is by calling factory methods: |
| 130 // Page* p = Page::FromAddress(addr); or | 627 // Page* p = Page::FromAddress(addr); or |
| 131 // Page* p = Page::FromAllocationTop(top); | 628 // Page* p = Page::FromAllocationTop(top); |
| 132 class Page { | 629 class Page : public MemoryChunk { |
| 133 public: | 630 public: |
| 134 // Returns the page containing a given address. The address ranges | 631 // Returns the page containing a given address. The address ranges |
| 135 // from [page_addr .. page_addr + kPageSize[ | 632 // from [page_addr .. page_addr + kPageSize[ |
| 136 // | 633 // This only works if the object is in fact in a page. See also MemoryChunk:: |
| 137 // Note that this function only works for addresses in normal paged | 634 // FromAddress() and FromAnyPointerAddress(). |
| 138 // spaces and addresses in the first 8K of large object pages (i.e., | |
| 139 // the start of large objects but not necessarily derived pointers | |
| 140 // within them). | |
| 141 INLINE(static Page* FromAddress(Address a)) { | 635 INLINE(static Page* FromAddress(Address a)) { |
| 142 return reinterpret_cast<Page*>(OffsetFrom(a) & ~kPageAlignmentMask); | 636 return reinterpret_cast<Page*>(OffsetFrom(a) & ~kPageAlignmentMask); |
| 143 } | 637 } |
| 144 | 638 |
| 145 // Returns the page containing an allocation top. Because an allocation | 639 // Returns the page containing an allocation top. Because an allocation |
| 146 // top address can be the upper bound of the page, we need to subtract | 640 // top address can be the upper bound of the page, we need to subtract |
| 147 // it with kPointerSize first. The address ranges from | 641 // it with kPointerSize first. The address ranges from |
| 148 // [page_addr + kObjectStartOffset .. page_addr + kPageSize]. | 642 // [page_addr + kObjectStartOffset .. page_addr + kPageSize]. |
| 149 INLINE(static Page* FromAllocationTop(Address top)) { | 643 INLINE(static Page* FromAllocationTop(Address top)) { |
| 150 Page* p = FromAddress(top - kPointerSize); | 644 Page* p = FromAddress(top - kPointerSize); |
| 151 ASSERT_PAGE_OFFSET(p->Offset(top)); | 645 ASSERT_PAGE_OFFSET(p->Offset(top)); |
| 152 return p; | 646 return p; |
| 153 } | 647 } |
| 154 | 648 |
| 155 // Returns the start address of this page. | 649 // Returns the next page in the chain of pages owned by a space. |
| 156 Address address() { return reinterpret_cast<Address>(this); } | |
| 157 | |
| 158 // Checks whether this is a valid page address. | |
| 159 bool is_valid() { return address() != NULL; } | |
| 160 | |
| 161 // Returns the next page of this page. | |
| 162 inline Page* next_page(); | 650 inline Page* next_page(); |
| 163 | 651 inline Page* prev_page(); |
| 164 // Return the end of allocation in this page. Undefined for unused pages. | 652 inline void set_next_page(Page* page); |
| 165 inline Address AllocationTop(); | 653 inline void set_prev_page(Page* page); |
| 166 | |
| 167 // Return the allocation watermark for the page. | |
| 168 // For old space pages it is guaranteed that the area under the watermark | |
| 169 // does not contain any garbage pointers to new space. | |
| 170 inline Address AllocationWatermark(); | |
| 171 | |
| 172 // Return the allocation watermark offset from the beginning of the page. | |
| 173 inline uint32_t AllocationWatermarkOffset(); | |
| 174 | |
| 175 inline void SetAllocationWatermark(Address allocation_watermark); | |
| 176 | |
| 177 inline void SetCachedAllocationWatermark(Address allocation_watermark); | |
| 178 inline Address CachedAllocationWatermark(); | |
| 179 | 654 |
| 180 // Returns the start address of the object area in this page. | 655 // Returns the start address of the object area in this page. |
| 181 Address ObjectAreaStart() { return address() + kObjectStartOffset; } | 656 Address ObjectAreaStart() { return address() + kObjectStartOffset; } |
| 182 | 657 |
| 183 // Returns the end address (exclusive) of the object area in this page. | 658 // Returns the end address (exclusive) of the object area in this page. |
| 184 Address ObjectAreaEnd() { return address() + Page::kPageSize; } | 659 Address ObjectAreaEnd() { return address() + Page::kPageSize; } |
| 185 | 660 |
| 186 // Checks whether an address is page aligned. | 661 // Checks whether an address is page aligned. |
| 187 static bool IsAlignedToPageSize(Address a) { | 662 static bool IsAlignedToPageSize(Address a) { |
| 188 return 0 == (OffsetFrom(a) & kPageAlignmentMask); | 663 return 0 == (OffsetFrom(a) & kPageAlignmentMask); |
| 189 } | 664 } |
| 190 | 665 |
| 191 // True if this page was in use before current compaction started. | |
| 192 // Result is valid only for pages owned by paged spaces and | |
| 193 // only after PagedSpace::PrepareForMarkCompact was called. | |
| 194 inline bool WasInUseBeforeMC(); | |
| 195 | |
| 196 inline void SetWasInUseBeforeMC(bool was_in_use); | |
| 197 | |
| 198 // True if this page is a large object page. | |
| 199 inline bool IsLargeObjectPage(); | |
| 200 | |
| 201 inline void SetIsLargeObjectPage(bool is_large_object_page); | |
| 202 | |
| 203 inline Executability PageExecutability(); | |
| 204 | |
| 205 inline void SetPageExecutability(Executability executable); | |
| 206 | |
| 207 // Returns the offset of a given address to this page. | 666 // Returns the offset of a given address to this page. |
| 208 INLINE(int Offset(Address a)) { | 667 INLINE(int Offset(Address a)) { |
| 209 int offset = static_cast<int>(a - address()); | 668 int offset = static_cast<int>(a - address()); |
| 210 ASSERT_PAGE_OFFSET(offset); | 669 ASSERT_PAGE_OFFSET(offset); |
| 211 return offset; | 670 return offset; |
| 212 } | 671 } |
| 213 | 672 |
| 214 // Returns the address for a given offset into this page. | 673 // Returns the address for a given offset into this page. |
| 215 Address OffsetToAddress(int offset) { | 674 Address OffsetToAddress(int offset) { |
| 216 ASSERT_PAGE_OFFSET(offset); | 675 ASSERT_PAGE_OFFSET(offset); |
| 217 return address() + offset; | 676 return address() + offset; |
| 218 } | 677 } |
| 219 | 678 |
| 220 // --------------------------------------------------------------------- | 679 // --------------------------------------------------------------------- |
| 221 // Card marking support | |
| 222 | |
| 223 static const uint32_t kAllRegionsCleanMarks = 0x0; | |
| 224 static const uint32_t kAllRegionsDirtyMarks = 0xFFFFFFFF; | |
| 225 | |
| 226 inline uint32_t GetRegionMarks(); | |
| 227 inline void SetRegionMarks(uint32_t dirty); | |
| 228 | |
| 229 inline uint32_t GetRegionMaskForAddress(Address addr); | |
| 230 inline uint32_t GetRegionMaskForSpan(Address start, int length_in_bytes); | |
| 231 inline int GetRegionNumberForAddress(Address addr); | |
| 232 | |
| 233 inline void MarkRegionDirty(Address addr); | |
| 234 inline bool IsRegionDirty(Address addr); | |
| 235 | |
| 236 inline void ClearRegionMarks(Address start, | |
| 237 Address end, | |
| 238 bool reaches_limit); | |
| 239 | 680 |
| 240 // Page size in bytes. This must be a multiple of the OS page size. | 681 // Page size in bytes. This must be a multiple of the OS page size. |
| 241 static const int kPageSize = 1 << kPageSizeBits; | 682 static const int kPageSize = 1 << kPageSizeBits; |
| 242 | 683 |
| 243 // Page size mask. | 684 // Page size mask. |
| 244 static const intptr_t kPageAlignmentMask = (1 << kPageSizeBits) - 1; | 685 static const intptr_t kPageAlignmentMask = (1 << kPageSizeBits) - 1; |
| 245 | 686 |
| 246 static const int kPageHeaderSize = kPointerSize + kPointerSize + kIntSize + | |
| 247 kIntSize + kPointerSize + kPointerSize; | |
| 248 | |
| 249 // The start offset of the object area in a page. Aligned to both maps and | |
| 250 // code alignment to be suitable for both. | |
| 251 static const int kObjectStartOffset = | |
| 252 CODE_POINTER_ALIGN(MAP_POINTER_ALIGN(kPageHeaderSize)); | |
| 253 | |
| 254 // Object area size in bytes. | 687 // Object area size in bytes. |
| 255 static const int kObjectAreaSize = kPageSize - kObjectStartOffset; | 688 static const int kObjectAreaSize = kPageSize - kObjectStartOffset; |
| 256 | 689 |
| 257 // Maximum object size that fits in a page. | 690 // Maximum object size that fits in a page. |
| 258 static const int kMaxHeapObjectSize = kObjectAreaSize; | 691 static const int kMaxHeapObjectSize = kObjectAreaSize; |
| 259 | 692 |
| 260 static const int kDirtyFlagOffset = 2 * kPointerSize; | 693 static const int kFirstUsedCell = |
| 261 static const int kRegionSizeLog2 = 8; | 694 (kObjectStartOffset/kPointerSize) >> Bitmap::kBitsPerCellLog2; |
| 262 static const int kRegionSize = 1 << kRegionSizeLog2; | |
| 263 static const intptr_t kRegionAlignmentMask = (kRegionSize - 1); | |
| 264 | 695 |
| 265 STATIC_CHECK(kRegionSize == kPageSize / kBitsPerInt); | 696 static const int kLastUsedCell = |
| 266 | 697 ((kPageSize - kPointerSize)/kPointerSize) >> |
| 267 enum PageFlag { | 698 Bitmap::kBitsPerCellLog2; |
| 268 IS_NORMAL_PAGE = 0, | |
| 269 WAS_IN_USE_BEFORE_MC, | |
| 270 | |
| 271 // Page allocation watermark was bumped by preallocation during scavenge. | |
| 272 // Correct watermark can be retrieved by CachedAllocationWatermark() method | |
| 273 WATERMARK_INVALIDATED, | |
| 274 IS_EXECUTABLE, | |
| 275 NUM_PAGE_FLAGS // Must be last | |
| 276 }; | |
| 277 static const int kPageFlagMask = (1 << NUM_PAGE_FLAGS) - 1; | |
| 278 | |
| 279 // To avoid an additional WATERMARK_INVALIDATED flag clearing pass during | |
| 280 // scavenge we just invalidate the watermark on each old space page after | |
| 281 // processing it. And then we flip the meaning of the WATERMARK_INVALIDATED | |
| 282 // flag at the beginning of the next scavenge and each page becomes marked as | |
| 283 // having a valid watermark. | |
| 284 // | |
| 285 // The following invariant must hold for pages in old pointer and map spaces: | |
| 286 // If page is in use then page is marked as having invalid watermark at | |
| 287 // the beginning and at the end of any GC. | |
| 288 // | |
| 289 // This invariant guarantees that after flipping flag meaning at the | |
| 290 // beginning of scavenge all pages in use will be marked as having valid | |
| 291 // watermark. | |
| 292 static inline void FlipMeaningOfInvalidatedWatermarkFlag(Heap* heap); | |
| 293 | |
| 294 // Returns true if the page allocation watermark was not altered during | |
| 295 // scavenge. | |
| 296 inline bool IsWatermarkValid(); | |
| 297 | |
| 298 inline void InvalidateWatermark(bool value); | |
| 299 | |
| 300 inline bool GetPageFlag(PageFlag flag); | |
| 301 inline void SetPageFlag(PageFlag flag, bool value); | |
| 302 inline void ClearPageFlags(); | |
| 303 | 699 |
| 304 inline void ClearGCFields(); | 700 inline void ClearGCFields(); |
| 305 | 701 |
| 306 static const int kAllocationWatermarkOffsetShift = WATERMARK_INVALIDATED + 1; | 702 static inline Page* Initialize(Heap* heap, |
| 307 static const int kAllocationWatermarkOffsetBits = kPageSizeBits + 1; | 703 MemoryChunk* chunk, |
| 308 static const uint32_t kAllocationWatermarkOffsetMask = | 704 Executability executable, |
| 309 ((1 << kAllocationWatermarkOffsetBits) - 1) << | 705 PagedSpace* owner); |
| 310 kAllocationWatermarkOffsetShift; | |
| 311 | 706 |
| 312 static const uint32_t kFlagsMask = | 707 void InitializeAsAnchor(PagedSpace* owner); |
| 313 ((1 << kAllocationWatermarkOffsetShift) - 1); | |
| 314 | 708 |
| 315 STATIC_CHECK(kBitsPerInt - kAllocationWatermarkOffsetShift >= | 709 bool WasSweptPrecisely() { return IsFlagSet(WAS_SWEPT_PRECISELY); } |
| 316 kAllocationWatermarkOffsetBits); | 710 bool WasSweptConservatively() { return IsFlagSet(WAS_SWEPT_CONSERVATIVELY); } |
| 711 bool WasSwept() { return WasSweptPrecisely() || WasSweptConservatively(); } |
| 317 | 712 |
| 318 //--------------------------------------------------------------------------- | 713 void MarkSweptPrecisely() { SetFlag(WAS_SWEPT_PRECISELY); } |
| 319 // Page header description. | 714 void MarkSweptConservatively() { SetFlag(WAS_SWEPT_CONSERVATIVELY); } |
| 320 // | |
| 321 // If a page is not in the large object space, the first word, | |
| 322 // opaque_header, encodes the next page address (aligned to kPageSize 8K) | |
| 323 // and the chunk number (0 ~ 8K-1). Only MemoryAllocator should use | |
| 324 // opaque_header. The value range of the opaque_header is [0..kPageSize[, | |
| 325 // or [next_page_start, next_page_end[. It cannot point to a valid address | |
| 326 // in the current page. If a page is in the large object space, the first | |
| 327 // word *may* (if the page start and large object chunk start are the | |
| 328 // same) contain the address of the next large object chunk. | |
| 329 intptr_t opaque_header; | |
| 330 | 715 |
| 331 // If the page is not in the large object space, the low-order bit of the | 716 void ClearSweptPrecisely() { ClearFlag(WAS_SWEPT_PRECISELY); } |
| 332 // second word is set. If the page is in the large object space, the | 717 void ClearSweptConservatively() { ClearFlag(WAS_SWEPT_CONSERVATIVELY); } |
| 333 // second word *may* (if the page start and large object chunk start are | |
| 334 // the same) contain the large object chunk size. In either case, the | |
| 335 // low-order bit for large object pages will be cleared. | |
| 336 // For normal pages this word is used to store page flags and | |
| 337 // offset of allocation top. | |
| 338 intptr_t flags_; | |
| 339 | 718 |
| 340 // This field contains dirty marks for regions covering the page. Only dirty | 719 #ifdef DEBUG |
| 341 // regions might contain intergenerational references. | 720 void Print(); |
| 342 // Only 32 dirty marks are supported so for large object pages several regions | 721 #endif // DEBUG |
| 343 // might be mapped to a single dirty mark. | |
| 344 uint32_t dirty_regions_; | |
| 345 | 722 |
| 346 // The index of the page in its owner space. | 723 friend class MemoryAllocator; |
| 347 int mc_page_index; | |
| 348 | |
| 349 // During mark-compact collections this field contains the forwarding address | |
| 350 // of the first live object in this page. | |
| 351 // During scavenge collection this field is used to store allocation watermark | |
| 352 // if it is altered during scavenge. | |
| 353 Address mc_first_forwarded; | |
| 354 | |
| 355 Heap* heap_; | |
| 356 }; | 724 }; |
| 357 | 725 |
| 358 | 726 |
| 727 STATIC_CHECK(sizeof(Page) <= MemoryChunk::kHeaderSize); |
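
FromAllocationTop() exists because an allocation top may point just past the last byte of a page; masking that address directly would select the following page, so kPointerSize is subtracted first. A sketch assuming the declarations above:

```cpp
// Illustrative only: why FromAllocationTop() subtracts kPointerSize.
void PageExample(Page* p) {
  Address limit = p->ObjectAreaEnd();      // == p->address() + Page::kPageSize

  // The limit itself lies on the next page boundary...
  ASSERT(Page::FromAddress(limit) != p);
  // ...but as an allocation top it still belongs to this page.
  ASSERT(Page::FromAllocationTop(limit) == p);
}
```
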
| 728 |
| 729 |
| 730 class LargePage : public MemoryChunk { |
| 731 public: |
| 732 HeapObject* GetObject() { |
| 733 return HeapObject::FromAddress(body()); |
| 734 } |
| 735 |
| 736 inline LargePage* next_page() const { |
| 737 return static_cast<LargePage*>(next_chunk()); |
| 738 } |
| 739 |
| 740 inline void set_next_page(LargePage* page) { |
| 741 set_next_chunk(page); |
| 742 } |
| 743 private: |
| 744 static inline LargePage* Initialize(Heap* heap, MemoryChunk* chunk); |
| 745 |
| 746 friend class MemoryAllocator; |
| 747 }; |
| 748 |
| 749 STATIC_CHECK(sizeof(LargePage) <= MemoryChunk::kHeaderSize); |
| 750 |
| 359 // ---------------------------------------------------------------------------- | 751 // ---------------------------------------------------------------------------- |
| 360 // Space is the abstract superclass for all allocation spaces. | 752 // Space is the abstract superclass for all allocation spaces. |
| 361 class Space : public Malloced { | 753 class Space : public Malloced { |
| 362 public: | 754 public: |
| 363 Space(Heap* heap, AllocationSpace id, Executability executable) | 755 Space(Heap* heap, AllocationSpace id, Executability executable) |
| 364 : heap_(heap), id_(id), executable_(executable) {} | 756 : heap_(heap), id_(id), executable_(executable) {} |
| 365 | 757 |
| 366 virtual ~Space() {} | 758 virtual ~Space() {} |
| 367 | 759 |
| 368 Heap* heap() const { return heap_; } | 760 Heap* heap() const { return heap_; } |
| 369 | 761 |
| 370 // Does the space need executable memory? | 762 // Does the space need executable memory? |
| 371 Executability executable() { return executable_; } | 763 Executability executable() { return executable_; } |
| 372 | 764 |
| 373 // Identity used in error reporting. | 765 // Identity used in error reporting. |
| 374 AllocationSpace identity() { return id_; } | 766 AllocationSpace identity() { return id_; } |
| 375 | 767 |
| 376 // Returns allocated size. | 768 // Returns allocated size. |
| 377 virtual intptr_t Size() = 0; | 769 virtual intptr_t Size() = 0; |
| 378 | 770 |
| 379 // Returns size of objects. Can differ from the allocated size | 771 // Returns size of objects. Can differ from the allocated size |
| 380 // (e.g. see LargeObjectSpace). | 772 // (e.g. see LargeObjectSpace). |
| 381 virtual intptr_t SizeOfObjects() { return Size(); } | 773 virtual intptr_t SizeOfObjects() { return Size(); } |
| 382 | 774 |
| 775 virtual int RoundSizeDownToObjectAlignment(int size) { |
| 776 if (id_ == CODE_SPACE) { |
| 777 return RoundDown(size, kCodeAlignment); |
| 778 } else { |
| 779 return RoundDown(size, kPointerSize); |
| 780 } |
| 781 } |
| 782 |
| 383 #ifdef DEBUG | 783 #ifdef DEBUG |
| 384 virtual void Print() = 0; | 784 virtual void Print() = 0; |
| 385 #endif | 785 #endif |
| 386 | 786 |
| 387 // After calling this we can allocate a certain number of bytes using only | 787 // After calling this we can allocate a certain number of bytes using only |
| 388 // linear allocation (with a LinearAllocationScope and an AlwaysAllocateScope) | 788 // linear allocation (with a LinearAllocationScope and an AlwaysAllocateScope) |
| 389 // without using freelists or causing a GC. This is used by partial | 789 // without using freelists or causing a GC. This is used by partial |
| 390 // snapshots. It returns true if space was reserved or false if a GC is | 790 // snapshots. It returns true if space was reserved or false if a GC is |
| 391 // needed. For paged spaces the space requested must include the space wasted | 791 // needed. For paged spaces the space requested must include the space wasted |
| 392 // at the end of each page when allocating linearly. | 792 // at the end of each page when allocating linearly. |
| (...skipping 30 matching lines...) |
| 423 bool exists() { return this != NULL && code_range_ != NULL; } | 823 bool exists() { return this != NULL && code_range_ != NULL; } |
| 424 bool contains(Address address) { | 824 bool contains(Address address) { |
| 425 if (this == NULL || code_range_ == NULL) return false; | 825 if (this == NULL || code_range_ == NULL) return false; |
| 426 Address start = static_cast<Address>(code_range_->address()); | 826 Address start = static_cast<Address>(code_range_->address()); |
| 427 return start <= address && address < start + code_range_->size(); | 827 return start <= address && address < start + code_range_->size(); |
| 428 } | 828 } |
| 429 | 829 |
| 430 // Allocates a chunk of memory from the large-object portion of | 830 // Allocates a chunk of memory from the large-object portion of |
| 431 // the code range. On platforms with no separate code range, should | 831 // the code range. On platforms with no separate code range, should |
| 432 // not be called. | 832 // not be called. |
| 433 MUST_USE_RESULT void* AllocateRawMemory(const size_t requested, | 833 MUST_USE_RESULT Address AllocateRawMemory(const size_t requested, |
| 434 size_t* allocated); | 834 size_t* allocated); |
| 435 void FreeRawMemory(void* buf, size_t length); | 835 void FreeRawMemory(Address buf, size_t length); |
| 436 | 836 |
| 437 private: | 837 private: |
| 438 Isolate* isolate_; | 838 Isolate* isolate_; |
| 439 | 839 |
| 440 // The reserved range of virtual memory that all code objects are put in. | 840 // The reserved range of virtual memory that all code objects are put in. |
| 441 VirtualMemory* code_range_; | 841 VirtualMemory* code_range_; |
| 442 // Plain old data class, just a struct plus a constructor. | 842 // Plain old data class, just a struct plus a constructor. |
| 443 class FreeBlock { | 843 class FreeBlock { |
| 444 public: | 844 public: |
| 445 FreeBlock(Address start_arg, size_t size_arg) | 845 FreeBlock(Address start_arg, size_t size_arg) |
| 446 : start(start_arg), size(size_arg) {} | 846 : start(start_arg), size(size_arg) { |
| 847 ASSERT(IsAddressAligned(start, MemoryChunk::kAlignment)); |
| 848 ASSERT(size >= static_cast<size_t>(Page::kPageSize)); |
| 849 } |
| 447 FreeBlock(void* start_arg, size_t size_arg) | 850 FreeBlock(void* start_arg, size_t size_arg) |
| 448 : start(static_cast<Address>(start_arg)), size(size_arg) {} | 851 : start(static_cast<Address>(start_arg)), size(size_arg) { |
| 852 ASSERT(IsAddressAligned(start, MemoryChunk::kAlignment)); |
| 853 ASSERT(size >= static_cast<size_t>(Page::kPageSize)); |
| 854 } |
| 449 | 855 |
| 450 Address start; | 856 Address start; |
| 451 size_t size; | 857 size_t size; |
| 452 }; | 858 }; |
| 453 | 859 |
| 454 // Freed blocks of memory are added to the free list. When the allocation | 860 // Freed blocks of memory are added to the free list. When the allocation |
| 455 // list is exhausted, the free list is sorted and merged to make the new | 861 // list is exhausted, the free list is sorted and merged to make the new |
| 456 // allocation list. | 862 // allocation list. |
| 457 List<FreeBlock> free_list_; | 863 List<FreeBlock> free_list_; |
| 458 // Memory is allocated from the free blocks on the allocation list. | 864 // Memory is allocated from the free blocks on the allocation list. |
| 459 // The block at current_allocation_block_index_ is the current block. | 865 // The block at current_allocation_block_index_ is the current block. |
| 460 List<FreeBlock> allocation_list_; | 866 List<FreeBlock> allocation_list_; |
| 461 int current_allocation_block_index_; | 867 int current_allocation_block_index_; |
| 462 | 868 |
| 463 // Finds a block on the allocation list that contains at least the | 869 // Finds a block on the allocation list that contains at least the |
| 464 // requested amount of memory. If none is found, sorts and merges | 870 // requested amount of memory. If none is found, sorts and merges |
| 465 // the existing free memory blocks, and searches again. | 871 // the existing free memory blocks, and searches again. |
| 466 // If none can be found, terminates V8 with FatalProcessOutOfMemory. | 872 // If none can be found, terminates V8 with FatalProcessOutOfMemory. |
| 467 void GetNextAllocationBlock(size_t requested); | 873 void GetNextAllocationBlock(size_t requested); |
| 468 // Compares the start addresses of two free blocks. | 874 // Compares the start addresses of two free blocks. |
| 469 static int CompareFreeBlockAddress(const FreeBlock* left, | 875 static int CompareFreeBlockAddress(const FreeBlock* left, |
| 470 const FreeBlock* right); | 876 const FreeBlock* right); |
| 471 | 877 |
| 472 DISALLOW_COPY_AND_ASSIGN(CodeRange); | 878 DISALLOW_COPY_AND_ASSIGN(CodeRange); |
| 473 }; | 879 }; |
| 474 | 880 |
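
A brief usage sketch for the CodeRange API above. The calls match the declarations shown; whether AllocateRawMemory() can return NULL on failure is an assumption made here for the sake of the example.

```cpp
// Illustrative only: carve a block out of the reserved code range and free it.
void CodeRangeExample(CodeRange* code_range, size_t requested) {
  if (!code_range->exists()) return;       // no separate code range configured

  size_t allocated = 0;
  Address base = code_range->AllocateRawMemory(requested, &allocated);
  if (base == NULL) return;                // assumption: NULL signals failure

  ASSERT(code_range->contains(base));
  code_range->FreeRawMemory(base, allocated);
}
```
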
| 475 | 881 |
| 882 class SkipList { |
| 883 public: |
| 884 SkipList() { |
| 885 Clear(); |
| 886 } |
| 887 |
| 888 void Clear() { |
| 889 for (int idx = 0; idx < kSize; idx++) { |
| 890 starts_[idx] = reinterpret_cast<Address>(-1); |
| 891 } |
| 892 } |
| 893 |
| 894 Address StartFor(Address addr) { |
| 895 return starts_[RegionNumber(addr)]; |
| 896 } |
| 897 |
| 898 void AddObject(Address addr, int size) { |
| 899 int start_region = RegionNumber(addr); |
| 900 int end_region = RegionNumber(addr + size - kPointerSize); |
| 901 for (int idx = start_region; idx <= end_region; idx++) { |
| 902 if (starts_[idx] > addr) starts_[idx] = addr; |
| 903 } |
| 904 } |
| 905 |
| 906 static inline int RegionNumber(Address addr) { |
| 907 return (OffsetFrom(addr) & Page::kPageAlignmentMask) >> kRegionSizeLog2; |
| 908 } |
| 909 |
| 910 static void Update(Address addr, int size) { |
| 911 Page* page = Page::FromAddress(addr); |
| 912 SkipList* list = page->skip_list(); |
| 913 if (list == NULL) { |
| 914 list = new SkipList(); |
| 915 page->set_skip_list(list); |
| 916 } |
| 917 |
| 918 list->AddObject(addr, size); |
| 919 } |
| 920 |
| 921 private: |
| 922 static const int kRegionSizeLog2 = 13; |
| 923 static const int kRegionSize = 1 << kRegionSizeLog2; |
| 924 static const int kSize = Page::kPageSize / kRegionSize; |
| 925 |
| 926 STATIC_ASSERT(Page::kPageSize % kRegionSize == 0); |
| 927 |
| 928 Address starts_[kSize]; |
| 929 }; |
| 930 |
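
The SkipList above records, for each 2^13-byte (8 KB) region of a page, the lowest start address of any object extending into that region, so iteration can begin at an object boundary instead of at the top of the page. A usage sketch assuming the declarations above and an object that fits within a single page:

```cpp
// Illustrative only: register a large object and query an interior address.
void SkipListExample(Address object_start, int object_size) {
  // Creates the page's skip list on demand and records the object's extent.
  SkipList::Update(object_start, object_size);

  // Any interior address of the object now maps to a start address that is
  // no greater than object_start, so scanning can begin there.
  Address interior = object_start + object_size - kPointerSize;
  SkipList* list = Page::FromAddress(object_start)->skip_list();
  ASSERT(list->StartFor(interior) <= object_start);
}
```
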
| 931 |
| 476 // ---------------------------------------------------------------------------- | 932 // ---------------------------------------------------------------------------- |
| 477 // A space acquires chunks of memory from the operating system. The memory | 933 // A space acquires chunks of memory from the operating system. The memory |
| 478 // allocator manages chunks for the paged heap spaces (old space and map | 934 // allocator allocates and deallocates pages for the paged heap spaces and large |
| 479 // space). A paged chunk consists of pages. Pages in a chunk have contiguous | 935 // pages for large object space. |
| 480 // addresses and are linked as a list. | |
| 481 // | 936 // |
| 482 // The allocator keeps an initial chunk which is used for the new space. The | 937 // Each space has to manage its own pages. |
| 483 // leftover regions of the initial chunk are used for the initial chunks of | |
| 484 // old space and map space if they are big enough to hold at least one page. | |
| 485 // The allocator assumes that there is one old space and one map space, each | |
| 486 // expands the space by allocating kPagesPerChunk pages except the last | |
| 487 // expansion (before running out of space). The first chunk may contain fewer | |
| 488 // than kPagesPerChunk pages as well. | |
| 489 // | 938 // |
| 490 // The memory allocator also allocates chunks for the large object space, but | |
| 491 // they are managed by the space itself. The new space does not expand. | |
| 492 // | |
| 493 // The fact that pages for paged spaces are allocated and deallocated in chunks | |
| 494 // induces a constraint on the order of pages in a linked lists. We say that | |
| 495 // pages are linked in the chunk-order if and only if every two consecutive | |
| 496 // pages from the same chunk are consecutive in the linked list. | |
| 497 // | |
| 498 | |
| 499 | |
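
A hedged sketch of the call pattern the comment above describes, using the AllocatePage()/Free() methods declared just below; the NULL-on-failure behaviour of AllocatePage() is an assumption made for the example.

```cpp
// Illustrative only: a paged space grows by one page and later releases it.
void GrowAndShrinkExample(MemoryAllocator* allocator, PagedSpace* space) {
  Page* page = allocator->AllocatePage(space, NOT_EXECUTABLE);
  if (page == NULL) return;    // assumption: NULL signals allocation failure

  // ... the owning space links the page into its own list and allocates
  // objects from it; the allocator is not involved again until ...

  allocator->Free(page);       // the whole chunk goes back to the allocator
}
```
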
| 500 class MemoryAllocator { | 939 class MemoryAllocator { |
| 501 public: | 940 public: |
| 502 explicit MemoryAllocator(Isolate* isolate); | 941 explicit MemoryAllocator(Isolate* isolate); |
| 503 | 942 |
| 504 // Initializes its internal bookkeeping structures. | 943 // Initializes its internal bookkeeping structures. |
| 505 // Max capacity of the total space and executable memory limit. | 944 // Max capacity of the total space and executable memory limit. |
| 506 bool Setup(intptr_t max_capacity, intptr_t capacity_executable); | 945 bool Setup(intptr_t max_capacity, intptr_t capacity_executable); |
| 507 | 946 |
| 508 // Deletes valid chunks. | |
| 509 void TearDown(); | 947 void TearDown(); |
| 510 | 948 |
| 511 // Reserves an initial address range of virtual memory to be split between | 949 Page* AllocatePage(PagedSpace* owner, Executability executable); |
| 512 // the two new space semispaces, the old space, and the map space. The | |
| 513 // memory is not yet committed or assigned to spaces and split into pages. | |
| 514 // The initial chunk is unmapped when the memory allocator is torn down. | |
| 515 // This function should only be called when there is not already a reserved | |
| 516 // initial chunk (initial_chunk_ should be NULL). It returns the start | |
| 517 // address of the initial chunk if successful, with the side effect of | |
| 518 // setting the initial chunk, or else NULL if unsuccessful and leaves the | |
| 519 // initial chunk NULL. | |
| 520 void* ReserveInitialChunk(const size_t requested); | |
| 521 | 950 |
| 522 // Commits pages from an as-yet-unmanaged block of virtual memory into a | 951 LargePage* AllocateLargePage(intptr_t object_size, |
| 523 // paged space. The block should be part of the initial chunk reserved via | 952 Executability executable, |
| 524 // a call to ReserveInitialChunk. The number of pages is always returned in | 953 Space* owner); |
| 525 // the output parameter num_pages. This function assumes that the start | |
| 526 // address is non-null and that it is big enough to hold at least one | |
| 527 // page-aligned page. The call always succeeds, and num_pages is always | |
| 528 // greater than zero. | |
| 529 Page* CommitPages(Address start, size_t size, PagedSpace* owner, | |
| 530 int* num_pages); | |
| 531 | 954 |
| 532 // Commit a contiguous block of memory from the initial chunk. Assumes that | 955 void Free(MemoryChunk* chunk); |
| 533 // the address is not NULL, the size is greater than zero, and that the | |
| 534 // block is contained in the initial chunk. Returns true if it succeeded | |
| 535 // and false otherwise. | |
| 536 bool CommitBlock(Address start, size_t size, Executability executable); | |
| 537 | |
| 538 // Uncommit a contiguous block of memory [start..(start+size)[. | |
| 539 // start is not NULL, the size is greater than zero, and the | |
| 540 // block is contained in the initial chunk. Returns true if it succeeded | |
| 541 // and false otherwise. | |
| 542 bool UncommitBlock(Address start, size_t size); | |
| 543 | |
| 544 // Zaps a contiguous block of memory [start..(start+size)[ thus | |
| 545 // filling it up with a recognizable non-NULL bit pattern. | |
| 546 void ZapBlock(Address start, size_t size); | |
| 547 | |
| 548 // Attempts to allocate the requested (non-zero) number of pages from the | |
| 549 // OS. Fewer pages might be allocated than requested. If it fails to | |
| 550 // allocate memory for the OS or cannot allocate a single page, this | |
| 551 // function returns an invalid page pointer (NULL). The caller must check | |
| 552 // whether the returned page is valid (by calling Page::is_valid()). It is | |
| 553 // guaranteed that allocated pages have contiguous addresses. The actual | |
| 554 // number of allocated pages is returned in the output parameter | |
| 555 // allocated_pages. If the PagedSpace owner is executable and there is | |
| 556 // a code range, the pages are allocated from the code range. | |
| 557 Page* AllocatePages(int requested_pages, int* allocated_pages, | |
| 558 PagedSpace* owner); | |
| 559 | |
| 560 // Frees pages from a given page and after. Requires pages to be | |
| 561 // linked in chunk-order (see comment for class). | |
| 562 // If 'p' is the first page of a chunk, pages from 'p' are freed | |
| 563 // and this function returns an invalid page pointer. | |
| 564 // Otherwise, the function searches a page after 'p' that is | |
| 565 // the first page of a chunk. Pages after the found page | |
| 566 // are freed and the function returns 'p'. | |
| 567 Page* FreePages(Page* p); | |
| 568 | |
| 569 // Frees all pages owned by given space. | |
| 570 void FreeAllPages(PagedSpace* space); | |
| 571 | |
| 572 // Allocates and frees raw memory of certain size. | |
| 573 // These are just thin wrappers around OS::Allocate and OS::Free, | |
| 574 // but keep track of allocated bytes as part of heap. | |
| 575 // If the flag is EXECUTABLE and a code range exists, the requested | |
| 576 // memory is allocated from the code range. If a code range exists | |
| 577 // and the freed memory is in it, the code range manages the freed memory. | |
| 578 MUST_USE_RESULT void* AllocateRawMemory(const size_t requested, | |
| 579 size_t* allocated, | |
| 580 Executability executable); | |
| 581 void FreeRawMemory(void* buf, | |
| 582 size_t length, | |
| 583 Executability executable); | |
| 584 void PerformAllocationCallback(ObjectSpace space, | |
| 585 AllocationAction action, | |
| 586 size_t size); | |
| 587 | |
| 588 void AddMemoryAllocationCallback(MemoryAllocationCallback callback, | |
| 589 ObjectSpace space, | |
| 590 AllocationAction action); | |
| 591 void RemoveMemoryAllocationCallback(MemoryAllocationCallback callback); | |
| 592 bool MemoryAllocationCallbackRegistered(MemoryAllocationCallback callback); | |
| 593 | 956 |
| 594 // Returns the maximum available bytes of heaps. | 957 // Returns the maximum available bytes of heaps. |
| 595 intptr_t Available() { return capacity_ < size_ ? 0 : capacity_ - size_; } | 958 intptr_t Available() { return capacity_ < size_ ? 0 : capacity_ - size_; } |
| 596 | 959 |
| 597 // Returns allocated spaces in bytes. | 960 // Returns allocated spaces in bytes. |
| 598 intptr_t Size() { return size_; } | 961 intptr_t Size() { return size_; } |
| 599 | 962 |
| 600 // Returns the maximum available executable bytes of heaps. | 963 // Returns the maximum available executable bytes of heaps. |
| 601 intptr_t AvailableExecutable() { | 964 intptr_t AvailableExecutable() { |
| 602 if (capacity_executable_ < size_executable_) return 0; | 965 if (capacity_executable_ < size_executable_) return 0; |
| 603 return capacity_executable_ - size_executable_; | 966 return capacity_executable_ - size_executable_; |
| 604 } | 967 } |
| 605 | 968 |
| 606 // Returns allocated executable spaces in bytes. | 969 // Returns allocated executable spaces in bytes. |
| 607 intptr_t SizeExecutable() { return size_executable_; } | 970 intptr_t SizeExecutable() { return size_executable_; } |
| 608 | 971 |
| 609 // Returns maximum available bytes that the old space can have. | 972 // Returns maximum available bytes that the old space can have. |
| 610 intptr_t MaxAvailable() { | 973 intptr_t MaxAvailable() { |
| 611 return (Available() / Page::kPageSize) * Page::kObjectAreaSize; | 974 return (Available() / Page::kPageSize) * Page::kObjectAreaSize; |
| 612 } | 975 } |
| 613 | 976 |
| 614 // Links two pages. | |
| 615 inline void SetNextPage(Page* prev, Page* next); | |
| 616 | |
| 617 // Returns the next page of a given page. | |
| 618 inline Page* GetNextPage(Page* p); | |
| 619 | |
| 620 // Checks whether a page belongs to a space. | |
| 621 inline bool IsPageInSpace(Page* p, PagedSpace* space); | |
| 622 | |
| 623 // Returns the space that owns the given page. | |
| 624 inline PagedSpace* PageOwner(Page* page); | |
| 625 | |
| 626 // Finds the first/last page in the same chunk as a given page. | |
| 627 Page* FindFirstPageInSameChunk(Page* p); | |
| 628 Page* FindLastPageInSameChunk(Page* p); | |
| 629 | |
| 630 // Relinks list of pages owned by space to make it chunk-ordered. | |
| 631 // Returns new first and last pages of space. | |
| 632 // Also returns last page in relinked list which has WasInUsedBeforeMC | |
| 633 // flag set. | |
| 634 void RelinkPageListInChunkOrder(PagedSpace* space, | |
| 635 Page** first_page, | |
| 636 Page** last_page, | |
| 637 Page** last_page_in_use); | |
| 638 | |
| 639 #ifdef DEBUG | 977 #ifdef DEBUG |
| 640 // Reports statistic info of the space. | 978 // Reports statistic info of the space. |
| 641 void ReportStatistics(); | 979 void ReportStatistics(); |
| 642 #endif | 980 #endif |
| 643 | 981 |
| 644 // Due to encoding limitation, we can only have 8K chunks. | 982 MemoryChunk* AllocateChunk(intptr_t body_size, |
| 645 static const int kMaxNofChunks = 1 << kPageSizeBits; | 983 Executability executable, |
| 646 // If a chunk has at least 16 pages, the maximum heap size is about | 984 Space* space); |
| 647 // 8K * 8K * 16 = 1G bytes. | 985 |
| 648 #ifdef V8_TARGET_ARCH_X64 | 986 Address ReserveAlignedMemory(size_t requested, |
| 649 static const int kPagesPerChunk = 32; | 987 size_t alignment, |
| 650 // On 64 bit the chunk table consists of 4 levels of 4096-entry tables. | 988 VirtualMemory* controller); |
| 651 static const int kChunkTableLevels = 4; | 989 Address AllocateAlignedMemory(size_t requested, |
| 652 static const int kChunkTableBitsPerLevel = 12; | 990 size_t alignment, |
| 653 #else | 991 Executability executable, |
| 654 static const int kPagesPerChunk = 16; | 992 VirtualMemory* controller); |
| 655 // On 32 bit the chunk table consists of 2 levels of 256-entry tables. | 993 |
| 656 static const int kChunkTableLevels = 2; | 994 void FreeMemory(VirtualMemory* reservation, Executability executable); |
| 657 static const int kChunkTableBitsPerLevel = 8; | 995 void FreeMemory(Address addr, size_t size, Executability executable); |
| 658 #endif | 996 |
| 997 // Commit a contiguous block of memory from the initial chunk. Assumes that |
| 998 // the address is not NULL, the size is greater than zero, and that the |
| 999 // block is contained in the initial chunk. Returns true if it succeeded |
| 1000 // and false otherwise. |
| 1001 bool CommitBlock(Address start, size_t size, Executability executable); |
| 1002 |
| 1003 // Uncommit a contiguous block of memory [start..(start+size)[. |
| 1004 // start is not NULL, the size is greater than zero, and the |
| 1005 // block is contained in the initial chunk. Returns true if it succeeded |
| 1006 // and false otherwise. |
| 1007 bool UncommitBlock(Address start, size_t size); |
| 1008 |
| 1009 // Zaps a contiguous block of memory [start..(start+size)[ thus |
| 1010 // filling it up with a recognizable non-NULL bit pattern. |
| 1011 void ZapBlock(Address start, size_t size); |
| 1012 |
| 1013 void PerformAllocationCallback(ObjectSpace space, |
| 1014 AllocationAction action, |
| 1015 size_t size); |
| 1016 |
| 1017 void AddMemoryAllocationCallback(MemoryAllocationCallback callback, |
| 1018 ObjectSpace space, |
| 1019 AllocationAction action); |
| 1020 |
| 1021 void RemoveMemoryAllocationCallback( |
| 1022 MemoryAllocationCallback callback); |
| 1023 |
| 1024 bool MemoryAllocationCallbackRegistered( |
| 1025 MemoryAllocationCallback callback); |
| 659 | 1026 |
| 660 private: | 1027 private: |
| 661 static const int kChunkSize = kPagesPerChunk * Page::kPageSize; | |
| 662 | |
| 663 Isolate* isolate_; | 1028 Isolate* isolate_; |
| 664 | 1029 |
| 665 // Maximum space size in bytes. | 1030 // Maximum space size in bytes. |
| 666 intptr_t capacity_; | 1031 size_t capacity_; |
| 667 // Maximum subset of capacity_ that can be executable | 1032 // Maximum subset of capacity_ that can be executable |
| 668 intptr_t capacity_executable_; | 1033 size_t capacity_executable_; |
| 669 | 1034 |
| 670 // Allocated space size in bytes. | 1035 // Allocated space size in bytes. |
| 671 intptr_t size_; | 1036 size_t size_; |
| 672 | |
| 673 // Allocated executable space size in bytes. | 1037 // Allocated executable space size in bytes. |
| 674 intptr_t size_executable_; | 1038 size_t size_executable_; |
| 675 | 1039 |
| 676 struct MemoryAllocationCallbackRegistration { | 1040 struct MemoryAllocationCallbackRegistration { |
| 677 MemoryAllocationCallbackRegistration(MemoryAllocationCallback callback, | 1041 MemoryAllocationCallbackRegistration(MemoryAllocationCallback callback, |
| 678 ObjectSpace space, | 1042 ObjectSpace space, |
| 679 AllocationAction action) | 1043 AllocationAction action) |
| 680 : callback(callback), space(space), action(action) { | 1044 : callback(callback), space(space), action(action) { |
| 681 } | 1045 } |
| 682 MemoryAllocationCallback callback; | 1046 MemoryAllocationCallback callback; |
| 683 ObjectSpace space; | 1047 ObjectSpace space; |
| 684 AllocationAction action; | 1048 AllocationAction action; |
| 685 }; | 1049 }; |
| 1050 |
| 686 // A list of callbacks that are triggered when memory is allocated or freed. | 1051 // A list of callbacks that are triggered when memory is allocated or freed. |
| 687 List<MemoryAllocationCallbackRegistration> | 1052 List<MemoryAllocationCallbackRegistration> |
| 688 memory_allocation_callbacks_; | 1053 memory_allocation_callbacks_; |
| 689 | 1054 |
| 690 // The initial chunk of virtual memory. | |
| 691 VirtualMemory* initial_chunk_; | |
| 692 | |
| 693 // Allocated chunk info: chunk start address, chunk size, and owning space. | |
| 694 class ChunkInfo BASE_EMBEDDED { | |
| 695 public: | |
| 696 ChunkInfo() : address_(NULL), | |
| 697 size_(0), | |
| 698 owner_(NULL), | |
| 699 executable_(NOT_EXECUTABLE), | |
| 700 owner_identity_(FIRST_SPACE) {} | |
| 701 inline void init(Address a, size_t s, PagedSpace* o); | |
| 702 Address address() { return address_; } | |
| 703 size_t size() { return size_; } | |
| 704 PagedSpace* owner() { return owner_; } | |
| 705 // We save executability of the owner to allow using it | |
| 706 // when collecting stats after the owner has been destroyed. | |
| 707 Executability executable() const { return executable_; } | |
| 708 AllocationSpace owner_identity() const { return owner_identity_; } | |
| 709 | |
| 710 private: | |
| 711 Address address_; | |
| 712 size_t size_; | |
| 713 PagedSpace* owner_; | |
| 714 Executability executable_; | |
| 715 AllocationSpace owner_identity_; | |
| 716 }; | |
| 717 | |
| 718 // Chunks_, free_chunk_ids_ and top_ act as a stack of free chunk ids. | |
| 719 List<ChunkInfo> chunks_; | |
| 720 List<int> free_chunk_ids_; | |
| 721 int max_nof_chunks_; | |
| 722 int top_; | |
| 723 | |
| 724 // Push/pop a free chunk id onto/from the stack. | |
| 725 void Push(int free_chunk_id); | |
| 726 int Pop(); | |
| 727 bool OutOfChunkIds() { return top_ == 0; } | |
| 728 | |
| 729 // Frees a chunk. | |
| 730 void DeleteChunk(int chunk_id); | |
| 731 | |
| 732 // Basic check whether a chunk id is in the valid range. | |
| 733 inline bool IsValidChunkId(int chunk_id); | |
| 734 | |
| 735 // Checks whether a chunk id identifies an allocated chunk. | |
| 736 inline bool IsValidChunk(int chunk_id); | |
| 737 | |
| 738 // Returns the chunk id that a page belongs to. | |
| 739 inline int GetChunkId(Page* p); | |
| 740 | |
| 741 // True if the address lies in the initial chunk. | |
| 742 inline bool InInitialChunk(Address address); | |
| 743 | |
| 744 // Initializes pages in a chunk. Returns the first page address. | 1055 // Initializes pages in a chunk. Returns the first page address. |
| 745 // This function and GetChunkId() are provided for the mark-compact | 1056 // This function and GetChunkId() are provided for the mark-compact |
| 746 // collector to rebuild page headers in the from space, which is | 1057 // collector to rebuild page headers in the from space, which is |
| 747 // used as a marking stack, destroying its page headers. | 1058 // used as a marking stack, destroying its page headers. |
| 748 Page* InitializePagesInChunk(int chunk_id, int pages_in_chunk, | 1059 Page* InitializePagesInChunk(int chunk_id, int pages_in_chunk, |
| 749 PagedSpace* owner); | 1060 PagedSpace* owner); |
| 750 | 1061 |
| 751 Page* RelinkPagesInChunk(int chunk_id, | 1062 DISALLOW_IMPLICIT_CONSTRUCTORS(MemoryAllocator); |
| 752 Address chunk_start, | |
| 753 size_t chunk_size, | |
| 754 Page* prev, | |
| 755 Page** last_page_in_use); | |
| 756 | |
| 757 DISALLOW_COPY_AND_ASSIGN(MemoryAllocator); | |
| 758 }; | 1063 }; |
| 759 | 1064 |
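The chunk table, chunk ids and the old reserve/commit protocol are gone; the rewritten MemoryAllocator hands out whole pages directly. Below is a minimal usage sketch, not code from this patch: it assumes it is compiled inside v8::internal with the declarations above visible, that Page is a MemoryChunk in the new layout, and the capacity figures are invented.

    static bool SetupAndExercise(Isolate* isolate, PagedSpace* old_space) {
      MemoryAllocator allocator(isolate);
      // Hypothetical limits: 512 MB total, of which 128 MB may be executable.
      if (!allocator.Setup(512 * MB, 128 * MB)) return false;

      // One call now yields a fully initialized page owned by 'old_space'.
      Page* page = allocator.AllocatePage(old_space, NOT_EXECUTABLE);
      if (page == NULL) return false;

      // Pages are handed back to the allocator as MemoryChunks.
      allocator.Free(page);
      allocator.TearDown();
      return true;
    }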
| 760 | 1065 |
| 761 // ----------------------------------------------------------------------------- | 1066 // ----------------------------------------------------------------------------- |
| 762 // Interface for heap object iterator to be implemented by all object space | 1067 // Interface for heap object iterator to be implemented by all object space |
| 763 // object iterators. | 1068 // object iterators. |
| 764 // | 1069 // |
| 765 // NOTE: The space-specific object iterators also implement their own next() | 1070 // NOTE: The space-specific object iterators also implement their own next() |
| 766 // method, which is used to avoid virtual function calls | 1071 // method, which is used to avoid virtual function calls |
| 767 // when iterating a specific space. | 1072 // when iterating a specific space. |
| 768 | 1073 |
| 769 class ObjectIterator : public Malloced { | 1074 class ObjectIterator : public Malloced { |
| 770 public: | 1075 public: |
| 771 virtual ~ObjectIterator() { } | 1076 virtual ~ObjectIterator() { } |
| 772 | 1077 |
| 773 virtual HeapObject* next_object() = 0; | 1078 virtual HeapObject* next_object() = 0; |
| 774 }; | 1079 }; |
| 775 | 1080 |
| 776 | 1081 |
| 777 // ----------------------------------------------------------------------------- | 1082 // ----------------------------------------------------------------------------- |
| 778 // Heap object iterator in new/old/map spaces. | 1083 // Heap object iterator in new/old/map spaces. |
| 779 // | 1084 // |
| 780 // A HeapObjectIterator iterates objects from a given address to the | 1085 // A HeapObjectIterator iterates objects from the bottom of the given space |
| 781 // top of a space. The given address must be below the current | 1086 // to its top or from the bottom of the given page to its top. |
| 782 // allocation pointer (space top). There are some caveats. | |
| 783 // | 1087 // |
| 784 // (1) If the space top changes upward during iteration (because of | 1088 // If objects are allocated in the page during iteration the iterator may |
| 785 // allocating new objects), the iterator does not iterate objects | 1089 // or may not iterate over those objects. The caller must create a new |
| 786 // above the original space top. The caller must create a new | 1090 // iterator in order to be sure to visit these new objects. |
| 787 // iterator starting from the old top in order to visit these new | |
| 788 // objects. | |
| 789 // | |
| 790 // (2) If new objects are allocated below the original allocation top | |
| 791 // (e.g., free-list allocation in paged spaces), the new objects | |
| 792 // may or may not be iterated depending on their position with | |
| 793 // respect to the current point of iteration. | |
| 794 // | |
| 795 // (3) The space top should not change downward during iteration, | |
| 796 // otherwise the iterator will return not-necessarily-valid | |
| 797 // objects. | |
| 798 | |
| 799 class HeapObjectIterator: public ObjectIterator { | 1091 class HeapObjectIterator: public ObjectIterator { |
| 800 public: | 1092 public: |
| 801 // Creates a new object iterator in a given space. If a start | 1093 // Creates a new object iterator in a given space. |
| 802 // address is not given, the iterator starts from the space bottom. | |
| 803 // If the size function is not given, the iterator calls the default | 1094 // If the size function is not given, the iterator calls the default |
| 804 // Object::Size(). | 1095 // Object::Size(). |
| 805 explicit HeapObjectIterator(PagedSpace* space); | 1096 explicit HeapObjectIterator(PagedSpace* space); |
| 806 HeapObjectIterator(PagedSpace* space, HeapObjectCallback size_func); | 1097 HeapObjectIterator(PagedSpace* space, HeapObjectCallback size_func); |
| 807 HeapObjectIterator(PagedSpace* space, Address start); | |
| 808 HeapObjectIterator(PagedSpace* space, | |
| 809 Address start, | |
| 810 HeapObjectCallback size_func); | |
| 811 HeapObjectIterator(Page* page, HeapObjectCallback size_func); | 1098 HeapObjectIterator(Page* page, HeapObjectCallback size_func); |
| 812 | 1099 |
| 813 inline HeapObject* next() { | 1100 // Advance to the next object, skipping free spaces and other fillers and |
| 814 return (cur_addr_ < cur_limit_) ? FromCurrentPage() : FromNextPage(); | 1101 // skipping the special garbage section of which there is one per space. |
| 1102 // Returns NULL when the iteration has ended. |
| 1103 inline HeapObject* Next() { |
| 1104 do { |
| 1105 HeapObject* next_obj = FromCurrentPage(); |
| 1106 if (next_obj != NULL) return next_obj; |
| 1107 } while (AdvanceToNextPage()); |
| 1108 return NULL; |
| 815 } | 1109 } |
| 816 | 1110 |
| 817 // implementation of ObjectIterator. | 1111 virtual HeapObject* next_object() { |
| 818 virtual HeapObject* next_object() { return next(); } | 1112 return Next(); |
| 1113 } |
| 819 | 1114 |
| 820 private: | 1115 private: |
| 821 Address cur_addr_; // current iteration point | 1116 enum PageMode { kOnePageOnly, kAllPagesInSpace }; |
| 822 Address end_addr_; // end iteration point | |
| 823 Address cur_limit_; // current page limit | |
| 824 HeapObjectCallback size_func_; // size function | |
| 825 Page* end_page_; // caches the page of the end address | |
| 826 | 1117 |
| 827 HeapObject* FromCurrentPage() { | 1118 Address cur_addr_; // Current iteration point. |
| 828 ASSERT(cur_addr_ < cur_limit_); | 1119 Address cur_end_; // End iteration point. |
| 1120 HeapObjectCallback size_func_; // Size function or NULL. |
| 1121 PagedSpace* space_; |
| 1122 PageMode page_mode_; |
| 829 | 1123 |
| 830 HeapObject* obj = HeapObject::FromAddress(cur_addr_); | 1124 // Fast (inlined) path of Next(). |
| 831 int obj_size = (size_func_ == NULL) ? obj->Size() : size_func_(obj); | 1125 inline HeapObject* FromCurrentPage(); |
| 832 ASSERT_OBJECT_SIZE(obj_size); | |
| 833 | 1126 |
| 834 cur_addr_ += obj_size; | 1127 // Slow path of Next(), goes into the next page. Returns false if the |
| 835 ASSERT(cur_addr_ <= cur_limit_); | 1128 // iteration has ended. |
| 836 | 1129 bool AdvanceToNextPage(); |
| 837 return obj; | |
| 838 } | |
| 839 | |
| 840 // Slow path of next, goes into the next page. | |
| 841 HeapObject* FromNextPage(); | |
| 842 | 1130 |
| 843 // Initializes fields. | 1131 // Initializes fields. |
| 844 void Initialize(Address start, Address end, HeapObjectCallback size_func); | 1132 inline void Initialize(PagedSpace* owner, |
| 1133 Address start, |
| 1134 Address end, |
| 1135 PageMode mode, |
| 1136 HeapObjectCallback size_func); |
| 845 | 1137 |
| 846 #ifdef DEBUG | 1138 #ifdef DEBUG |
| 847 // Verifies whether fields have valid values. | 1139 // Verifies whether fields have valid values. |
| 848 void Verify(); | 1140 void Verify(); |
| 849 #endif | 1141 #endif |
| 850 }; | 1142 }; |
| 851 | 1143 |
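With the new protocol the caller no longer tracks a start address or a limit; Next() simply returns NULL at the end. A hedged usage sketch follows; the helper name is made up and v8::internal context is assumed.

    static int CountObjectsIn(PagedSpace* space) {
      HeapObjectIterator it(space);
      int count = 0;
      // Next() already skips fillers, free-list nodes and the garbage section.
      for (HeapObject* obj = it.Next(); obj != NULL; obj = it.Next()) {
        count++;
      }
      return count;
    }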
| 852 | 1144 |
| 853 // ----------------------------------------------------------------------------- | 1145 // ----------------------------------------------------------------------------- |
| 854 // A PageIterator iterates the pages in a paged space. | 1146 // A PageIterator iterates the pages in a paged space. |
| 855 // | |
| 856 // The PageIterator class provides three modes for iterating pages in a space: | |
| 857 // PAGES_IN_USE iterates pages containing allocated objects. | |
| 858 // PAGES_USED_BY_MC iterates pages that hold relocated objects during a | |
| 859 // mark-compact collection. | |
| 860 // ALL_PAGES iterates all pages in the space. | |
| 861 // | |
| 862 // There are some caveats. | |
| 863 // | |
| 864 // (1) If the space expands during iteration, new pages will not be | |
| 865 // returned by the iterator in any mode. | |
| 866 // | |
| 867 // (2) If new objects are allocated during iteration, they will appear | |
| 868 // in pages returned by the iterator. Allocation may cause the | |
| 869 // allocation pointer or MC allocation pointer in the last page to | |
| 870 // change between constructing the iterator and iterating the last | |
| 871 // page. | |
| 872 // | |
| 873 // (3) The space should not shrink during iteration, otherwise the | |
| 874 // iterator will return deallocated pages. | |
| 875 | 1147 |
| 876 class PageIterator BASE_EMBEDDED { | 1148 class PageIterator BASE_EMBEDDED { |
| 877 public: | 1149 public: |
| 878 enum Mode { | 1150 explicit inline PageIterator(PagedSpace* space); |
| 879 PAGES_IN_USE, | |
| 880 PAGES_USED_BY_MC, | |
| 881 ALL_PAGES | |
| 882 }; | |
| 883 | |
| 884 PageIterator(PagedSpace* space, Mode mode); | |
| 885 | 1151 |
| 886 inline bool has_next(); | 1152 inline bool has_next(); |
| 887 inline Page* next(); | 1153 inline Page* next(); |
| 888 | 1154 |
| 889 private: | 1155 private: |
| 890 PagedSpace* space_; | 1156 PagedSpace* space_; |
| 891 Page* prev_page_; // Previous page returned. | 1157 Page* prev_page_; // Previous page returned. |
| 892 Page* stop_page_; // Page to stop at (last page returned by the iterator). | 1158 // Next page that will be returned. Cached here so that we can use this |
| 1159 // iterator for operations that deallocate pages. |
| 1160 Page* next_page_; |
| 893 }; | 1161 }; |
| 894 | 1162 |
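Since the three old iteration modes collapsed into one, typical use is just a has_next()/next() loop. A small sketch (hypothetical helper, v8::internal context assumed); the cached next_page_ is what makes it safe to release the page that was just returned.

    static void ForEachPage(PagedSpace* space) {
      PageIterator it(space);
      while (it.has_next()) {
        Page* p = it.next();
        // ... inspect p; p may even be released here without breaking the loop ...
      }
    }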
| 895 | 1163 |
| 896 // ----------------------------------------------------------------------------- | 1164 // ----------------------------------------------------------------------------- |
| 897 // A space has a list of pages. The next page can be accessed via | 1165 // A space has a circular list of pages. The next page can be accessed via |
| 898 // Page::next_page() call. The next page of the last page is an | 1166 // Page::next_page() call. |
| 899 // invalid page pointer. A space can expand and shrink dynamically. | |
| 900 | 1167 |
| 901 // An abstraction of allocation and relocation pointers in a page-structured | 1168 // An abstraction of allocation and relocation pointers in a page-structured |
| 902 // space. | 1169 // space. |
| 903 class AllocationInfo { | 1170 class AllocationInfo { |
| 904 public: | 1171 public: |
| 905 Address top; // current allocation top | 1172 AllocationInfo() : top(NULL), limit(NULL) { |
| 906 Address limit; // current allocation limit | 1173 } |
| 1174 |
| 1175 Address top; // Current allocation top. |
| 1176 Address limit; // Current allocation limit. |
| 907 | 1177 |
| 908 #ifdef DEBUG | 1178 #ifdef DEBUG |
| 909 bool VerifyPagedAllocation() { | 1179 bool VerifyPagedAllocation() { |
| 910 return (Page::FromAllocationTop(top) == Page::FromAllocationTop(limit)) | 1180 return (Page::FromAllocationTop(top) == Page::FromAllocationTop(limit)) |
| 911 && (top <= limit); | 1181 && (top <= limit); |
| 912 } | 1182 } |
| 913 #endif | 1183 #endif |
| 914 }; | 1184 }; |
| 915 | 1185 |
| 916 | 1186 |
| (...skipping 11 matching lines...) |
| 928 // functions increase or decrease one of the non-capacity stats in | 1198 // functions increase or decrease one of the non-capacity stats in |
| 929 // conjunction with capacity, or else they always balance increases and | 1199 // conjunction with capacity, or else they always balance increases and |
| 930 // decreases to the non-capacity stats. | 1200 // decreases to the non-capacity stats. |
| 931 class AllocationStats BASE_EMBEDDED { | 1201 class AllocationStats BASE_EMBEDDED { |
| 932 public: | 1202 public: |
| 933 AllocationStats() { Clear(); } | 1203 AllocationStats() { Clear(); } |
| 934 | 1204 |
| 935 // Zero out all the allocation statistics (ie, no capacity). | 1205 // Zero out all the allocation statistics (ie, no capacity). |
| 936 void Clear() { | 1206 void Clear() { |
| 937 capacity_ = 0; | 1207 capacity_ = 0; |
| 938 available_ = 0; | |
| 939 size_ = 0; | 1208 size_ = 0; |
| 940 waste_ = 0; | 1209 waste_ = 0; |
| 941 } | 1210 } |
| 942 | 1211 |
| 1212 void ClearSizeWaste() { |
| 1213 size_ = capacity_; |
| 1214 waste_ = 0; |
| 1215 } |
| 1216 |
| 943 // Reset the allocation statistics (ie, available = capacity with no | 1217 // Reset the allocation statistics (ie, available = capacity with no |
| 944 // wasted or allocated bytes). | 1218 // wasted or allocated bytes). |
| 945 void Reset() { | 1219 void Reset() { |
| 946 available_ = capacity_; | |
| 947 size_ = 0; | 1220 size_ = 0; |
| 948 waste_ = 0; | 1221 waste_ = 0; |
| 949 } | 1222 } |
| 950 | 1223 |
| 951 // Accessors for the allocation statistics. | 1224 // Accessors for the allocation statistics. |
| 952 intptr_t Capacity() { return capacity_; } | 1225 intptr_t Capacity() { return capacity_; } |
| 953 intptr_t Available() { return available_; } | |
| 954 intptr_t Size() { return size_; } | 1226 intptr_t Size() { return size_; } |
| 955 intptr_t Waste() { return waste_; } | 1227 intptr_t Waste() { return waste_; } |
| 956 | 1228 |
| 957 // Grow the space by adding available bytes. | 1229 // Grow the space by adding available bytes. They are initially marked as |
| 1230 // being in use (part of the size), but will normally be immediately freed, |
| 1231 // putting them on the free list and removing them from size_. |
| 958 void ExpandSpace(int size_in_bytes) { | 1232 void ExpandSpace(int size_in_bytes) { |
| 959 capacity_ += size_in_bytes; | 1233 capacity_ += size_in_bytes; |
| 960 available_ += size_in_bytes; | 1234 size_ += size_in_bytes; |
| 1235 ASSERT(size_ >= 0); |
| 961 } | 1236 } |
| 962 | 1237 |
| 963 // Shrink the space by removing available bytes. | 1238 // Shrink the space by removing available bytes. Since shrinking is done |
| 1239 // during sweeping, bytes have been marked as being in use (part of the size) |
| 1240 // and are hereby freed. |
| 964 void ShrinkSpace(int size_in_bytes) { | 1241 void ShrinkSpace(int size_in_bytes) { |
| 965 capacity_ -= size_in_bytes; | 1242 capacity_ -= size_in_bytes; |
| 966 available_ -= size_in_bytes; | 1243 size_ -= size_in_bytes; |
| 1244 ASSERT(size_ >= 0); |
| 967 } | 1245 } |
| 968 | 1246 |
| 969 // Allocate from available bytes (available -> size). | 1247 // Allocate from available bytes (available -> size). |
| 970 void AllocateBytes(intptr_t size_in_bytes) { | 1248 void AllocateBytes(intptr_t size_in_bytes) { |
| 971 available_ -= size_in_bytes; | |
| 972 size_ += size_in_bytes; | 1249 size_ += size_in_bytes; |
| 1250 ASSERT(size_ >= 0); |
| 973 } | 1251 } |
| 974 | 1252 |
| 975 // Free allocated bytes, making them available (size -> available). | 1253 // Free allocated bytes, making them available (size -> available). |
| 976 void DeallocateBytes(intptr_t size_in_bytes) { | 1254 void DeallocateBytes(intptr_t size_in_bytes) { |
| 977 size_ -= size_in_bytes; | 1255 size_ -= size_in_bytes; |
| 978 available_ += size_in_bytes; | 1256 ASSERT(size_ >= 0); |
| 979 } | 1257 } |
| 980 | 1258 |
| 981 // Waste free bytes (available -> waste). | 1259 // Waste free bytes (available -> waste). |
| 982 void WasteBytes(int size_in_bytes) { | 1260 void WasteBytes(int size_in_bytes) { |
| 983 available_ -= size_in_bytes; | 1261 size_ -= size_in_bytes; |
| 984 waste_ += size_in_bytes; | 1262 waste_ += size_in_bytes; |
| 985 } | 1263 ASSERT(size_ >= 0); |
| 986 | |
| 987 // Consider the wasted bytes to be allocated, as they contain filler | |
| 988 // objects (waste -> size). | |
| 989 void FillWastedBytes(intptr_t size_in_bytes) { | |
| 990 waste_ -= size_in_bytes; | |
| 991 size_ += size_in_bytes; | |
| 992 } | 1264 } |
| 993 | 1265 |
| 994 private: | 1266 private: |
| 995 intptr_t capacity_; | 1267 intptr_t capacity_; |
| 996 intptr_t available_; | |
| 997 intptr_t size_; | 1268 intptr_t size_; |
| 998 intptr_t waste_; | 1269 intptr_t waste_; |
| 999 }; | 1270 }; |
| 1000 | 1271 |
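To make the new accounting concrete: there is no available_ counter any more, a fresh page's object area starts out counted in size_, and sweeping moves bytes out of size_ onto the free list. The standalone walk-through below mirrors ExpandSpace/DeallocateBytes/AllocateBytes/WasteBytes with invented numbers; it is an illustration, not patch code.

    #include <cassert>

    int main() {
      long capacity = 0, size = 0, waste = 0;

      const long kObjectArea = 8 * 1024;  // hypothetical object area of one page
      capacity += kObjectArea;            // ExpandSpace(): new page counted as used
      size += kObjectArea;

      size -= 6 * 1024;                   // DeallocateBytes(): sweeper frees 6K,
                                          // which now lives on the free list
      size += 1 * 1024;                   // AllocateBytes(): a 1K object is allocated

      size -= 16;                         // WasteBytes(): a 16-byte remainder is
      waste += 16;                        // too small to keep

      assert(size >= 0 && size + waste <= capacity);
      return 0;
    }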
| 1001 | 1272 |
| 1273 // ----------------------------------------------------------------------------- |
| 1274 // Free lists for old object spaces |
| 1275 // |
| 1276 // Free-list nodes are free blocks in the heap. They look like heap objects |
| 1277 // (free-list node pointers have the heap object tag, and they have a map like |
| 1278 // a heap object). They have a size and a next pointer. The next pointer is |
| 1279 // the raw address of the next free list node (or NULL). |
| 1280 class FreeListNode: public HeapObject { |
| 1281 public: |
| 1282 // Obtain a free-list node from a raw address. This is not a cast because |
| 1283 // it does not check nor require that the first word at the address is a map |
| 1284 // pointer. |
| 1285 static FreeListNode* FromAddress(Address address) { |
| 1286 return reinterpret_cast<FreeListNode*>(HeapObject::FromAddress(address)); |
| 1287 } |
| 1288 |
| 1289 static inline bool IsFreeListNode(HeapObject* object); |
| 1290 |
| 1291 // Set the size in bytes, which can be read with HeapObject::Size(). This |
| 1292 // function also writes a map to the first word of the block so that it |
| 1293 // looks like a heap object to the garbage collector and heap iteration |
| 1294 // functions. |
| 1295 void set_size(Heap* heap, int size_in_bytes); |
| 1296 |
| 1297 // Accessors for the next field. |
| 1298 inline FreeListNode* next(); |
| 1299 inline FreeListNode** next_address(); |
| 1300 inline void set_next(FreeListNode* next); |
| 1301 |
| 1302 inline void Zap(); |
| 1303 |
| 1304 private: |
| 1305 static const int kNextOffset = POINTER_SIZE_ALIGN(FreeSpace::kHeaderSize); |
| 1306 |
| 1307 DISALLOW_IMPLICIT_CONSTRUCTORS(FreeListNode); |
| 1308 }; |
| 1309 |
| 1310 |
| 1311 // The free list for the old space. The free list is organized in such a way |
| 1312 // as to encourage objects allocated around the same time to be near each |
| 1313 // other. The normal way to allocate is intended to be by bumping a 'top' |
| 1314 // pointer until it hits a 'limit' pointer. When the limit is hit we need to |
| 1315 // find a new space to allocate from. This is done with the free list, which |
| 1316 // is divided up into rough categories to cut down on waste. Having finer |
| 1317 // categories would scatter allocation more. |
| 1318 |
| 1319 // The old space free list is organized in categories. |
| 1320 // 1-31 words: Such small free areas are discarded for efficiency reasons. |
| 1321 // They can be reclaimed by the compactor. However the distance between top |
| 1322 // and limit may be this small. |
| 1323 // 32-255 words: There is a list of spaces this large. It is used for top and |
| 1324 // limit when the object we need to allocate is 1-31 words in size. These |
| 1325 // spaces are called small. |
| 1326 // 256-2047 words: There is a list of spaces this large. It is used for top and |
| 1327 // limit when the object we need to allocate is 32-255 words in size. These |
| 1328 // spaces are called medium. |
| 1329 // 2048-16383 words: There is a list of spaces this large. It is used for top |
| 1330 // and limit when the object we need to allocate is 256-2047 words in size. |
| 1331 // These spaces are called large. |
| 1332 // At least 16384 words. This list is for objects of 2048 words or larger. |
| 1333 // Empty pages are added to this list. These spaces are called huge. |
| 1334 class FreeList BASE_EMBEDDED { |
| 1335 public: |
| 1336 explicit FreeList(PagedSpace* owner); |
| 1337 |
| 1338 // Clear the free list. |
| 1339 void Reset(); |
| 1340 |
| 1341 // Return the number of bytes available on the free list. |
| 1342 intptr_t available() { return available_; } |
| 1343 |
| 1344 // Place a node on the free list. The block of size 'size_in_bytes' |
| 1345 // starting at 'start' is placed on the free list. The return value is the |
| 1346 // number of bytes that have been lost due to internal fragmentation by |
| 1347 // freeing the block. Bookkeeping information will be written to the block, |
| 1348 // ie, its contents will be destroyed. The start address should be word |
| 1349 // aligned, and the size should be a non-zero multiple of the word size. |
| 1350 int Free(Address start, int size_in_bytes); |
| 1351 |
| 1352 // Allocate a block of size 'size_in_bytes' from the free list. The block |
| 1353 // is uninitialized. A failure is returned if no block is available. The |
| 1354 // number of bytes lost to fragmentation is returned in the output parameter |
| 1355 // 'wasted_bytes'. The size should be a non-zero multiple of the word size. |
| 1356 MUST_USE_RESULT HeapObject* Allocate(int size_in_bytes); |
| 1357 |
| 1358 void MarkNodes(); |
| 1359 |
| 1360 #ifdef DEBUG |
| 1361 void Zap(); |
| 1362 static intptr_t SumFreeList(FreeListNode* node); |
| 1363 static int FreeListLength(FreeListNode* cur); |
| 1364 intptr_t SumFreeLists(); |
| 1365 bool IsVeryLong(); |
| 1366 #endif |
| 1367 |
| 1368 void CountFreeListItems(Page* p, intptr_t* sizes); |
| 1369 |
| 1370 private: |
| 1371 // The size range of blocks, in bytes. |
| 1372 static const int kMinBlockSize = 3 * kPointerSize; |
| 1373 static const int kMaxBlockSize = Page::kMaxHeapObjectSize; |
| 1374 |
| 1375 FreeListNode* PickNodeFromList(FreeListNode** list, int* node_size); |
| 1376 |
| 1377 FreeListNode* FindNodeFor(int size_in_bytes, int* node_size); |
| 1378 |
| 1379 PagedSpace* owner_; |
| 1380 Heap* heap_; |
| 1381 |
| 1382 // Total available bytes in all blocks on this free list. |
| 1383 int available_; |
| 1384 |
| 1385 static const int kSmallListMin = 0x20 * kPointerSize; |
| 1386 static const int kSmallListMax = 0xff * kPointerSize; |
| 1387 static const int kMediumListMax = 0x7ff * kPointerSize; |
| 1388 static const int kLargeListMax = 0x3fff * kPointerSize; |
| 1389 static const int kSmallAllocationMax = kSmallListMin - kPointerSize; |
| 1390 static const int kMediumAllocationMax = kSmallListMax; |
| 1391 static const int kLargeAllocationMax = kMediumListMax; |
| 1392 FreeListNode* small_list_; |
| 1393 FreeListNode* medium_list_; |
| 1394 FreeListNode* large_list_; |
| 1395 FreeListNode* huge_list_; |
| 1396 |
| 1397 DISALLOW_IMPLICIT_CONSTRUCTORS(FreeList); |
| 1398 }; |
| 1399 |
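The size classes described above are easier to see with concrete numbers. The standalone sketch below (not part of spaces.h) maps the size of a free block, in words, to the list it would land on; the thresholds are copied from kSmallListMin, kSmallListMax, kMediumListMax and kLargeListMax, expressed in words.

    #include <cstdio>

    static const int kSmallListMinWords  = 0x20;    //    32 words
    static const int kSmallListMaxWords  = 0xff;    //   255 words
    static const int kMediumListMaxWords = 0x7ff;   //  2047 words
    static const int kLargeListMaxWords  = 0x3fff;  // 16383 words

    static const char* CategoryFor(int size_in_words) {
      if (size_in_words < kSmallListMinWords) return "discarded (reclaimed by the compactor)";
      if (size_in_words <= kSmallListMaxWords) return "small list";
      if (size_in_words <= kMediumListMaxWords) return "medium list";
      if (size_in_words <= kLargeListMaxWords) return "large list";
      return "huge list";
    }

    int main() {
      const int samples[] = { 16, 32, 255, 256, 2047, 2048, 16383, 16384 };
      for (int i = 0; i < 8; i++) {
        std::printf("%5d words -> %s\n", samples[i], CategoryFor(samples[i]));
      }
      return 0;
    }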
| 1400 |
| 1002 class PagedSpace : public Space { | 1401 class PagedSpace : public Space { |
| 1003 public: | 1402 public: |
| 1004 // Creates a space with a maximum capacity, and an id. | 1403 // Creates a space with a maximum capacity, and an id. |
| 1005 PagedSpace(Heap* heap, | 1404 PagedSpace(Heap* heap, |
| 1006 intptr_t max_capacity, | 1405 intptr_t max_capacity, |
| 1007 AllocationSpace id, | 1406 AllocationSpace id, |
| 1008 Executability executable); | 1407 Executability executable); |
| 1009 | 1408 |
| 1010 virtual ~PagedSpace() {} | 1409 virtual ~PagedSpace() {} |
| 1011 | 1410 |
| 1012 // Set up the space using the given address range of virtual memory (from | 1411 // Set up the space using the given address range of virtual memory (from |
| 1013 // the memory allocator's initial chunk) if possible. If the block of | 1412 // the memory allocator's initial chunk) if possible. If the block of |
| 1014 // addresses is not big enough to contain a single page-aligned page, a | 1413 // addresses is not big enough to contain a single page-aligned page, a |
| 1015 // fresh chunk will be allocated. | 1414 // fresh chunk will be allocated. |
| 1016 bool Setup(Address start, size_t size); | 1415 bool Setup(); |
| 1017 | 1416 |
| 1018 // Returns true if the space has been successfully set up and not | 1417 // Returns true if the space has been successfully set up and not |
| 1019 // subsequently torn down. | 1418 // subsequently torn down. |
| 1020 bool HasBeenSetup(); | 1419 bool HasBeenSetup(); |
| 1021 | 1420 |
| 1022 // Cleans up the space, frees all pages in this space except those belonging | 1421 // Cleans up the space, frees all pages in this space except those belonging |
| 1023 // to the initial chunk, uncommits addresses in the initial chunk. | 1422 // to the initial chunk, uncommits addresses in the initial chunk. |
| 1024 void TearDown(); | 1423 void TearDown(); |
| 1025 | 1424 |
| 1026 // Checks whether an object/address is in this space. | 1425 // Checks whether an object/address is in this space. |
| 1027 inline bool Contains(Address a); | 1426 inline bool Contains(Address a); |
| 1028 bool Contains(HeapObject* o) { return Contains(o->address()); } | 1427 bool Contains(HeapObject* o) { return Contains(o->address()); } |
| 1029 // Never crashes even if a is not a valid pointer. | |
| 1030 inline bool SafeContains(Address a); | |
| 1031 | 1428 |
| 1032 // Given an address occupied by a live object, return that object if it is | 1429 // Given an address occupied by a live object, return that object if it is |
| 1033 // in this space, or Failure::Exception() if it is not. The implementation | 1430 // in this space, or Failure::Exception() if it is not. The implementation |
| 1034 // iterates over objects in the page containing the address, the cost is | 1431 // iterates over objects in the page containing the address, the cost is |
| 1035 // linear in the number of objects in the page. It may be slow. | 1432 // linear in the number of objects in the page. It may be slow. |
| 1036 MUST_USE_RESULT MaybeObject* FindObject(Address addr); | 1433 MUST_USE_RESULT MaybeObject* FindObject(Address addr); |
| 1037 | 1434 |
| 1038 // Checks whether page is currently in use by this space. | 1435 // Prepares for a mark-compact GC. |
| 1039 bool IsUsed(Page* page); | 1436 virtual void PrepareForMarkCompact(); |
| 1040 | 1437 |
| 1041 void MarkAllPagesClean(); | 1438 // Current capacity without growing (Size() + Available()). |
| 1042 | |
| 1043 // Prepares for a mark-compact GC. | |
| 1044 virtual void PrepareForMarkCompact(bool will_compact); | |
| 1045 | |
| 1046 // The top of allocation in a page in this space. Undefined if page is unused. | |
| 1047 Address PageAllocationTop(Page* page) { | |
| 1048 return page == TopPageOf(allocation_info_) ? top() | |
| 1049 : PageAllocationLimit(page); | |
| 1050 } | |
| 1051 | |
| 1052 // The limit of allocation for a page in this space. | |
| 1053 virtual Address PageAllocationLimit(Page* page) = 0; | |
| 1054 | |
| 1055 void FlushTopPageWatermark() { | |
| 1056 AllocationTopPage()->SetCachedAllocationWatermark(top()); | |
| 1057 AllocationTopPage()->InvalidateWatermark(true); | |
| 1058 } | |
| 1059 | |
| 1060 // Current capacity without growing (Size() + Available() + Waste()). | |
| 1061 intptr_t Capacity() { return accounting_stats_.Capacity(); } | 1439 intptr_t Capacity() { return accounting_stats_.Capacity(); } |
| 1062 | 1440 |
| 1063 // Total amount of memory committed for this space. For paged | 1441 // Total amount of memory committed for this space. For paged |
| 1064 // spaces this equals the capacity. | 1442 // spaces this equals the capacity. |
| 1065 intptr_t CommittedMemory() { return Capacity(); } | 1443 intptr_t CommittedMemory() { return Capacity(); } |
| 1066 | 1444 |
| 1067 // Available bytes without growing. | 1445 // Sets the capacity, the available space and the wasted space to zero. |
| 1068 intptr_t Available() { return accounting_stats_.Available(); } | 1446 // The stats are rebuilt during sweeping by adding each page to the |
| 1447 // capacity and the size when it is encountered. As free spaces are |
| 1448 // discovered during the sweeping they are subtracted from the size and added |
| 1449 // to the available and wasted totals. |
| 1450 void ClearStats() { |
| 1451 accounting_stats_.ClearSizeWaste(); |
| 1452 } |
| 1069 | 1453 |
| 1070 // Allocated bytes in this space. | 1454 // Available bytes without growing. These are the bytes on the free list. |
| 1455 // The bytes in the linear allocation area are not included in this total |
| 1456 // because updating the stats would slow down allocation. New pages are |
| 1457 // immediately added to the free list so they show up here. |
| 1458 intptr_t Available() { return free_list_.available(); } |
| 1459 |
| 1460 // Allocated bytes in this space. Garbage bytes that were not found due to |
| 1461 // lazy sweeping are counted as being allocated! The bytes in the current |
| 1462 // linear allocation area (between top and limit) are also counted here. |
| 1071 virtual intptr_t Size() { return accounting_stats_.Size(); } | 1463 virtual intptr_t Size() { return accounting_stats_.Size(); } |
| 1072 | 1464 |
| 1073 // Wasted bytes due to fragmentation and not recoverable until the | 1465 // As size, but the bytes in the current linear allocation area are not |
| 1074 // next GC of this space. | 1466 // included. |
| 1075 intptr_t Waste() { return accounting_stats_.Waste(); } | 1467 virtual intptr_t SizeOfObjects() { return Size() - (limit() - top()); } |
| 1076 | 1468 |
| 1077 // Returns the address of the first object in this space. | 1469 // Wasted bytes in this space. These are just the bytes that were thrown away |
| 1078 Address bottom() { return first_page_->ObjectAreaStart(); } | 1470 // due to being too small to use for allocation. They do not include the |
| 1471 // free bytes that were not found at all due to lazy sweeping. |
| 1472 virtual intptr_t Waste() { return accounting_stats_.Waste(); } |
| 1079 | 1473 |
| 1080 // Returns the allocation pointer in this space. | 1474 // Returns the allocation pointer in this space. |
| 1081 Address top() { return allocation_info_.top; } | 1475 Address top() { |
| 1476 return allocation_info_.top; |
| 1477 } |
| 1478 Address limit() { return allocation_info_.limit; } |
| 1082 | 1479 |
| 1083 // Allocate the requested number of bytes in the space if possible, return a | 1480 // Allocate the requested number of bytes in the space if possible, return a |
| 1084 // failure object if not. | 1481 // failure object if not. |
| 1085 MUST_USE_RESULT inline MaybeObject* AllocateRaw(int size_in_bytes); | 1482 MUST_USE_RESULT inline MaybeObject* AllocateRaw(int size_in_bytes); |
| 1086 | 1483 |
| 1087 // Allocate the requested number of bytes for relocation during mark-compact | |
| 1088 // collection. | |
| 1089 MUST_USE_RESULT inline MaybeObject* MCAllocateRaw(int size_in_bytes); | |
| 1090 | |
| 1091 virtual bool ReserveSpace(int bytes); | 1484 virtual bool ReserveSpace(int bytes); |
| 1092 | 1485 |
| 1093 // Used by ReserveSpace. | 1486 // Give a block of memory to the space's free list. It might be added to |
| 1094 virtual void PutRestOfCurrentPageOnFreeList(Page* current_page) = 0; | 1487 // the free list or accounted as waste. |
| 1095 | 1488 // If add_to_freelist is false then just accounting stats are updated and |
| 1096 // Free all pages in range from prev (exclusive) to last (inclusive). | 1489 // no attempt to add area to free list is made. |
| 1097 // Freed pages are moved to the end of page list. | 1490 int Free(Address start, int size_in_bytes) { |
| 1098 void FreePages(Page* prev, Page* last); | 1491 int wasted = free_list_.Free(start, size_in_bytes); |
| 1099 | 1492 accounting_stats_.DeallocateBytes(size_in_bytes - wasted); |
| 1100 // Deallocates a block. | 1493 return size_in_bytes - wasted; |
| 1101 virtual void DeallocateBlock(Address start, | 1494 } |
| 1102 int size_in_bytes, | |
| 1103 bool add_to_freelist) = 0; | |
| 1104 | 1495 |
| 1105 // Set space allocation info. | 1496 // Set space allocation info. |
| 1106 void SetTop(Address top) { | 1497 void SetTop(Address top, Address limit) { |
| 1498 ASSERT(top == limit || |
| 1499 Page::FromAddress(top) == Page::FromAddress(limit - 1)); |
| 1107 allocation_info_.top = top; | 1500 allocation_info_.top = top; |
| 1108 allocation_info_.limit = PageAllocationLimit(Page::FromAllocationTop(top)); | 1501 allocation_info_.limit = limit; |
| 1109 } | 1502 } |
| 1110 | 1503 |
| 1111 // --------------------------------------------------------------------------- | 1504 void Allocate(int bytes) { |
| 1112 // Mark-compact collection support functions | 1505 accounting_stats_.AllocateBytes(bytes); |
| 1113 | |
| 1114 // Set the relocation point to the beginning of the space. | |
| 1115 void MCResetRelocationInfo(); | |
| 1116 | |
| 1117 // Writes relocation info to the top page. | |
| 1118 void MCWriteRelocationInfoToPage() { | |
| 1119 TopPageOf(mc_forwarding_info_)-> | |
| 1120 SetAllocationWatermark(mc_forwarding_info_.top); | |
| 1121 } | 1506 } |
| 1122 | 1507 |
| 1123 // Computes the offset of a given address in this space to the beginning | 1508 void IncreaseCapacity(int size) { |
| 1124 // of the space. | 1509 accounting_stats_.ExpandSpace(size); |
| 1125 int MCSpaceOffsetForAddress(Address addr); | 1510 } |
| 1126 | 1511 |
| 1127 // Updates the allocation pointer to the relocation top after a mark-compact | 1512 // Releases an unused page and shrinks the space. |
| 1128 // collection. | 1513 void ReleasePage(Page* page); |
| 1129 virtual void MCCommitRelocationInfo() = 0; | |
| 1130 | 1514 |
| 1131 // Releases half of unused pages. | 1515 // Releases all of the unused pages. |
| 1132 void Shrink(); | 1516 void ReleaseAllUnusedPages(); |
| 1133 | 1517 |
| 1134 // Ensures that the capacity is at least 'capacity'. Returns false on failure. | 1518 // The dummy page that anchors the linked list of pages. |
| 1135 bool EnsureCapacity(int capacity); | 1519 Page* anchor() { return &anchor_; } |
| 1136 | 1520 |
| 1137 #ifdef DEBUG | 1521 #ifdef DEBUG |
| 1138 // Print meta info and objects in this space. | 1522 // Print meta info and objects in this space. |
| 1139 virtual void Print(); | 1523 virtual void Print(); |
| 1140 | 1524 |
| 1141 // Verify integrity of this space. | 1525 // Verify integrity of this space. |
| 1142 virtual void Verify(ObjectVisitor* visitor); | 1526 virtual void Verify(ObjectVisitor* visitor); |
| 1143 | 1527 |
| 1528 // Reports statistics for the space |
| 1529 void ReportStatistics(); |
| 1530 |
| 1144 // Overridden by subclasses to verify space-specific object | 1531 // Overridden by subclasses to verify space-specific object |
| 1145 // properties (e.g., only maps or free-list nodes are in map space). | 1532 // properties (e.g., only maps or free-list nodes are in map space). |
| 1146 virtual void VerifyObject(HeapObject* obj) {} | 1533 virtual void VerifyObject(HeapObject* obj) {} |
| 1147 | 1534 |
| 1148 // Report code object related statistics | 1535 // Report code object related statistics |
| 1149 void CollectCodeStatistics(); | 1536 void CollectCodeStatistics(); |
| 1150 static void ReportCodeStatistics(); | 1537 static void ReportCodeStatistics(); |
| 1151 static void ResetCodeStatistics(); | 1538 static void ResetCodeStatistics(); |
| 1152 #endif | 1539 #endif |
| 1153 | 1540 |
| 1154 // Returns the page of the allocation pointer. | 1541 bool was_swept_conservatively() { return was_swept_conservatively_; } |
| 1155 Page* AllocationTopPage() { return TopPageOf(allocation_info_); } | 1542 void set_was_swept_conservatively(bool b) { was_swept_conservatively_ = b; } |
| 1156 | 1543 |
| 1157 void RelinkPageListInChunkOrder(bool deallocate_blocks); | 1544 // Evacuation candidates are swept by evacuator. Needs to return a valid |
| 1545 // result before _and_ after evacuation has finished. |
| 1546 static bool ShouldBeSweptLazily(Page* p) { |
| 1547 return !p->IsEvacuationCandidate() && |
| 1548 !p->IsFlagSet(Page::RESCAN_ON_EVACUATION) && |
| 1549 !p->WasSweptPrecisely(); |
| 1550 } |
| 1551 |
| 1552 void SetPagesToSweep(Page* first, Page* last) { |
| 1553 first_unswept_page_ = first; |
| 1554 last_unswept_page_ = last; |
| 1555 } |
| 1556 |
| 1557 bool AdvanceSweeper(intptr_t bytes_to_sweep); |
| 1558 |
| 1559 bool IsSweepingComplete() { |
| 1560 return !first_unswept_page_->is_valid(); |
| 1561 } |
| 1562 |
| 1563 Page* FirstPage() { return anchor_.next_page(); } |
| 1564 Page* LastPage() { return anchor_.prev_page(); } |
| 1565 |
| 1566 bool IsFragmented(Page* p) { |
| 1567 intptr_t sizes[4]; |
| 1568 free_list_.CountFreeListItems(p, sizes); |
| 1569 |
| 1570 intptr_t ratio; |
| 1571 intptr_t ratio_threshold; |
| 1572 if (identity() == CODE_SPACE) { |
| 1573 ratio = (sizes[1] * 10 + sizes[2] * 2) * 100 / Page::kObjectAreaSize; |
| 1574 ratio_threshold = 10; |
| 1575 } else { |
| 1576 ratio = (sizes[0] * 5 + sizes[1]) * 100 / Page::kObjectAreaSize; |
| 1577 ratio_threshold = 15; |
| 1578 } |
| 1579 |
| 1580 if (FLAG_trace_fragmentation) { |
| 1581 PrintF("%p [%d]: %d (%.2f%%) %d (%.2f%%) %d (%.2f%%) %d (%.2f%%) %s\n", |
| 1582 reinterpret_cast<void*>(p), |
| 1583 identity(), |
| 1584 static_cast<int>(sizes[0]), |
| 1585 static_cast<double>(sizes[0] * 100) / Page::kObjectAreaSize, |
| 1586 static_cast<int>(sizes[1]), |
| 1587 static_cast<double>(sizes[1] * 100) / Page::kObjectAreaSize, |
| 1588 static_cast<int>(sizes[2]), |
| 1589 static_cast<double>(sizes[2] * 100) / Page::kObjectAreaSize, |
| 1590 static_cast<int>(sizes[3]), |
| 1591 static_cast<double>(sizes[3] * 100) / Page::kObjectAreaSize, |
| 1592 (ratio > ratio_threshold) ? "[fragmented]" : ""); |
| 1593 } |
| 1594 |
| 1595 return (ratio > ratio_threshold) || |
| 1596 (FLAG_always_compact && sizes[3] != Page::kObjectAreaSize); |
| 1597 } |
| 1598 |
| 1599 void EvictEvacuationCandidatesFromFreeLists(); |
| 1600 |
| 1601 bool CanExpand(); |
| 1158 | 1602 |
| 1159 protected: | 1603 protected: |
| 1160 // Maximum capacity of this space. | 1604 // Maximum capacity of this space. |
| 1161 intptr_t max_capacity_; | 1605 intptr_t max_capacity_; |
| 1162 | 1606 |
| 1163 // Accounting information for this space. | 1607 // Accounting information for this space. |
| 1164 AllocationStats accounting_stats_; | 1608 AllocationStats accounting_stats_; |
| 1165 | 1609 |
| 1166 // The first page in this space. | 1610 // The dummy page that anchors the doubly-linked list of pages. |
| 1167 Page* first_page_; | 1611 Page anchor_; |
| 1168 | 1612 |
| 1169 // The last page in this space. Initially set in Setup, updated in | 1613 // The space's free list. |
| 1170 // Expand and Shrink. | 1614 FreeList free_list_; |
| 1171 Page* last_page_; | |
| 1172 | |
| 1173 // True if pages owned by this space are linked in chunk-order. | |
| 1174 // See comment for class MemoryAllocator for definition of chunk-order. | |
| 1175 bool page_list_is_chunk_ordered_; | |
| 1176 | 1615 |
| 1177 // Normal allocation information. | 1616 // Normal allocation information. |
| 1178 AllocationInfo allocation_info_; | 1617 AllocationInfo allocation_info_; |
| 1179 | 1618 |
| 1180 // Relocation information during mark-compact collections. | |
| 1181 AllocationInfo mc_forwarding_info_; | |
| 1182 | |
| 1183 // Bytes of each page that cannot be allocated. Possibly non-zero | 1619 // Bytes of each page that cannot be allocated. Possibly non-zero |
| 1184 // for pages in spaces with only fixed-size objects. Always zero | 1620 // for pages in spaces with only fixed-size objects. Always zero |
| 1185 // for pages in spaces with variable sized objects (those pages are | 1621 // for pages in spaces with variable sized objects (those pages are |
| 1186 // padded with free-list nodes). | 1622 // padded with free-list nodes). |
| 1187 int page_extra_; | 1623 int page_extra_; |
| 1188 | 1624 |
| 1189 // Sets allocation pointer to a page bottom. | 1625 bool was_swept_conservatively_; |
| 1190 static void SetAllocationInfo(AllocationInfo* alloc_info, Page* p); | |
| 1191 | 1626 |
| 1192 // Returns the top page specified by an allocation info structure. | 1627 Page* first_unswept_page_; |
| 1193 static Page* TopPageOf(AllocationInfo alloc_info) { | 1628 Page* last_unswept_page_; |
| 1194 return Page::FromAllocationTop(alloc_info.limit); | |
| 1195 } | |
| 1196 | |
| 1197 int CountPagesToTop() { | |
| 1198 Page* p = Page::FromAllocationTop(allocation_info_.top); | |
| 1199 PageIterator it(this, PageIterator::ALL_PAGES); | |
| 1200 int counter = 1; | |
| 1201 while (it.has_next()) { | |
| 1202 if (it.next() == p) return counter; | |
| 1203 counter++; | |
| 1204 } | |
| 1205 UNREACHABLE(); | |
| 1206 return -1; | |
| 1207 } | |
| 1208 | 1629 |
| 1209 // Expands the space by allocating a fixed number of pages. Returns false if | 1630 // Expands the space by allocating a fixed number of pages. Returns false if |
| 1210 // it cannot allocate the requested number of pages from the OS. Newly allocated | 1631 // it cannot allocate the requested number of pages from the OS. |
| 1211 // pages are appended to the last_page. | 1632 bool Expand(); |
| 1212 bool Expand(Page* last_page); | |
| 1213 | 1633 |
| 1214 // Generic fast case allocation function that tries linear allocation in | 1634 // Generic fast case allocation function that tries linear allocation at the |
| 1215 // the top page of 'alloc_info'. Returns NULL on failure. | 1635 // address denoted by top in allocation_info_. |
| 1216 inline HeapObject* AllocateLinearly(AllocationInfo* alloc_info, | 1636 inline HeapObject* AllocateLinearly(int size_in_bytes); |
| 1217 int size_in_bytes); | |
| 1218 | |
| 1219 // During normal allocation or deserialization, roll to the next page in | |
| 1220 // the space (there is assumed to be one) and allocate there. This | |
| 1221 // function is space-dependent. | |
| 1222 virtual HeapObject* AllocateInNextPage(Page* current_page, | |
| 1223 int size_in_bytes) = 0; | |
| 1224 | 1637 |
| 1225 // Slow path of AllocateRaw. This function is space-dependent. | 1638 // Slow path of AllocateRaw. This function is space-dependent. |
| 1226 MUST_USE_RESULT virtual HeapObject* SlowAllocateRaw(int size_in_bytes) = 0; | 1639 MUST_USE_RESULT virtual HeapObject* SlowAllocateRaw(int size_in_bytes); |
| 1227 | |
| 1228 // Slow path of MCAllocateRaw. | |
| 1229 MUST_USE_RESULT HeapObject* SlowMCAllocateRaw(int size_in_bytes); | |
| 1230 | 1640 |
| 1231 #ifdef DEBUG | 1641 #ifdef DEBUG |
| 1232 // Returns the number of total pages in this space. | 1642 // Returns the number of total pages in this space. |
| 1233 int CountTotalPages(); | 1643 int CountTotalPages(); |
| 1234 #endif | 1644 #endif |
| 1235 | 1645 |
| 1236 private: | |
| 1237 // Returns a pointer to the page of the relocation pointer. | |
| 1238 Page* MCRelocationTopPage() { return TopPageOf(mc_forwarding_info_); } | |
| 1239 | |
| 1240 friend class PageIterator; | 1646 friend class PageIterator; |
| 1241 }; | 1647 }; |
| 1242 | 1648 |
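The lazy-sweeping hooks (SetPagesToSweep, AdvanceSweeper, IsSweepingComplete) are new in this patch. The sketch below only shows how a caller might drive them to spread sweeping cost across allocations; the helper and the byte budget are invented, and v8::internal context is assumed.

    static void SweepALittle(PagedSpace* space) {
      const intptr_t kSweepingBudget = 32 * KB;  // hypothetical per-call budget
      if (!space->IsSweepingComplete()) {
        space->AdvanceSweeper(kSweepingBudget);  // sweeps at most this many bytes
      }
    }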
| 1243 | 1649 |
| 1244 class NumberAndSizeInfo BASE_EMBEDDED { | 1650 class NumberAndSizeInfo BASE_EMBEDDED { |
| 1245 public: | 1651 public: |
| 1246 NumberAndSizeInfo() : number_(0), bytes_(0) {} | 1652 NumberAndSizeInfo() : number_(0), bytes_(0) {} |
| 1247 | 1653 |
| 1248 int number() const { return number_; } | 1654 int number() const { return number_; } |
| 1249 void increment_number(int num) { number_ += num; } | 1655 void increment_number(int num) { number_ += num; } |
| (...skipping 19 matching lines...) |
| 1269 HistogramInfo() : NumberAndSizeInfo() {} | 1675 HistogramInfo() : NumberAndSizeInfo() {} |
| 1270 | 1676 |
| 1271 const char* name() { return name_; } | 1677 const char* name() { return name_; } |
| 1272 void set_name(const char* name) { name_ = name; } | 1678 void set_name(const char* name) { name_ = name; } |
| 1273 | 1679 |
| 1274 private: | 1680 private: |
| 1275 const char* name_; | 1681 const char* name_; |
| 1276 }; | 1682 }; |
| 1277 | 1683 |
| 1278 | 1684 |
| 1685 enum SemiSpaceId { |
| 1686 kFromSpace = 0, |
| 1687 kToSpace = 1 |
| 1688 }; |
| 1689 |
| 1690 |
| 1691 class SemiSpace; |
| 1692 |
| 1693 |
| 1694 class NewSpacePage : public MemoryChunk { |
| 1695 public: |
| 1696 // GC related flags copied from from-space to to-space when |
| 1697 // flipping semispaces. |
| 1698 static const intptr_t kCopyOnFlipFlagsMask = |
| 1699 (1 << MemoryChunk::POINTERS_TO_HERE_ARE_INTERESTING) | |
| 1700 (1 << MemoryChunk::POINTERS_FROM_HERE_ARE_INTERESTING) | |
| 1701 (1 << MemoryChunk::SCAN_ON_SCAVENGE); |
| 1702 |
| 1703 inline NewSpacePage* next_page() const { |
| 1704 return static_cast<NewSpacePage*>(next_chunk()); |
| 1705 } |
| 1706 |
| 1707 inline void set_next_page(NewSpacePage* page) { |
| 1708 set_next_chunk(page); |
| 1709 } |
| 1710 |
| 1711 inline NewSpacePage* prev_page() const { |
| 1712 return static_cast<NewSpacePage*>(prev_chunk()); |
| 1713 } |
| 1714 |
| 1715 inline void set_prev_page(NewSpacePage* page) { |
| 1716 set_prev_chunk(page); |
| 1717 } |
| 1718 |
| 1719 SemiSpace* semi_space() { |
| 1720 return reinterpret_cast<SemiSpace*>(owner()); |
| 1721 } |
| 1722 |
| 1723 bool is_anchor() { return !this->InNewSpace(); } |
| 1724 |
| 1725 static bool IsAtStart(Address addr) { |
| 1726 return (reinterpret_cast<intptr_t>(addr) & Page::kPageAlignmentMask) |
| 1727 == kObjectStartOffset; |
| 1728 } |
| 1729 |
| 1730 static bool IsAtEnd(Address addr) { |
| 1731 return (reinterpret_cast<intptr_t>(addr) & Page::kPageAlignmentMask) == 0; |
| 1732 } |
| 1733 |
| 1734 Address address() { |
| 1735 return reinterpret_cast<Address>(this); |
| 1736 } |
| 1737 |
| 1738 // Finds the NewSpacePage containing the given address.
| 1739 static inline NewSpacePage* FromAddress(Address address_in_page) { |
| 1740 Address page_start = |
| 1741 reinterpret_cast<Address>(reinterpret_cast<uintptr_t>(address_in_page) & |
| 1742 ~Page::kPageAlignmentMask); |
| 1743 NewSpacePage* page = reinterpret_cast<NewSpacePage*>(page_start); |
| 1744 ASSERT(page->InNewSpace()); |
| 1745 return page; |
| 1746 } |
| 1747 |
| 1748 // Find the page for a limit address. A limit address is either an address |
| 1749 // inside a page, or the address right after the last byte of a page. |
| 1750 static inline NewSpacePage* FromLimit(Address address_limit) { |
| 1751 return NewSpacePage::FromAddress(address_limit - 1); |
| 1752 } |
| 1753 |
| 1754 private: |
| 1755 // Create a NewSpacePage object that is only used as the anchor
| 1756 // for the doubly-linked list of real pages. |
| 1757 explicit NewSpacePage(SemiSpace* owner) { |
| 1758 InitializeAsAnchor(owner); |
| 1759 } |
| 1760 |
| 1761 static NewSpacePage* Initialize(Heap* heap, |
| 1762 Address start, |
| 1763 SemiSpace* semi_space); |
| 1764 |
| 1765 // Initialize a fake NewSpacePage used as a sentinel at the ends
| 1766 // of a doubly-linked list of real NewSpacePages. |
| 1767 // Only uses the prev/next links, and sets flags to not be in new-space. |
| 1768 void InitializeAsAnchor(SemiSpace* owner); |
| 1769 |
| 1770 friend class SemiSpace; |
| 1771 friend class SemiSpaceIterator; |
| 1772 }; |
| 1773 |
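The IsAtStart/IsAtEnd/FromAddress helpers above all depend on new-space pages being size-aligned, so the owning page can be recovered from any interior address by masking off the low bits. A minimal standalone sketch of that trick; the 1 MB page size and the addresses below are assumptions chosen for illustration, not V8 constants:

    #include <cassert>
    #include <cstdint>

    int main() {
      const uintptr_t kPageSize = 1 << 20;           // assumed page size
      const uintptr_t kPageAlignmentMask = kPageSize - 1;

      uintptr_t page_start = 0x40000000;             // hypothetical aligned page
      uintptr_t interior = page_start + 0x1234;      // an address inside the page

      // Same masking idea as NewSpacePage::FromAddress.
      assert((interior & ~kPageAlignmentMask) == page_start);

      // A limit address (one past the last byte) masks to the *next* page,
      // which is why FromLimit subtracts one byte before masking.
      uintptr_t limit = page_start + kPageSize;
      assert(((limit - 1) & ~kPageAlignmentMask) == page_start);
      return 0;
    }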
| 1774 |
| 1279 // ----------------------------------------------------------------------------- | 1775 // ----------------------------------------------------------------------------- |
| 1280 // SemiSpace in young generation | 1776 // SemiSpace in young generation |
| 1281 // | 1777 // |
| 1282 // A semispace is a contiguous chunk of memory. The mark-compact collector | 1778 // A semispace is a contiguous chunk of memory holding page-like memory |
| 1283 // uses the memory in the from space as a marking stack when tracing live | 1779 // chunks. The mark-compact collector uses the memory of the first page in |
| 1284 // objects. | 1780 // the from space as a marking stack when tracing live objects. |
| 1285 | 1781 |
| 1286 class SemiSpace : public Space { | 1782 class SemiSpace : public Space { |
| 1287 public: | 1783 public: |
| 1288 // Constructor. | 1784 // Constructor. |
| 1289 explicit SemiSpace(Heap* heap) : Space(heap, NEW_SPACE, NOT_EXECUTABLE) { | 1785 SemiSpace(Heap* heap, SemiSpaceId semispace) |
| 1290 start_ = NULL; | 1786 : Space(heap, NEW_SPACE, NOT_EXECUTABLE), |
| 1291 age_mark_ = NULL; | 1787 start_(NULL), |
| 1292 } | 1788 age_mark_(NULL), |
| 1789 id_(semispace), |
| 1790 anchor_(this), |
| 1791 current_page_(NULL) { } |
| 1293 | 1792 |
| 1294 // Sets up the semispace using the given chunk. | 1793 // Sets up the semispace using the given chunk. |
| 1295 bool Setup(Address start, int initial_capacity, int maximum_capacity); | 1794 bool Setup(Address start, int initial_capacity, int maximum_capacity); |
| 1296 | 1795 |
| 1297 // Tear down the space. Heap memory was not allocated by the space, so it | 1796 // Tear down the space. Heap memory was not allocated by the space, so it |
| 1298 // is not deallocated here. | 1797 // is not deallocated here. |
| 1299 void TearDown(); | 1798 void TearDown(); |
| 1300 | 1799 |
| 1301 // True if the space has been set up but not torn down. | 1800 // True if the space has been set up but not torn down. |
| 1302 bool HasBeenSetup() { return start_ != NULL; } | 1801 bool HasBeenSetup() { return start_ != NULL; } |
| 1303 | 1802 |
| 1304 // Grow the size of the semispace by committing extra virtual memory. | |
| 1305 // Assumes that the caller has checked that the semispace has not reached | |
| 1306 // its maximum capacity (and thus there is space available in the reserved | |
| 1307 // address range to grow). | |
| 1308 bool Grow(); | |
| 1309 | |
| 1310 // Grow the semispace to the new capacity. The new capacity | 1803 // Grow the semispace to the new capacity. The new capacity |
| 1311 // requested must be larger than the current capacity. | 1804 // requested must be larger than the current capacity and less than |
| 1805 // the maximum capacity. |
| 1312 bool GrowTo(int new_capacity); | 1806 bool GrowTo(int new_capacity); |
| 1313 | 1807 |
| 1314 // Shrinks the semispace to the new capacity. The new capacity | 1808 // Shrinks the semispace to the new capacity. The new capacity |
| 1315 // requested must be more than the amount of used memory in the | 1809 // requested must be more than the amount of used memory in the |
| 1316 // semispace and less than the current capacity. | 1810 // semispace and less than the current capacity. |
| 1317 bool ShrinkTo(int new_capacity); | 1811 bool ShrinkTo(int new_capacity); |
| 1318 | 1812 |
| 1319 // Returns the start address of the space. | 1813 // Returns the start address of the first page of the space. |
| 1320 Address low() { return start_; } | 1814 Address space_start() { |
| 1815 ASSERT(anchor_.next_page() != &anchor_); |
| 1816 return anchor_.next_page()->body(); |
| 1817 } |
| 1818 |
| 1819 // Returns the start address of the current page of the space. |
| 1820 Address page_low() { |
| 1821 ASSERT(anchor_.next_page() != &anchor_); |
| 1822 return current_page_->body(); |
| 1823 } |
| 1824 |
| 1321 // Returns one past the end address of the space. | 1825 // Returns one past the end address of the space. |
| 1322 Address high() { return low() + capacity_; } | 1826 Address space_end() { |
| 1827 return anchor_.prev_page()->body_limit(); |
| 1828 } |
| 1829 |
| 1830 // Returns one past the end address of the current page of the space. |
| 1831 Address page_high() { |
| 1832 return current_page_->body_limit(); |
| 1833 } |
| 1834 |
| 1835 bool AdvancePage() { |
| 1836 NewSpacePage* next_page = current_page_->next_page(); |
| 1837 if (next_page == anchor()) return false; |
| 1838 current_page_ = next_page; |
| 1839 return true; |
| 1840 } |
| 1841 |
| 1842 // Resets the space to using the first page. |
| 1843 void Reset(); |
| 1323 | 1844 |
| 1324 // Age mark accessors. | 1845 // Age mark accessors. |
| 1325 Address age_mark() { return age_mark_; } | 1846 Address age_mark() { return age_mark_; } |
| 1326 void set_age_mark(Address mark) { age_mark_ = mark; } | 1847 void set_age_mark(Address mark); |
| 1327 | 1848 |
| 1328 // True if the address is in the address range of this semispace (not | 1849 // True if the address is in the address range of this semispace (not |
| 1329 // necessarily below the allocation pointer). | 1850 // necessarily below the allocation pointer). |
| 1330 bool Contains(Address a) { | 1851 bool Contains(Address a) { |
| 1331 return (reinterpret_cast<uintptr_t>(a) & address_mask_) | 1852 return (reinterpret_cast<uintptr_t>(a) & address_mask_) |
| 1332 == reinterpret_cast<uintptr_t>(start_); | 1853 == reinterpret_cast<uintptr_t>(start_); |
| 1333 } | 1854 } |
| 1334 | 1855 |
| 1335 // True if the object is a heap object in the address range of this | 1856 // True if the object is a heap object in the address range of this |
| 1336 // semispace (not necessarily below the allocation pointer). | 1857 // semispace (not necessarily below the allocation pointer). |
| 1337 bool Contains(Object* o) { | 1858 bool Contains(Object* o) { |
| 1338 return (reinterpret_cast<uintptr_t>(o) & object_mask_) == object_expected_; | 1859 return (reinterpret_cast<uintptr_t>(o) & object_mask_) == object_expected_; |
| 1339 } | 1860 } |
| 1340 | 1861 |
| 1341 // The offset of an address from the beginning of the space. | |
| 1342 int SpaceOffsetForAddress(Address addr) { | |
| 1343 return static_cast<int>(addr - low()); | |
| 1344 } | |
| 1345 | |
| 1346 // If we don't have these here then SemiSpace will be abstract. However | 1862 // If we don't have these here then SemiSpace will be abstract. However |
| 1347 // they should never be called. | 1863 // they should never be called. |
| 1348 virtual intptr_t Size() { | 1864 virtual intptr_t Size() { |
| 1349 UNREACHABLE(); | 1865 UNREACHABLE(); |
| 1350 return 0; | 1866 return 0; |
| 1351 } | 1867 } |
| 1352 | 1868 |
| 1353 virtual bool ReserveSpace(int bytes) { | 1869 virtual bool ReserveSpace(int bytes) { |
| 1354 UNREACHABLE(); | 1870 UNREACHABLE(); |
| 1355 return false; | 1871 return false; |
| 1356 } | 1872 } |
| 1357 | 1873 |
| 1358 bool is_committed() { return committed_; } | 1874 bool is_committed() { return committed_; } |
| 1359 bool Commit(); | 1875 bool Commit(); |
| 1360 bool Uncommit(); | 1876 bool Uncommit(); |
| 1361 | 1877 |
| 1878 NewSpacePage* first_page() { return anchor_.next_page(); } |
| 1879 NewSpacePage* current_page() { return current_page_; } |
| 1880 |
| 1362 #ifdef DEBUG | 1881 #ifdef DEBUG |
| 1363 virtual void Print(); | 1882 virtual void Print(); |
| 1364 virtual void Verify(); | 1883 virtual void Verify(); |
| 1884 // Validate a range of addresses in a SemiSpace.
| 1885 // The "from" address must be on a page prior to the "to" address, |
| 1886 // in the linked page order, or it must be earlier on the same page. |
| 1887 static void AssertValidRange(Address from, Address to); |
| 1888 #else |
| 1889 // Do nothing. |
| 1890 inline static void AssertValidRange(Address from, Address to) {} |
| 1365 #endif | 1891 #endif |
| 1366 | 1892 |
| 1367 // Returns the current capacity of the semi space. | 1893 // Returns the current capacity of the semi space. |
| 1368 int Capacity() { return capacity_; } | 1894 int Capacity() { return capacity_; } |
| 1369 | 1895 |
| 1370 // Returns the maximum capacity of the semi space. | 1896 // Returns the maximum capacity of the semi space. |
| 1371 int MaximumCapacity() { return maximum_capacity_; } | 1897 int MaximumCapacity() { return maximum_capacity_; } |
| 1372 | 1898 |
| 1373 // Returns the initial capacity of the semi space. | 1899 // Returns the initial capacity of the semi space. |
| 1374 int InitialCapacity() { return initial_capacity_; } | 1900 int InitialCapacity() { return initial_capacity_; } |
| 1375 | 1901 |
| 1902 SemiSpaceId id() { return id_; } |
| 1903 |
| 1904 static void Swap(SemiSpace* from, SemiSpace* to); |
| 1905 |
| 1376 private: | 1906 private: |
| 1907 // Flips the semispace between being from-space and to-space. |
| 1908 // Copies the flags into the masked positions on all pages in the space. |
| 1909 void FlipPages(intptr_t flags, intptr_t flag_mask); |
| 1910 |
| 1911 NewSpacePage* anchor() { return &anchor_; } |
| 1912 |
| 1377 // The current and maximum capacity of the space. | 1913 // The current and maximum capacity of the space. |
| 1378 int capacity_; | 1914 int capacity_; |
| 1379 int maximum_capacity_; | 1915 int maximum_capacity_; |
| 1380 int initial_capacity_; | 1916 int initial_capacity_; |
| 1381 | 1917 |
| 1382 // The start address of the space. | 1918 // The start address of the space. |
| 1383 Address start_; | 1919 Address start_; |
| 1384 // Used to govern object promotion during mark-compact collection. | 1920 // Used to govern object promotion during mark-compact collection. |
| 1385 Address age_mark_; | 1921 Address age_mark_; |
| 1386 | 1922 |
| 1387 // Masks and comparison values to test for containment in this semispace. | 1923 // Masks and comparison values to test for containment in this semispace. |
| 1388 uintptr_t address_mask_; | 1924 uintptr_t address_mask_; |
| 1389 uintptr_t object_mask_; | 1925 uintptr_t object_mask_; |
| 1390 uintptr_t object_expected_; | 1926 uintptr_t object_expected_; |
| 1391 | 1927 |
| 1392 bool committed_; | 1928 bool committed_; |
| 1929 SemiSpaceId id_; |
| 1393 | 1930 |
| 1931 NewSpacePage anchor_; |
| 1932 NewSpacePage* current_page_; |
| 1933 |
| 1934 friend class SemiSpaceIterator; |
| 1935 friend class NewSpacePageIterator; |
| 1394 public: | 1936 public: |
| 1395 TRACK_MEMORY("SemiSpace") | 1937 TRACK_MEMORY("SemiSpace") |
| 1396 }; | 1938 }; |
| 1397 | 1939 |
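The semispace keeps its pages on a circular doubly-linked list threaded through the embedded anchor_ sentinel, so first_page() is simply anchor_.next_page() and AdvancePage() stops once the next link points back at the anchor. A standalone sketch of that shape, using a made-up PageNode type rather than the real NewSpacePage:

    #include <cassert>

    struct PageNode {
      PageNode* next = nullptr;
      PageNode* prev = nullptr;
    };

    int main() {
      PageNode anchor, p1, p2;
      // anchor <-> p1 <-> p2 <-> anchor (circular; the anchor is not a real page).
      anchor.next = &p1; p1.prev = &anchor;
      p1.next = &p2;     p2.prev = &p1;
      p2.next = &anchor; anchor.prev = &p2;

      PageNode* current = anchor.next;   // "first page"
      int pages = 0;
      while (current != &anchor) {       // AdvancePage-style termination
        ++pages;
        current = current->next;
      }
      assert(pages == 2);
      return 0;
    }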
| 1398 | 1940 |
| 1399 // A SemiSpaceIterator is an ObjectIterator that iterates over the active | 1941 // A SemiSpaceIterator is an ObjectIterator that iterates over the active |
| 1400 // semispace of the heap's new space. It iterates over the objects in the | 1942 // semispace of the heap's new space. It iterates over the objects in the |
| 1401 // semispace from a given start address (defaulting to the bottom of the | 1943 // semispace from a given start address (defaulting to the bottom of the |
| 1402 // semispace) to the top of the semispace. New objects allocated after the | 1944 // semispace) to the top of the semispace. New objects allocated after the |
| 1403 // iterator is created are not iterated. | 1945 // iterator is created are not iterated. |
| 1404 class SemiSpaceIterator : public ObjectIterator { | 1946 class SemiSpaceIterator : public ObjectIterator { |
| 1405 public: | 1947 public: |
| 1406 // Create an iterator over the objects in the given space. If no start | 1948 // Create an iterator over the objects in the given space. If no start |
| 1407 // address is given, the iterator starts from the bottom of the space. If | 1949 // address is given, the iterator starts from the bottom of the space. If |
| 1408 // no size function is given, the iterator calls Object::Size(). | 1950 // no size function is given, the iterator calls Object::Size(). |
| 1951 |
| 1952 // Iterate over all of allocated to-space. |
| 1409 explicit SemiSpaceIterator(NewSpace* space); | 1953 explicit SemiSpaceIterator(NewSpace* space); |
| 1954 // Iterate over all of allocated to-space, with a custom size function.
| 1410 SemiSpaceIterator(NewSpace* space, HeapObjectCallback size_func); | 1955 SemiSpaceIterator(NewSpace* space, HeapObjectCallback size_func); |
| 1956 // Iterate over part of allocated to-space, from start to the end |
| 1957 // of allocation. |
| 1411 SemiSpaceIterator(NewSpace* space, Address start); | 1958 SemiSpaceIterator(NewSpace* space, Address start); |
| 1959 // Iterate from one address to another in the same semi-space. |
| 1960 SemiSpaceIterator(Address from, Address to); |
| 1412 | 1961 |
| 1413 HeapObject* next() { | 1962 HeapObject* Next() { |
| 1414 if (current_ == limit_) return NULL; | 1963 if (current_ == limit_) return NULL; |
| 1964 if (NewSpacePage::IsAtEnd(current_)) { |
| 1965 NewSpacePage* page = NewSpacePage::FromLimit(current_); |
| 1966 page = page->next_page(); |
| 1967 ASSERT(!page->is_anchor()); |
| 1968 current_ = page->body(); |
| 1969 if (current_ == limit_) return NULL; |
| 1970 } |
| 1415 | 1971 |
| 1416 HeapObject* object = HeapObject::FromAddress(current_); | 1972 HeapObject* object = HeapObject::FromAddress(current_); |
| 1417 int size = (size_func_ == NULL) ? object->Size() : size_func_(object); | 1973 int size = (size_func_ == NULL) ? object->Size() : size_func_(object); |
| 1418 | 1974 |
| 1419 current_ += size; | 1975 current_ += size; |
| 1420 return object; | 1976 return object; |
| 1421 } | 1977 } |
| 1422 | 1978 |
| 1423 // Implementation of the ObjectIterator functions. | 1979 // Implementation of the ObjectIterator functions. |
| 1424 virtual HeapObject* next_object() { return next(); } | 1980 virtual HeapObject* next_object() { return Next(); } |
| 1425 | 1981 |
| 1426 private: | 1982 private: |
| 1427 void Initialize(NewSpace* space, Address start, Address end, | 1983 void Initialize(Address start, |
| 1984 Address end, |
| 1428 HeapObjectCallback size_func); | 1985 HeapObjectCallback size_func); |
| 1429 | 1986 |
| 1430 // The semispace. | |
| 1431 SemiSpace* space_; | |
| 1432 // The current iteration point. | 1987 // The current iteration point. |
| 1433 Address current_; | 1988 Address current_; |
| 1434 // The end of iteration. | 1989 // The end of iteration. |
| 1435 Address limit_; | 1990 Address limit_; |
| 1436 // The callback function. | 1991 // The callback function. |
| 1437 HeapObjectCallback size_func_; | 1992 HeapObjectCallback size_func_; |
| 1438 }; | 1993 }; |
| 1439 | 1994 |
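Next() above walks the allocated part of to-space by adding each object's size to the cursor, hopping to the next page's body at a page boundary and returning NULL once the allocation top is reached. Ignoring the page hop, the core advance step amounts to this standalone analogue, with invented object sizes and addresses:

    #include <cassert>
    #include <cstdint>

    int main() {
      // Byte sizes of three objects laid out back to back from `bottom`.
      int sizes[] = { 16, 32, 24 };
      uintptr_t bottom = 0x1000;
      uintptr_t top = bottom + 16 + 32 + 24;   // allocation top ("limit_")

      uintptr_t current = bottom;              // "current_"
      int visited = 0;
      while (current != top) {                 // Next() returns NULL at the limit
        current += sizes[visited];             // object->Size() / size_func_(object)
        ++visited;
      }
      assert(visited == 3);
      return 0;
    }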
| 1440 | 1995 |
| 1441 // ----------------------------------------------------------------------------- | 1996 // ----------------------------------------------------------------------------- |
| 1997 // A PageIterator iterates the pages in a semi-space. |
| 1998 class NewSpacePageIterator BASE_EMBEDDED { |
| 1999 public: |
| 2000 // Make an iterator that runs over all pages in to-space. |
| 2001 explicit inline NewSpacePageIterator(NewSpace* space); |
| 2002 |
| 2003 // Make an iterator that runs over all pages in the given semispace, |
| 2004 // even those not used in allocation. |
| 2005 explicit inline NewSpacePageIterator(SemiSpace* space); |
| 2006 |
| 2007 // Make an iterator that iterates from the page containing start
| 2008 // to the page that contains limit in the same semispace. |
| 2009 inline NewSpacePageIterator(Address start, Address limit); |
| 2010 |
| 2011 inline bool has_next(); |
| 2012 inline NewSpacePage* next(); |
| 2013 |
| 2014 private: |
| 2015 NewSpacePage* prev_page_; // Previous page returned. |
| 2016 // Next page that will be returned. Cached here so that we can use this |
| 2017 // iterator for operations that deallocate pages. |
| 2018 NewSpacePage* next_page_; |
| 2019 // Last page returned. |
| 2020 NewSpacePage* last_page_; |
| 2021 }; |
| 2022 |
| 2023 |
| 2024 // ----------------------------------------------------------------------------- |
| 1442 // The young generation space. | 2025 // The young generation space. |
| 1443 // | 2026 // |
| 1444 // The new space consists of a contiguous pair of semispaces. It simply | 2027 // The new space consists of a contiguous pair of semispaces. It simply |
| 1445 // forwards most functions to the appropriate semispace. | 2028 // forwards most functions to the appropriate semispace. |
| 1446 | 2029 |
| 1447 class NewSpace : public Space { | 2030 class NewSpace : public Space { |
| 1448 public: | 2031 public: |
| 1449 // Constructor. | 2032 // Constructor. |
| 1450 explicit NewSpace(Heap* heap) | 2033 explicit NewSpace(Heap* heap) |
| 1451 : Space(heap, NEW_SPACE, NOT_EXECUTABLE), | 2034 : Space(heap, NEW_SPACE, NOT_EXECUTABLE), |
| 1452 to_space_(heap), | 2035 to_space_(heap, kToSpace), |
| 1453 from_space_(heap) {} | 2036 from_space_(heap, kFromSpace), |
| 2037 reservation_(), |
| 2038 inline_allocation_limit_step_(0) {} |
| 1454 | 2039 |
| 1455 // Sets up the new space using the given chunk. | 2040 // Sets up the new space using the given chunk. |
| 1456 bool Setup(Address start, int size); | 2041 bool Setup(int reserved_semispace_size_, int max_semispace_size); |
| 1457 | 2042 |
| 1458 // Tears down the space. Heap memory was not allocated by the space, so it | 2043 // Tears down the space. Heap memory was not allocated by the space, so it |
| 1459 // is not deallocated here. | 2044 // is not deallocated here. |
| 1460 void TearDown(); | 2045 void TearDown(); |
| 1461 | 2046 |
| 1462 // True if the space has been set up but not torn down. | 2047 // True if the space has been set up but not torn down. |
| 1463 bool HasBeenSetup() { | 2048 bool HasBeenSetup() { |
| 1464 return to_space_.HasBeenSetup() && from_space_.HasBeenSetup(); | 2049 return to_space_.HasBeenSetup() && from_space_.HasBeenSetup(); |
| 1465 } | 2050 } |
| 1466 | 2051 |
| 1467 // Flip the pair of spaces. | 2052 // Flip the pair of spaces. |
| 1468 void Flip(); | 2053 void Flip(); |
| 1469 | 2054 |
| 1470 // Grow the capacity of the semispaces. Assumes that they are not at | 2055 // Grow the capacity of the semispaces. Assumes that they are not at |
| 1471 // their maximum capacity. | 2056 // their maximum capacity. |
| 1472 void Grow(); | 2057 void Grow(); |
| 1473 | 2058 |
| 1474 // Shrink the capacity of the semispaces. | 2059 // Shrink the capacity of the semispaces. |
| 1475 void Shrink(); | 2060 void Shrink(); |
| 1476 | 2061 |
| 1477 // True if the address or object lies in the address range of either | 2062 // True if the address or object lies in the address range of either |
| 1478 // semispace (not necessarily below the allocation pointer). | 2063 // semispace (not necessarily below the allocation pointer). |
| 1479 bool Contains(Address a) { | 2064 bool Contains(Address a) { |
| 1480 return (reinterpret_cast<uintptr_t>(a) & address_mask_) | 2065 return (reinterpret_cast<uintptr_t>(a) & address_mask_) |
| 1481 == reinterpret_cast<uintptr_t>(start_); | 2066 == reinterpret_cast<uintptr_t>(start_); |
| 1482 } | 2067 } |
| 2068 |
| 1483 bool Contains(Object* o) { | 2069 bool Contains(Object* o) { |
| 1484 return (reinterpret_cast<uintptr_t>(o) & object_mask_) == object_expected_; | 2070 Address a = reinterpret_cast<Address>(o); |
| 2071 return (reinterpret_cast<uintptr_t>(a) & object_mask_) == object_expected_; |
| 1485 } | 2072 } |
| 1486 | 2073 |
| 1487 // Return the allocated bytes in the active semispace. | 2074 // Return the allocated bytes in the active semispace. |
| 1488 virtual intptr_t Size() { return static_cast<int>(top() - bottom()); } | 2075 virtual intptr_t Size() { |
| 2076 return pages_used_ * Page::kObjectAreaSize + |
| 2077 static_cast<int>(top() - to_space_.page_low()); |
| 2078 } |
| 2079 |
| 1489 // The same, but returning an int. We have to have the one that returns | 2080 // The same, but returning an int. We have to have the one that returns |
| 1490 // intptr_t because it is inherited, but if we know we are dealing with the | 2081 // intptr_t because it is inherited, but if we know we are dealing with the |
| 1491 // new space, which can't get as big as the other spaces then this is useful: | 2082 // new space, which can't get as big as the other spaces then this is useful: |
| 1492 int SizeAsInt() { return static_cast<int>(Size()); } | 2083 int SizeAsInt() { return static_cast<int>(Size()); } |
| 1493 | 2084 |
| 1494 // Return the current capacity of a semispace. | 2085 // Return the current capacity of a semispace. |
| 2086 intptr_t EffectiveCapacity() { |
| 2087 ASSERT(to_space_.Capacity() == from_space_.Capacity()); |
| 2088 return (to_space_.Capacity() / Page::kPageSize) * Page::kObjectAreaSize; |
| 2089 } |
| 2090 |
| 2091 // Return the current capacity of a semispace. |
| 1495 intptr_t Capacity() { | 2092 intptr_t Capacity() { |
| 1496 ASSERT(to_space_.Capacity() == from_space_.Capacity()); | 2093 ASSERT(to_space_.Capacity() == from_space_.Capacity()); |
| 1497 return to_space_.Capacity(); | 2094 return to_space_.Capacity(); |
| 1498 } | 2095 } |
| 1499 | 2096 |
| 1500 // Return the total amount of memory committed for new space. | 2097 // Return the total amount of memory committed for new space. |
| 1501 intptr_t CommittedMemory() { | 2098 intptr_t CommittedMemory() { |
| 1502 if (from_space_.is_committed()) return 2 * Capacity(); | 2099 if (from_space_.is_committed()) return 2 * Capacity(); |
| 1503 return Capacity(); | 2100 return Capacity(); |
| 1504 } | 2101 } |
| 1505 | 2102 |
| 1506 // Return the available bytes without growing in the active semispace. | 2103 // Return the available bytes without growing or switching page in the |
| 1507 intptr_t Available() { return Capacity() - Size(); } | 2104 // active semispace. |
| 2105 intptr_t Available() { |
| 2106 return allocation_info_.limit - allocation_info_.top; |
| 2107 } |
| 1508 | 2108 |
| 1509 // Return the maximum capacity of a semispace. | 2109 // Return the maximum capacity of a semispace. |
| 1510 int MaximumCapacity() { | 2110 int MaximumCapacity() { |
| 1511 ASSERT(to_space_.MaximumCapacity() == from_space_.MaximumCapacity()); | 2111 ASSERT(to_space_.MaximumCapacity() == from_space_.MaximumCapacity()); |
| 1512 return to_space_.MaximumCapacity(); | 2112 return to_space_.MaximumCapacity(); |
| 1513 } | 2113 } |
| 1514 | 2114 |
| 1515 // Returns the initial capacity of a semispace. | 2115 // Returns the initial capacity of a semispace. |
| 1516 int InitialCapacity() { | 2116 int InitialCapacity() { |
| 1517 ASSERT(to_space_.InitialCapacity() == from_space_.InitialCapacity()); | 2117 ASSERT(to_space_.InitialCapacity() == from_space_.InitialCapacity()); |
| 1518 return to_space_.InitialCapacity(); | 2118 return to_space_.InitialCapacity(); |
| 1519 } | 2119 } |
| 1520 | 2120 |
| 1521 // Return the address of the allocation pointer in the active semispace. | 2121 // Return the address of the allocation pointer in the active semispace. |
| 1522 Address top() { return allocation_info_.top; } | 2122 Address top() { |
| 2123 ASSERT(to_space_.current_page()->ContainsLimit(allocation_info_.top)); |
| 2124 return allocation_info_.top; |
| 2125 } |
| 1523 // Return the address of the first object in the active semispace. | 2126 // Return the address of the first object in the active semispace. |
| 1524 Address bottom() { return to_space_.low(); } | 2127 Address bottom() { return to_space_.space_start(); } |
| 1525 | 2128 |
| 1526 // Get the age mark of the inactive semispace. | 2129 // Get the age mark of the inactive semispace. |
| 1527 Address age_mark() { return from_space_.age_mark(); } | 2130 Address age_mark() { return from_space_.age_mark(); } |
| 1528 // Set the age mark in the active semispace. | 2131 // Set the age mark in the active semispace. |
| 1529 void set_age_mark(Address mark) { to_space_.set_age_mark(mark); } | 2132 void set_age_mark(Address mark) { to_space_.set_age_mark(mark); } |
| 1530 | 2133 |
| 1531 // The start address of the space and a bit mask. Anding an address in the | 2134 // The start address of the space and a bit mask. Anding an address in the |
| 1532 // new space with the mask will result in the start address. | 2135 // new space with the mask will result in the start address. |
| 1533 Address start() { return start_; } | 2136 Address start() { return start_; } |
| 1534 uintptr_t mask() { return address_mask_; } | 2137 uintptr_t mask() { return address_mask_; } |
| 1535 | 2138 |
| 2139 INLINE(uint32_t AddressToMarkbitIndex(Address addr)) { |
| 2140 ASSERT(Contains(addr)); |
| 2141 ASSERT(IsAligned(OffsetFrom(addr), kPointerSize) || |
| 2142 IsAligned(OffsetFrom(addr) - 1, kPointerSize)); |
| 2143 return static_cast<uint32_t>(addr - start_) >> kPointerSizeLog2; |
| 2144 } |
| 2145 |
| 2146 INLINE(Address MarkbitIndexToAddress(uint32_t index)) { |
| 2147 return reinterpret_cast<Address>(index << kPointerSizeLog2); |
| 2148 } |
| 2149 |
| 1536 // The allocation top and limit addresses. | 2150 // The allocation top and limit addresses. |
| 1537 Address* allocation_top_address() { return &allocation_info_.top; } | 2151 Address* allocation_top_address() { return &allocation_info_.top; } |
| 1538 Address* allocation_limit_address() { return &allocation_info_.limit; } | 2152 Address* allocation_limit_address() { return &allocation_info_.limit; } |
| 1539 | 2153 |
| 1540 MUST_USE_RESULT MaybeObject* AllocateRaw(int size_in_bytes) { | 2154 MUST_USE_RESULT MaybeObject* AllocateRaw(int size_in_bytes) { |
| 1541 return AllocateRawInternal(size_in_bytes, &allocation_info_); | 2155 return AllocateRawInternal(size_in_bytes); |
| 1542 } | |
| 1543 | |
| 1544 // Allocate the requested number of bytes for relocation during mark-compact | |
| 1545 // collection. | |
| 1546 MUST_USE_RESULT MaybeObject* MCAllocateRaw(int size_in_bytes) { | |
| 1547 return AllocateRawInternal(size_in_bytes, &mc_forwarding_info_); | |
| 1548 } | 2156 } |
| 1549 | 2157 |
| 1550 // Reset the allocation pointer to the beginning of the active semispace. | 2158 // Reset the allocation pointer to the beginning of the active semispace. |
| 1551 void ResetAllocationInfo(); | 2159 void ResetAllocationInfo(); |
| 1552 // Reset the relocation pointer to the bottom of the inactive semispace in | |
| 1553 // preparation for mark-compact collection. | |
| 1554 void MCResetRelocationInfo(); | |
| 1555 // Update the allocation pointer in the active semispace after a | |
| 1556 // mark-compact collection. | |
| 1557 void MCCommitRelocationInfo(); | |
| 1558 | 2160 |
| 1559 // Get the extent of the inactive semispace (for use as a marking stack). | 2161 void LowerInlineAllocationLimit(intptr_t step) { |
| 1560 Address FromSpaceLow() { return from_space_.low(); } | 2162 inline_allocation_limit_step_ = step; |
| 1561 Address FromSpaceHigh() { return from_space_.high(); } | 2163 if (step == 0) { |
| 2164 allocation_info_.limit = to_space_.page_high(); |
| 2165 } else { |
| 2166 allocation_info_.limit = Min( |
| 2167 allocation_info_.top + inline_allocation_limit_step_, |
| 2168 allocation_info_.limit); |
| 2169 } |
| 2170 top_on_previous_step_ = allocation_info_.top; |
| 2171 } |
| 1562 | 2172 |
| 1563 // Get the extent of the active semispace (to sweep newly copied objects | 2173 // Get the extent of the inactive semispace (for use as a marking stack, |
| 1564 // during a scavenge collection). | 2174 // or to zap it). Notice: space-addresses are not necessarily on the |
| 1565 Address ToSpaceLow() { return to_space_.low(); } | 2175 // same page, so FromSpaceStart() might be above FromSpaceEnd(). |
| 1566 Address ToSpaceHigh() { return to_space_.high(); } | 2176 Address FromSpacePageLow() { return from_space_.page_low(); } |
| 2177 Address FromSpacePageHigh() { return from_space_.page_high(); } |
| 2178 Address FromSpaceStart() { return from_space_.space_start(); } |
| 2179 Address FromSpaceEnd() { return from_space_.space_end(); } |
| 1567 | 2180 |
| 1568 // Offsets from the beginning of the semispaces. | 2181 // Get the extent of the active semispace's pages' memory. |
| 1569 int ToSpaceOffsetForAddress(Address a) { | 2182 Address ToSpaceStart() { return to_space_.space_start(); } |
| 1570 return to_space_.SpaceOffsetForAddress(a); | 2183 Address ToSpaceEnd() { return to_space_.space_end(); } |
| 2184 |
| 2185 inline bool ToSpaceContains(Address address) { |
| 2186 return to_space_.Contains(address); |
| 1571 } | 2187 } |
| 1572 int FromSpaceOffsetForAddress(Address a) { | 2188 inline bool FromSpaceContains(Address address) { |
| 1573 return from_space_.SpaceOffsetForAddress(a); | 2189 return from_space_.Contains(address); |
| 1574 } | 2190 } |
| 1575 | 2191 |
| 1576 // True if the object is a heap object in the address range of the | 2192 // True if the object is a heap object in the address range of the |
| 1577 // respective semispace (not necessarily below the allocation pointer of the | 2193 // respective semispace (not necessarily below the allocation pointer of the |
| 1578 // semispace). | 2194 // semispace). |
| 1579 bool ToSpaceContains(Object* o) { return to_space_.Contains(o); } | 2195 inline bool ToSpaceContains(Object* o) { return to_space_.Contains(o); } |
| 1580 bool FromSpaceContains(Object* o) { return from_space_.Contains(o); } | 2196 inline bool FromSpaceContains(Object* o) { return from_space_.Contains(o); } |
| 1581 | 2197 |
| 1582 bool ToSpaceContains(Address a) { return to_space_.Contains(a); } | 2198 // Try to switch the active semispace to a new, empty, page. |
| 1583 bool FromSpaceContains(Address a) { return from_space_.Contains(a); } | 2199 // Returns false if this isn't possible or reasonable (i.e., there |
| 2200 // are no pages, or the current page is already empty), or true |
| 2201 // if successful. |
| 2202 bool AddFreshPage(); |
| 1584 | 2203 |
| 1585 virtual bool ReserveSpace(int bytes); | 2204 virtual bool ReserveSpace(int bytes); |
| 1586 | 2205 |
| 1587 // Resizes a sequential string which must be the most recent thing that was | 2206 // Resizes a sequential string which must be the most recent thing that was |
| 1588 // allocated in new space. | 2207 // allocated in new space. |
| 1589 template <typename StringType> | 2208 template <typename StringType> |
| 1590 inline void ShrinkStringAtAllocationBoundary(String* string, int len); | 2209 inline void ShrinkStringAtAllocationBoundary(String* string, int len); |
| 1591 | 2210 |
| 1592 #ifdef DEBUG | 2211 #ifdef DEBUG |
| 1593 // Verify the active semispace. | 2212 // Verify the active semispace. |
| (...skipping 19 matching lines...) |
| 1613 bool CommitFromSpaceIfNeeded() { | 2232 bool CommitFromSpaceIfNeeded() { |
| 1614 if (from_space_.is_committed()) return true; | 2233 if (from_space_.is_committed()) return true; |
| 1615 return from_space_.Commit(); | 2234 return from_space_.Commit(); |
| 1616 } | 2235 } |
| 1617 | 2236 |
| 1618 bool UncommitFromSpace() { | 2237 bool UncommitFromSpace() { |
| 1619 if (!from_space_.is_committed()) return true; | 2238 if (!from_space_.is_committed()) return true; |
| 1620 return from_space_.Uncommit(); | 2239 return from_space_.Uncommit(); |
| 1621 } | 2240 } |
| 1622 | 2241 |
| 2242 inline intptr_t inline_allocation_limit_step() { |
| 2243 return inline_allocation_limit_step_; |
| 2244 } |
| 2245 |
| 2246 SemiSpace* active_space() { return &to_space_; } |
| 2247 |
| 1623 private: | 2248 private: |
| 2249 // Update allocation info to match the current to-space page. |
| 2250 void UpdateAllocationInfo(); |
| 2251 |
| 2252 Address chunk_base_; |
| 2253 uintptr_t chunk_size_; |
| 2254 |
| 1624 // The semispaces. | 2255 // The semispaces. |
| 1625 SemiSpace to_space_; | 2256 SemiSpace to_space_; |
| 1626 SemiSpace from_space_; | 2257 SemiSpace from_space_; |
| 2258 VirtualMemory reservation_; |
| 2259 int pages_used_; |
| 1627 | 2260 |
| 1628 // Start address and bit mask for containment testing. | 2261 // Start address and bit mask for containment testing. |
| 1629 Address start_; | 2262 Address start_; |
| 1630 uintptr_t address_mask_; | 2263 uintptr_t address_mask_; |
| 1631 uintptr_t object_mask_; | 2264 uintptr_t object_mask_; |
| 1632 uintptr_t object_expected_; | 2265 uintptr_t object_expected_; |
| 1633 | 2266 |
| 1634 // Allocation pointer and limit for normal allocation and allocation during | 2267 // Allocation pointer and limit for normal allocation and allocation during |
| 1635 // mark-compact collection. | 2268 // mark-compact collection. |
| 1636 AllocationInfo allocation_info_; | 2269 AllocationInfo allocation_info_; |
| 1637 AllocationInfo mc_forwarding_info_; | 2270 |
| 2271 // When incremental marking is active, we will set allocation_info_.limit
| 2272 // to be lower than the actual limit and then will gradually increase it
| 2273 // in steps to guarantee that we do incremental marking steps even |
| 2274 // when all allocation is performed from inlined generated code. |
| 2275 intptr_t inline_allocation_limit_step_; |
| 2276 |
| 2277 Address top_on_previous_step_; |
| 1638 | 2278 |
| 1639 HistogramInfo* allocated_histogram_; | 2279 HistogramInfo* allocated_histogram_; |
| 1640 HistogramInfo* promoted_histogram_; | 2280 HistogramInfo* promoted_histogram_; |
| 1641 | 2281 |
| 1642 // Implementation of AllocateRaw and MCAllocateRaw. | 2282 // Implementation of AllocateRaw. |
| 1643 MUST_USE_RESULT inline MaybeObject* AllocateRawInternal( | 2283 MUST_USE_RESULT inline MaybeObject* AllocateRawInternal(int size_in_bytes); |
| 1644 int size_in_bytes, | |
| 1645 AllocationInfo* alloc_info); | |
| 1646 | 2284 |
| 1647 friend class SemiSpaceIterator; | 2285 friend class SemiSpaceIterator; |
| 1648 | 2286 |
| 1649 public: | 2287 public: |
| 1650 TRACK_MEMORY("NewSpace") | 2288 TRACK_MEMORY("NewSpace") |
| 1651 }; | 2289 }; |
| 1652 | 2290 |
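LowerInlineAllocationLimit() above clamps the limit seen by inline allocation to top plus the step, so generated code bails out to the runtime roughly every `step` bytes and incremental marking gets a chance to make progress; a step of zero restores the real page end. A standalone sketch of that clamping; AllocationWindow and the addresses are illustrative stand-ins, not the V8 types:

    #include <algorithm>
    #include <cassert>
    #include <cstdint>

    struct AllocationWindow {
      uintptr_t top;
      uintptr_t limit;       // limit actually checked by inline allocation
      uintptr_t page_high;   // true end of the current page
    };

    void LowerInlineAllocationLimit(AllocationWindow* info, intptr_t step) {
      if (step == 0) {
        info->limit = info->page_high;                               // real limit
      } else {
        info->limit = std::min<uintptr_t>(info->top + step, info->limit);
      }
    }

    int main() {
      AllocationWindow info = { 0x1000, 0x9000, 0x9000 };
      LowerInlineAllocationLimit(&info, 0x800);
      assert(info.limit == 0x1800);   // inline fast path now ends after 2 KB
      LowerInlineAllocationLimit(&info, 0);
      assert(info.limit == 0x9000);   // restored to the page end
      return 0;
    }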
| 1653 | 2291 |
| 1654 // ----------------------------------------------------------------------------- | 2292 // ----------------------------------------------------------------------------- |
| 1655 // Free lists for old object spaces | |
| 1656 // | |
| 1657 // Free-list nodes are free blocks in the heap. They look like heap objects | |
| 1658 // (free-list node pointers have the heap object tag, and they have a map like | |
| 1659 // a heap object). They have a size and a next pointer. The next pointer is | |
| 1660 // the raw address of the next free list node (or NULL). | |
| 1661 class FreeListNode: public HeapObject { | |
| 1662 public: | |
| 1663 // Obtain a free-list node from a raw address. This is not a cast because | |
| 1664 // it does not check nor require that the first word at the address is a map | |
| 1665 // pointer. | |
| 1666 static FreeListNode* FromAddress(Address address) { | |
| 1667 return reinterpret_cast<FreeListNode*>(HeapObject::FromAddress(address)); | |
| 1668 } | |
| 1669 | |
| 1670 static inline bool IsFreeListNode(HeapObject* object); | |
| 1671 | |
| 1672 // Set the size in bytes, which can be read with HeapObject::Size(). This | |
| 1673 // function also writes a map to the first word of the block so that it | |
| 1674 // looks like a heap object to the garbage collector and heap iteration | |
| 1675 // functions. | |
| 1676 void set_size(Heap* heap, int size_in_bytes); | |
| 1677 | |
| 1678 // Accessors for the next field. | |
| 1679 inline Address next(Heap* heap); | |
| 1680 inline void set_next(Heap* heap, Address next); | |
| 1681 | |
| 1682 private: | |
| 1683 static const int kNextOffset = POINTER_SIZE_ALIGN(ByteArray::kHeaderSize); | |
| 1684 | |
| 1685 DISALLOW_IMPLICIT_CONSTRUCTORS(FreeListNode); | |
| 1686 }; | |
| 1687 | |
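As the comment above explains, a freed block is dressed up to look like an ordinary heap object so that heap iteration can step over it by size, and it carries a raw link to the next free block. A minimal standalone analogue; the FreeBlock layout below is invented for illustration and much simpler than the real map-tagged nodes:

    #include <cassert>
    #include <cstddef>

    struct FreeBlock {
      std::size_t size_in_bytes;   // readable via the normal "object size" path
      FreeBlock* next;             // raw pointer to the next free block (or nullptr)
    };

    int main() {
      alignas(FreeBlock) unsigned char area[256];
      // Carve two free blocks out of the fake object area and link them.
      FreeBlock* a = reinterpret_cast<FreeBlock*>(area);
      FreeBlock* b = reinterpret_cast<FreeBlock*>(area + 64);
      a->size_in_bytes = 64; a->next = b;
      b->size_in_bytes = 32; b->next = nullptr;

      // Walking the list finds 96 free bytes in total.
      std::size_t available = 0;
      for (FreeBlock* n = a; n != nullptr; n = n->next) available += n->size_in_bytes;
      assert(available == 96);
      return 0;
    }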
| 1688 | |
| 1689 // The free list for the old space. | |
| 1690 class OldSpaceFreeList BASE_EMBEDDED { | |
| 1691 public: | |
| 1692 OldSpaceFreeList(Heap* heap, AllocationSpace owner); | |
| 1693 | |
| 1694 // Clear the free list. | |
| 1695 void Reset(); | |
| 1696 | |
| 1697 // Return the number of bytes available on the free list. | |
| 1698 intptr_t available() { return available_; } | |
| 1699 | |
| 1700 // Place a node on the free list. The block of size 'size_in_bytes' | |
| 1701 // starting at 'start' is placed on the free list. The return value is the | |
| 1702 // number of bytes that have been lost due to internal fragmentation by | |
| 1703 // freeing the block. Bookkeeping information will be written to the block, | |
| 1704 // ie, its contents will be destroyed. The start address should be word | |
| 1705 // aligned, and the size should be a non-zero multiple of the word size. | |
| 1706 int Free(Address start, int size_in_bytes); | |
| 1707 | |
| 1708 // Allocate a block of size 'size_in_bytes' from the free list. The block | |
| 1709 // is uninitialized. A failure is returned if no block is available. The | |
| 1710 // number of bytes lost to fragmentation is returned in the output parameter | |
| 1711 // 'wasted_bytes'. The size should be a non-zero multiple of the word size. | |
| 1712 MUST_USE_RESULT MaybeObject* Allocate(int size_in_bytes, int* wasted_bytes); | |
| 1713 | |
| 1714 void MarkNodes(); | |
| 1715 | |
| 1716 private: | |
| 1717 // The size range of blocks, in bytes. (Smaller allocations are allowed, but | |
| 1718 // will always result in waste.) | |
| 1719 static const int kMinBlockSize = 2 * kPointerSize; | |
| 1720 static const int kMaxBlockSize = Page::kMaxHeapObjectSize; | |
| 1721 | |
| 1722 Heap* heap_; | |
| 1723 | |
| 1724 // The identity of the owning space, for building allocation Failure | |
| 1725 // objects. | |
| 1726 AllocationSpace owner_; | |
| 1727 | |
| 1728 // Total available bytes in all blocks on this free list. | |
| 1729 int available_; | |
| 1730 | |
| 1731 // Blocks are put on exact free lists in an array, indexed by size in words. | |
| 1732 // The available sizes are kept in an increasingly ordered list. Entries | |
| 1733 // corresponding to sizes < kMinBlockSize always have an empty free list | |
| 1734 // (but index kHead is used for the head of the size list). | |
| 1735 struct SizeNode { | |
| 1736 // Address of the head FreeListNode of the implied block size or NULL. | |
| 1737 Address head_node_; | |
| 1738 // Size (words) of the next larger available size if head_node_ != NULL. | |
| 1739 int next_size_; | |
| 1740 }; | |
| 1741 static const int kFreeListsLength = kMaxBlockSize / kPointerSize + 1; | |
| 1742 SizeNode free_[kFreeListsLength]; | |
| 1743 | |
| 1744 // Sentinel elements for the size list. Real elements are in ]kHead..kEnd[. | |
| 1745 static const int kHead = kMinBlockSize / kPointerSize - 1; | |
| 1746 static const int kEnd = kMaxInt; | |
| 1747 | |
| 1748 // We keep a "finger" in the size list to speed up a common pattern: | |
| 1749 // repeated requests for the same or increasing sizes. | |
| 1750 int finger_; | |
| 1751 | |
| 1752 // Starting from *prev, find and return the smallest size >= index (words), | |
| 1753 // or kEnd. Update *prev to be the largest size < index, or kHead. | |
| 1754 int FindSize(int index, int* prev) { | |
| 1755 int cur = free_[*prev].next_size_; | |
| 1756 while (cur < index) { | |
| 1757 *prev = cur; | |
| 1758 cur = free_[cur].next_size_; | |
| 1759 } | |
| 1760 return cur; | |
| 1761 } | |
| 1762 | |
| 1763 // Remove an existing element from the size list. | |
| 1764 void RemoveSize(int index) { | |
| 1765 int prev = kHead; | |
| 1766 int cur = FindSize(index, &prev); | |
| 1767 ASSERT(cur == index); | |
| 1768 free_[prev].next_size_ = free_[cur].next_size_; | |
| 1769 finger_ = prev; | |
| 1770 } | |
| 1771 | |
| 1772 // Insert a new element into the size list. | |
| 1773 void InsertSize(int index) { | |
| 1774 int prev = kHead; | |
| 1775 int cur = FindSize(index, &prev); | |
| 1776 ASSERT(cur != index); | |
| 1777 free_[prev].next_size_ = index; | |
| 1778 free_[index].next_size_ = cur; | |
| 1779 } | |
| 1780 | |
| 1781 // The size list is not updated during a sequence of calls to Free, but is | |
| 1782 // rebuilt before the next allocation. | |
| 1783 void RebuildSizeList(); | |
| 1784 bool needs_rebuild_; | |
| 1785 | |
| 1786 #ifdef DEBUG | |
| 1787 // Does this free list contain a free block located at the address of 'node'? | |
| 1788 bool Contains(FreeListNode* node); | |
| 1789 #endif | |
| 1790 | |
| 1791 DISALLOW_COPY_AND_ASSIGN(OldSpaceFreeList); | |
| 1792 }; | |
| 1793 | |
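FindSize/InsertSize/RemoveSize above thread an increasing list of available block sizes (in words) through the free_ array, and the finger_ lets repeated requests for the same or growing sizes resume the walk partway along. A standalone sketch of the core first-fit search, using a plain array and invented sizes:

    #include <cassert>

    int main() {
      const int kEnd = 1 << 30;       // sentinel meaning "no larger size"
      const int kHead = 1;            // head sentinel index
      int next_size[16];
      for (int i = 0; i < 16; ++i) next_size[i] = kEnd;

      // Available block sizes (in words): 3, 5 and 9, kept in increasing order.
      next_size[kHead] = 3;
      next_size[3] = 5;
      next_size[5] = 9;

      // Find the smallest available size >= 4 words, starting from the head.
      int prev = kHead;
      int cur = next_size[prev];
      while (cur < 4) {
        prev = cur;
        cur = next_size[cur];
      }
      assert(cur == 5);               // first fit for a 4-word request
      assert(prev == 3);              // largest available size below the request
      return 0;
    }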
| 1794 | |
| 1795 // The free list for the map space. | |
| 1796 class FixedSizeFreeList BASE_EMBEDDED { | |
| 1797 public: | |
| 1798 FixedSizeFreeList(Heap* heap, AllocationSpace owner, int object_size); | |
| 1799 | |
| 1800 // Clear the free list. | |
| 1801 void Reset(); | |
| 1802 | |
| 1803 // Return the number of bytes available on the free list. | |
| 1804 intptr_t available() { return available_; } | |
| 1805 | |
| 1806 // Place a node on the free list. The block starting at 'start' (assumed to | |
| 1807 // have size object_size_) is placed on the free list. Bookkeeping | |
| 1808 // information will be written to the block, i.e., its contents will be | |
| 1809 // destroyed. The start address should be word aligned. | |
| 1810 void Free(Address start); | |
| 1811 | |
| 1812 // Allocate a fixed-size block from the free list. The block is uninitialized. | |
| 1813 // A failure is returned if no block is available. | |
| 1814 MUST_USE_RESULT MaybeObject* Allocate(); | |
| 1815 | |
| 1816 void MarkNodes(); | |
| 1817 | |
| 1818 private: | |
| 1819 Heap* heap_; | |
| 1820 | |
| 1821 // Available bytes on the free list. | |
| 1822 intptr_t available_; | |
| 1823 | |
| 1824 // The head of the free list. | |
| 1825 Address head_; | |
| 1826 | |
| 1827 // The tail of the free list. | |
| 1828 Address tail_; | |
| 1829 | |
| 1830 // The identity of the owning space, for building allocation Failure | |
| 1831 // objects. | |
| 1832 AllocationSpace owner_; | |
| 1833 | |
| 1834 // The size of the objects in this space. | |
| 1835 int object_size_; | |
| 1836 | |
| 1837 DISALLOW_COPY_AND_ASSIGN(FixedSizeFreeList); | |
| 1838 }; | |
| 1839 | |
| 1840 | |
| 1841 // ----------------------------------------------------------------------------- | |
| 1842 // Old object space (excluding map objects) | 2293 // Old object space (excluding map objects) |
| 1843 | 2294 |
| 1844 class OldSpace : public PagedSpace { | 2295 class OldSpace : public PagedSpace { |
| 1845 public: | 2296 public: |
| 1846 // Creates an old space object with a given maximum capacity. | 2297 // Creates an old space object with a given maximum capacity. |
| 1847 // The constructor does not allocate pages from OS. | 2298 // The constructor does not allocate pages from OS. |
| 1848 OldSpace(Heap* heap, | 2299 OldSpace(Heap* heap, |
| 1849 intptr_t max_capacity, | 2300 intptr_t max_capacity, |
| 1850 AllocationSpace id, | 2301 AllocationSpace id, |
| 1851 Executability executable) | 2302 Executability executable) |
| 1852 : PagedSpace(heap, max_capacity, id, executable), | 2303 : PagedSpace(heap, max_capacity, id, executable) { |
| 1853 free_list_(heap, id) { | |
| 1854 page_extra_ = 0; | 2304 page_extra_ = 0; |
| 1855 } | 2305 } |
| 1856 | 2306 |
| 1857 // The bytes available on the free list (ie, not above the linear allocation | |
| 1858 // pointer). | |
| 1859 intptr_t AvailableFree() { return free_list_.available(); } | |
| 1860 | |
| 1861 // The limit of allocation for a page in this space. | 2307 // The limit of allocation for a page in this space. |
| 1862 virtual Address PageAllocationLimit(Page* page) { | 2308 virtual Address PageAllocationLimit(Page* page) { |
| 1863 return page->ObjectAreaEnd(); | 2309 return page->ObjectAreaEnd(); |
| 1864 } | 2310 } |
| 1865 | 2311 |
| 1866 // Give a block of memory to the space's free list. It might be added to | |
| 1867 // the free list or accounted as waste. | |
| 1868 // If add_to_freelist is false then just accounting stats are updated and | |
| 1869 // no attempt to add area to free list is made. | |
| 1870 void Free(Address start, int size_in_bytes, bool add_to_freelist) { | |
| 1871 accounting_stats_.DeallocateBytes(size_in_bytes); | |
| 1872 | |
| 1873 if (add_to_freelist) { | |
| 1874 int wasted_bytes = free_list_.Free(start, size_in_bytes); | |
| 1875 accounting_stats_.WasteBytes(wasted_bytes); | |
| 1876 } | |
| 1877 } | |
| 1878 | |
| 1879 virtual void DeallocateBlock(Address start, | |
| 1880 int size_in_bytes, | |
| 1881 bool add_to_freelist); | |
| 1882 | |
| 1883 // Prepare for full garbage collection. Resets the relocation pointer and | |
| 1884 // clears the free list. | |
| 1885 virtual void PrepareForMarkCompact(bool will_compact); | |
| 1886 | |
| 1887 // Updates the allocation pointer to the relocation top after a mark-compact | |
| 1888 // collection. | |
| 1889 virtual void MCCommitRelocationInfo(); | |
| 1890 | |
| 1891 virtual void PutRestOfCurrentPageOnFreeList(Page* current_page); | |
| 1892 | |
| 1893 void MarkFreeListNodes() { free_list_.MarkNodes(); } | |
| 1894 | |
| 1895 #ifdef DEBUG | |
| 1896 // Reports statistics for the space | |
| 1897 void ReportStatistics(); | |
| 1898 #endif | |
| 1899 | |
| 1900 protected: | |
| 1901 // Virtual function in the superclass. Slow path of AllocateRaw. | |
| 1902 MUST_USE_RESULT HeapObject* SlowAllocateRaw(int size_in_bytes); | |
| 1903 | |
| 1904 // Virtual function in the superclass. Allocate linearly at the start of | |
| 1905 // the page after current_page (there is assumed to be one). | |
| 1906 HeapObject* AllocateInNextPage(Page* current_page, int size_in_bytes); | |
| 1907 | |
| 1908 private: | |
| 1909 // The space's free list. | |
| 1910 OldSpaceFreeList free_list_; | |
| 1911 | |
| 1912 public: | 2312 public: |
| 1913 TRACK_MEMORY("OldSpace") | 2313 TRACK_MEMORY("OldSpace") |
| 1914 }; | 2314 }; |
| 1915 | 2315 |
| 1916 | 2316 |
| 2317 // For contiguous spaces, top should be in the space (or at the end) and limit |
| 2318 // should be the end of the space. |
| 2319 #define ASSERT_SEMISPACE_ALLOCATION_INFO(info, space) \ |
| 2320 ASSERT((space).page_low() <= (info).top \ |
| 2321 && (info).top <= (space).page_high() \ |
| 2322 && (info).limit <= (space).page_high()) |
| 2323 |
| 2324 |
| 1917 // ----------------------------------------------------------------------------- | 2325 // ----------------------------------------------------------------------------- |
| 1918 // Old space for objects of a fixed size | 2326 // Old space for objects of a fixed size |
| 1919 | 2327 |
| 1920 class FixedSpace : public PagedSpace { | 2328 class FixedSpace : public PagedSpace { |
| 1921 public: | 2329 public: |
| 1922 FixedSpace(Heap* heap, | 2330 FixedSpace(Heap* heap, |
| 1923 intptr_t max_capacity, | 2331 intptr_t max_capacity, |
| 1924 AllocationSpace id, | 2332 AllocationSpace id, |
| 1925 int object_size_in_bytes, | 2333 int object_size_in_bytes, |
| 1926 const char* name) | 2334 const char* name) |
| 1927 : PagedSpace(heap, max_capacity, id, NOT_EXECUTABLE), | 2335 : PagedSpace(heap, max_capacity, id, NOT_EXECUTABLE), |
| 1928 object_size_in_bytes_(object_size_in_bytes), | 2336 object_size_in_bytes_(object_size_in_bytes), |
| 1929 name_(name), | 2337 name_(name) { |
| 1930 free_list_(heap, id, object_size_in_bytes) { | |
| 1931 page_extra_ = Page::kObjectAreaSize % object_size_in_bytes; | 2338 page_extra_ = Page::kObjectAreaSize % object_size_in_bytes; |
| 1932 } | 2339 } |
| 1933 | 2340 |
| 1934 // The limit of allocation for a page in this space. | 2341 // The limit of allocation for a page in this space. |
| 1935 virtual Address PageAllocationLimit(Page* page) { | 2342 virtual Address PageAllocationLimit(Page* page) { |
| 1936 return page->ObjectAreaEnd() - page_extra_; | 2343 return page->ObjectAreaEnd() - page_extra_; |
| 1937 } | 2344 } |
| 1938 | 2345 |
| 1939 int object_size_in_bytes() { return object_size_in_bytes_; } | 2346 int object_size_in_bytes() { return object_size_in_bytes_; } |
| 1940 | 2347 |
| 1941 // Give a fixed sized block of memory to the space's free list. | |
| 1942 // If add_to_freelist is false then just accounting stats are updated and | |
| 1943 // no attempt to add area to free list is made. | |
| 1944 void Free(Address start, bool add_to_freelist) { | |
| 1945 if (add_to_freelist) { | |
| 1946 free_list_.Free(start); | |
| 1947 } | |
| 1948 accounting_stats_.DeallocateBytes(object_size_in_bytes_); | |
| 1949 } | |
| 1950 | |
| 1951 // Prepares for a mark-compact GC. | 2348 // Prepares for a mark-compact GC. |
| 1952 virtual void PrepareForMarkCompact(bool will_compact); | 2349 virtual void PrepareForMarkCompact(); |
| 1953 | |
| 1954 // Updates the allocation pointer to the relocation top after a mark-compact | |
| 1955 // collection. | |
| 1956 virtual void MCCommitRelocationInfo(); | |
| 1957 | |
| 1958 virtual void PutRestOfCurrentPageOnFreeList(Page* current_page); | |
| 1959 | |
| 1960 virtual void DeallocateBlock(Address start, | |
| 1961 int size_in_bytes, | |
| 1962 bool add_to_freelist); | |
| 1963 | 2350 |
| 1964 void MarkFreeListNodes() { free_list_.MarkNodes(); } | 2351 void MarkFreeListNodes() { free_list_.MarkNodes(); } |
| 1965 | 2352 |
| 1966 #ifdef DEBUG | |
| 1967 // Reports statistic info of the space | |
| 1968 void ReportStatistics(); | |
| 1969 #endif | |
| 1970 | |
| 1971 protected: | 2353 protected: |
| 1972 // Virtual function in the superclass. Slow path of AllocateRaw. | |
| 1973 MUST_USE_RESULT HeapObject* SlowAllocateRaw(int size_in_bytes); | |
| 1974 | |
| 1975 // Virtual function in the superclass. Allocate linearly at the start of | |
| 1976 // the page after current_page (there is assumed to be one). | |
| 1977 HeapObject* AllocateInNextPage(Page* current_page, int size_in_bytes); | |
| 1978 | |
| 1979 void ResetFreeList() { | 2354 void ResetFreeList() { |
| 1980 free_list_.Reset(); | 2355 free_list_.Reset(); |
| 1981 } | 2356 } |
| 1982 | 2357 |
| 1983 private: | 2358 private: |
| 1984 // The size of objects in this space. | 2359 // The size of objects in this space. |
| 1985 int object_size_in_bytes_; | 2360 int object_size_in_bytes_; |
| 1986 | 2361 |
| 1987 // The name of this space. | 2362 // The name of this space. |
| 1988 const char* name_; | 2363 const char* name_; |
| 1989 | |
| 1990 // The space's free list. | |
| 1991 FixedSizeFreeList free_list_; | |
| 1992 }; | 2364 }; |
| 1993 | 2365 |
| 1994 | 2366 |
| 1995 // ----------------------------------------------------------------------------- | 2367 // ----------------------------------------------------------------------------- |
| 1996 // Old space for all map objects | 2368 // Old space for all map objects |
| 1997 | 2369 |
| 1998 class MapSpace : public FixedSpace { | 2370 class MapSpace : public FixedSpace { |
| 1999 public: | 2371 public: |
| 2000 // Creates a map space object with a maximum capacity. | 2372 // Creates a map space object with a maximum capacity. |
| 2001 MapSpace(Heap* heap, | 2373 MapSpace(Heap* heap, |
| 2002 intptr_t max_capacity, | 2374 intptr_t max_capacity, |
| 2003 int max_map_space_pages, | 2375 int max_map_space_pages, |
| 2004 AllocationSpace id) | 2376 AllocationSpace id) |
| 2005 : FixedSpace(heap, max_capacity, id, Map::kSize, "map"), | 2377 : FixedSpace(heap, max_capacity, id, Map::kSize, "map"), |
| 2006 max_map_space_pages_(max_map_space_pages) { | 2378 max_map_space_pages_(max_map_space_pages) { |
| 2007 ASSERT(max_map_space_pages < kMaxMapPageIndex); | |
| 2008 } | 2379 } |
| 2009 | 2380 |
| 2010 // Prepares for a mark-compact GC. | 2381 // Given an index, returns the page address. |
| 2011 virtual void PrepareForMarkCompact(bool will_compact); | 2382 // TODO(1600): this limit is artificial just to keep code compilable |
| 2383 static const int kMaxMapPageIndex = 1 << 16; |
| 2012 | 2384 |
| 2013 // Given an index, returns the page address. | 2385 virtual int RoundSizeDownToObjectAlignment(int size) { |
| 2014 Address PageAddress(int page_index) { return page_addresses_[page_index]; } | 2386 if (IsPowerOf2(Map::kSize)) { |
| 2015 | 2387 return RoundDown(size, Map::kSize); |
| 2016 static const int kMaxMapPageIndex = 1 << MapWord::kMapPageIndexBits; | 2388 } else { |
| 2017 | 2389 return (size / Map::kSize) * Map::kSize; |
| 2018 // Are map pointers encodable into map word? | |
| 2019 bool MapPointersEncodable() { | |
| 2020 if (!FLAG_use_big_map_space) { | |
| 2021 ASSERT(CountPagesToTop() <= kMaxMapPageIndex); | |
| 2022 return true; | |
| 2023 } | 2390 } |
| 2024 return CountPagesToTop() <= max_map_space_pages_; | |
| 2025 } | |
| 2026 | |
| 2027 // Should be called after forced sweep to find out if map space needs | |
| 2028 // compaction. | |
| 2029 bool NeedsCompaction(int live_maps) { | |
| 2030 return !MapPointersEncodable() && live_maps <= CompactionThreshold(); | |
| 2031 } | |
| 2032 | |
| 2033 Address TopAfterCompaction(int live_maps) { | |
| 2034 ASSERT(NeedsCompaction(live_maps)); | |
| 2035 | |
| 2036 int pages_left = live_maps / kMapsPerPage; | |
| 2037 PageIterator it(this, PageIterator::ALL_PAGES); | |
| 2038 while (pages_left-- > 0) { | |
| 2039 ASSERT(it.has_next()); | |
| 2040 it.next()->SetRegionMarks(Page::kAllRegionsCleanMarks); | |
| 2041 } | |
| 2042 ASSERT(it.has_next()); | |
| 2043 Page* top_page = it.next(); | |
| 2044 top_page->SetRegionMarks(Page::kAllRegionsCleanMarks); | |
| 2045 ASSERT(top_page->is_valid()); | |
| 2046 | |
| 2047 int offset = live_maps % kMapsPerPage * Map::kSize; | |
| 2048 Address top = top_page->ObjectAreaStart() + offset; | |
| 2049 ASSERT(top < top_page->ObjectAreaEnd()); | |
| 2050 ASSERT(Contains(top)); | |
| 2051 | |
| 2052 return top; | |
| 2053 } | |
| 2054 | |
| 2055 void FinishCompaction(Address new_top, int live_maps) { | |
| 2056 Page* top_page = Page::FromAddress(new_top); | |
| 2057 ASSERT(top_page->is_valid()); | |
| 2058 | |
| 2059 SetAllocationInfo(&allocation_info_, top_page); | |
| 2060 allocation_info_.top = new_top; | |
| 2061 | |
| 2062 int new_size = live_maps * Map::kSize; | |
| 2063 accounting_stats_.DeallocateBytes(accounting_stats_.Size()); | |
| 2064 accounting_stats_.AllocateBytes(new_size); | |
| 2065 | |
| 2066 // Flush allocation watermarks. | |
| 2067 for (Page* p = first_page_; p != top_page; p = p->next_page()) { | |
| 2068 p->SetAllocationWatermark(p->AllocationTop()); | |
| 2069 } | |
| 2070 top_page->SetAllocationWatermark(new_top); | |
| 2071 | |
| 2072 #ifdef DEBUG | |
| 2073 if (FLAG_enable_slow_asserts) { | |
| 2074 intptr_t actual_size = 0; | |
| 2075 for (Page* p = first_page_; p != top_page; p = p->next_page()) | |
| 2076 actual_size += kMapsPerPage * Map::kSize; | |
| 2077 actual_size += (new_top - top_page->ObjectAreaStart()); | |
| 2078 ASSERT(accounting_stats_.Size() == actual_size); | |
| 2079 } | |
| 2080 #endif | |
| 2081 | |
| 2082 Shrink(); | |
| 2083 ResetFreeList(); | |
| 2084 } | 2391 } |
| 2085 | 2392 |
| 2086 protected: | 2393 protected: |
| 2087 #ifdef DEBUG | 2394 #ifdef DEBUG |
| 2088 virtual void VerifyObject(HeapObject* obj); | 2395 virtual void VerifyObject(HeapObject* obj); |
| 2089 #endif | 2396 #endif |
| 2090 | 2397 |
| 2091 private: | 2398 private: |
| 2092 static const int kMapsPerPage = Page::kObjectAreaSize / Map::kSize; | 2399 static const int kMapsPerPage = Page::kObjectAreaSize / Map::kSize; |
| 2093 | 2400 |
| 2094 // Do map space compaction if there is a page gap. | 2401 // Do map space compaction if there is a page gap. |
| 2095 int CompactionThreshold() { | 2402 int CompactionThreshold() { |
| 2096 return kMapsPerPage * (max_map_space_pages_ - 1); | 2403 return kMapsPerPage * (max_map_space_pages_ - 1); |
| 2097 } | 2404 } |
| 2098 | 2405 |
| 2099 const int max_map_space_pages_; | 2406 const int max_map_space_pages_; |
| 2100 | 2407 |
| 2101 // An array of page start addresses in a map space. |
| 2102 Address page_addresses_[kMaxMapPageIndex]; | |
| 2103 | |
| 2104 public: | 2408 public: |
| 2105 TRACK_MEMORY("MapSpace") | 2409 TRACK_MEMORY("MapSpace") |
| 2106 }; | 2410 }; |
| 2107 | 2411 |
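The RoundSizeDownToObjectAlignment hook added above (and to CellSpace below) rounds an arbitrary byte count down to a multiple of the space's fixed object size, taking a bit-mask fast path when that size is a power of two. A minimal standalone sketch of the same arithmetic, using made-up object sizes rather than the real Map::kSize:

    // Self-contained sketch; 64 and 88 are illustrative object sizes, not V8 constants.
    #include <cassert>

    static bool IsPowerOfTwo(int x) { return x > 0 && (x & (x - 1)) == 0; }

    static int RoundSizeDownToObjectAlignment(int size, int object_size) {
      if (IsPowerOfTwo(object_size)) {
        return size & ~(object_size - 1);           // mask off the low bits
      }
      return (size / object_size) * object_size;    // generic integer-division path
    }

    int main() {
      assert(RoundSizeDownToObjectAlignment(1000, 64) == 960);  // power-of-two path
      assert(RoundSizeDownToObjectAlignment(1000, 88) == 968);  // 11 * 88
      return 0;
    }

Both branches produce a multiple of the object size; the power-of-two check presumably exists so the common case folds down to a constant mask.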
| 2108 | 2412 |
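For context on the compaction helpers deleted from MapSpace above: TopAfterCompaction keeps live_maps / kMapsPerPage whole pages and places the new allocation top (live_maps % kMapsPerPage) * Map::kSize bytes into the next page. A small sketch of that arithmetic with assumed constants (not V8's real page or map sizes):

    #include <cstdio>

    int main() {
      const int kMapSize = 88;                         // assumed Map::kSize
      const int kObjectAreaSize = 8 * 1024;            // assumed Page::kObjectAreaSize
      const int kMapsPerPage = kObjectAreaSize / kMapSize;
      int live_maps = 250;                             // maps surviving a forced sweep

      int full_pages = live_maps / kMapsPerPage;               // pages kept completely full
      int top_offset = (live_maps % kMapsPerPage) * kMapSize;  // new top within the next page
      std::printf("%d full pages, top at offset %d bytes\n", full_pages, top_offset);
      return 0;
    }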
| 2109 // ----------------------------------------------------------------------------- | 2413 // ----------------------------------------------------------------------------- |
| 2110 // Old space for all global object property cell objects | 2414 // Old space for all global object property cell objects |
| 2111 | 2415 |
| 2112 class CellSpace : public FixedSpace { | 2416 class CellSpace : public FixedSpace { |
| 2113 public: | 2417 public: |
| 2114 // Creates a property cell space object with a maximum capacity. | 2418 // Creates a property cell space object with a maximum capacity. |
| 2115 CellSpace(Heap* heap, intptr_t max_capacity, AllocationSpace id) | 2419 CellSpace(Heap* heap, intptr_t max_capacity, AllocationSpace id) |
| 2116 : FixedSpace(heap, max_capacity, id, JSGlobalPropertyCell::kSize, "cell") | 2420 : FixedSpace(heap, max_capacity, id, JSGlobalPropertyCell::kSize, "cell") |
| 2117 {} | 2421 {} |
| 2118 | 2422 |
| 2423 virtual int RoundSizeDownToObjectAlignment(int size) { |
| 2424 if (IsPowerOf2(JSGlobalPropertyCell::kSize)) { |
| 2425 return RoundDown(size, JSGlobalPropertyCell::kSize); |
| 2426 } else { |
| 2427 return (size / JSGlobalPropertyCell::kSize) * JSGlobalPropertyCell::kSize; |
| 2428 } |
| 2429 } |
| 2430 |
| 2119 protected: | 2431 protected: |
| 2120 #ifdef DEBUG | 2432 #ifdef DEBUG |
| 2121 virtual void VerifyObject(HeapObject* obj); | 2433 virtual void VerifyObject(HeapObject* obj); |
| 2122 #endif | 2434 #endif |
| 2123 | 2435 |
| 2124 public: | 2436 public: |
| 2125 TRACK_MEMORY("CellSpace") | 2437 TRACK_MEMORY("CellSpace") |
| 2126 }; | 2438 }; |
| 2127 | 2439 |
| 2128 | 2440 |
| 2129 // ----------------------------------------------------------------------------- | 2441 // ----------------------------------------------------------------------------- |
| 2130 // Large objects ( > Page::kMaxHeapObjectSize ) are allocated and managed by | 2442 // Large objects ( > Page::kMaxHeapObjectSize ) are allocated and managed by |
| 2131 // the large object space. A large object is allocated from OS heap with | 2443 // the large object space. A large object is allocated from OS heap with |
| 2132 // extra padding bytes (Page::kPageSize + Page::kObjectStartOffset). | 2444 // extra padding bytes (Page::kPageSize + Page::kObjectStartOffset). |
| 2133 // A large object always starts at Page::kObjectStartOffset into a page. | 2445 // A large object always starts at Page::kObjectStartOffset into a page. |
| 2134 // Large objects do not move during garbage collections. | 2446 // Large objects do not move during garbage collections. |
| 2135 | 2447 |
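Because each large object gets its own chunk with Page::kPageSize + Page::kObjectStartOffset bytes of padding, the ObjectSizeFor helper (retained on the new LargeObjectSpace below) recovers how much of a chunk is usable by subtracting that padding back out. A hedged sketch with illustrative constants standing in for the real Page values:

    #include <cstdio>
    #include <cstdint>

    int main() {
      // Stand-ins for Page::kPageSize and Page::kObjectStartOffset.
      const intptr_t kPageSize = 8 * 1024;
      const intptr_t kObjectStartOffset = 256;

      intptr_t chunk_size = 64 * 1024;  // memory obtained from the OS for one large object
      intptr_t object_size =
          chunk_size <= kPageSize + kObjectStartOffset
              ? 0
              : chunk_size - kPageSize - kObjectStartOffset;
      std::printf("a %ld-byte chunk holds at most a %ld-byte object\n",
                  static_cast<long>(chunk_size), static_cast<long>(object_size));
      return 0;
    }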
| 2136 // A LargeObjectChunk holds exactly one large object page with exactly one | |
| 2137 // large object. | |
| 2138 class LargeObjectChunk { | |
| 2139 public: | |
| 2140 // Allocates a new LargeObjectChunk that contains a large object page | |
| 2141 // (Page::kPageSize aligned) that has at least size_in_bytes (for a large | |
| 2142 // object) bytes after the object area start of that page. | |
| 2143 static LargeObjectChunk* New(int size_in_bytes, Executability executable); | |
| 2144 | |
| 2145 // Free the memory associated with the chunk. | |
| 2146 void Free(Executability executable); | |
| 2147 | |
| 2148 // Interpret a raw address as a large object chunk. | |
| 2149 static LargeObjectChunk* FromAddress(Address address) { | |
| 2150 return reinterpret_cast<LargeObjectChunk*>(address); | |
| 2151 } | |
| 2152 | |
| 2153 // Returns the address of this chunk. | |
| 2154 Address address() { return reinterpret_cast<Address>(this); } | |
| 2155 | |
| 2156 Page* GetPage() { | |
| 2157 return Page::FromAddress(RoundUp(address(), Page::kPageSize)); | |
| 2158 } | |
| 2159 | |
| 2160 // Accessors for the fields of the chunk. | |
| 2161 LargeObjectChunk* next() { return next_; } | |
| 2162 void set_next(LargeObjectChunk* chunk) { next_ = chunk; } | |
| 2163 size_t size() { return size_ & ~Page::kPageFlagMask; } | |
| 2164 | |
| 2165 // Compute the start address in the chunk. | |
| 2166 Address GetStartAddress() { return GetPage()->ObjectAreaStart(); } | |
| 2167 | |
| 2168 // Returns the object in this chunk. | |
| 2169 HeapObject* GetObject() { return HeapObject::FromAddress(GetStartAddress()); } | |
| 2170 | |
| 2171 // Given a requested size returns the physical size of a chunk to be | |
| 2172 // allocated. | |
| 2173 static int ChunkSizeFor(int size_in_bytes); | |
| 2174 | |
| 2175 // Given a chunk size, returns the object size it can accommodate. Used by | |
| 2176 // LargeObjectSpace::Available. | |
| 2177 static intptr_t ObjectSizeFor(intptr_t chunk_size) { | |
| 2178 if (chunk_size <= (Page::kPageSize + Page::kObjectStartOffset)) return 0; | |
| 2179 return chunk_size - Page::kPageSize - Page::kObjectStartOffset; | |
| 2180 } | |
| 2181 | |
| 2182 private: | |
| 2183 // A pointer to the next large object chunk in the space or NULL. | |
| 2184 LargeObjectChunk* next_; | |
| 2185 | |
| 2186 // The total size of this chunk. | |
| 2187 size_t size_; | |
| 2188 | |
| 2189 public: | |
| 2190 TRACK_MEMORY("LargeObjectChunk") | |
| 2191 }; | |
| 2192 | |
| 2193 | |
| 2194 class LargeObjectSpace : public Space { | 2448 class LargeObjectSpace : public Space { |
| 2195 public: | 2449 public: |
| 2196 LargeObjectSpace(Heap* heap, AllocationSpace id); | 2450 LargeObjectSpace(Heap* heap, AllocationSpace id); |
| 2197 virtual ~LargeObjectSpace() {} | 2451 virtual ~LargeObjectSpace() {} |
| 2198 | 2452 |
| 2199 // Initializes internal data structures. | 2453 // Initializes internal data structures. |
| 2200 bool Setup(); | 2454 bool Setup(); |
| 2201 | 2455 |
| 2202 // Releases internal resources, frees objects in this space. | 2456 // Releases internal resources, frees objects in this space. |
| 2203 void TearDown(); | 2457 void TearDown(); |
| 2204 | 2458 |
| 2205 // Allocates a (non-FixedArray, non-Code) large object. | 2459 static intptr_t ObjectSizeFor(intptr_t chunk_size) { |
| 2206 MUST_USE_RESULT MaybeObject* AllocateRaw(int size_in_bytes); | 2460 if (chunk_size <= (Page::kPageSize + Page::kObjectStartOffset)) return 0; |
| 2207 // Allocates a large Code object. | 2461 return chunk_size - Page::kPageSize - Page::kObjectStartOffset; |
| 2208 MUST_USE_RESULT MaybeObject* AllocateRawCode(int size_in_bytes); | 2462 } |
| 2209 // Allocates a large FixedArray. | 2463 |
| 2210 MUST_USE_RESULT MaybeObject* AllocateRawFixedArray(int size_in_bytes); | 2464 // Shared implementation of AllocateRaw, AllocateRawCode and |
| 2465 // AllocateRawFixedArray. |
| 2466 MUST_USE_RESULT MaybeObject* AllocateRaw(int object_size, |
| 2467 Executability executable); |
| 2211 | 2468 |
| 2212 // Available bytes for objects in this space. | 2469 // Available bytes for objects in this space. |
| 2213 inline intptr_t Available(); | 2470 inline intptr_t Available(); |
| 2214 | 2471 |
| 2215 virtual intptr_t Size() { | 2472 virtual intptr_t Size() { |
| 2216 return size_; | 2473 return size_; |
| 2217 } | 2474 } |
| 2218 | 2475 |
| 2219 virtual intptr_t SizeOfObjects() { | 2476 virtual intptr_t SizeOfObjects() { |
| 2220 return objects_size_; | 2477 return objects_size_; |
| 2221 } | 2478 } |
| 2222 | 2479 |
| 2223 int PageCount() { | 2480 int PageCount() { |
| 2224 return page_count_; | 2481 return page_count_; |
| 2225 } | 2482 } |
| 2226 | 2483 |
| 2227 // Finds an object for a given address; returns Failure::Exception() | 2484 // Finds an object for a given address; returns Failure::Exception() |
| 2228 // if it is not found. The function iterates through all objects in this | 2485 // if it is not found. The function iterates through all objects in this |
| 2229 // space, so it may be slow. | 2486 // space, so it may be slow. |
| 2230 MaybeObject* FindObject(Address a); | 2487 MaybeObject* FindObject(Address a); |
| 2231 | 2488 |
| 2232 // Finds a large object page containing the given pc; returns NULL | 2489 // Finds a large object page containing the given pc; returns NULL |
| 2233 // if such a page doesn't exist. | 2490 // if such a page doesn't exist. |
| 2234 LargeObjectChunk* FindChunkContainingPc(Address pc); | 2491 LargePage* FindPageContainingPc(Address pc); |
| 2235 | |
| 2236 // Iterates objects covered by dirty regions. | |
| 2237 void IterateDirtyRegions(ObjectSlotCallback func); | |
| 2238 | 2492 |
| 2239 // Frees unmarked objects. | 2493 // Frees unmarked objects. |
| 2240 void FreeUnmarkedObjects(); | 2494 void FreeUnmarkedObjects(); |
| 2241 | 2495 |
| 2242 // Checks whether a heap object is in this space; O(1). | 2496 // Checks whether a heap object is in this space; O(1). |
| 2243 bool Contains(HeapObject* obj); | 2497 bool Contains(HeapObject* obj); |
| 2244 | 2498 |
| 2245 // Checks whether the space is empty. | 2499 // Checks whether the space is empty. |
| 2246 bool IsEmpty() { return first_chunk_ == NULL; } | 2500 bool IsEmpty() { return first_page_ == NULL; } |
| 2247 | 2501 |
| 2248 // See the comments for ReserveSpace in the Space class. This has to be | 2502 // See the comments for ReserveSpace in the Space class. This has to be |
| 2249 // called after ReserveSpace has been called on the paged spaces, since they | 2503 // called after ReserveSpace has been called on the paged spaces, since they |
| 2250 // may use some memory, leaving less for large objects. | 2504 // may use some memory, leaving less for large objects. |
| 2251 virtual bool ReserveSpace(int bytes); | 2505 virtual bool ReserveSpace(int bytes); |
| 2252 | 2506 |
| 2507 LargePage* first_page() { return first_page_; } |
| 2508 |
| 2253 #ifdef DEBUG | 2509 #ifdef DEBUG |
| 2254 virtual void Verify(); | 2510 virtual void Verify(); |
| 2255 virtual void Print(); | 2511 virtual void Print(); |
| 2256 void ReportStatistics(); | 2512 void ReportStatistics(); |
| 2257 void CollectCodeStatistics(); | 2513 void CollectCodeStatistics(); |
| 2258 #endif | 2514 #endif |
| 2259 // Checks whether an address is in the object area in this space. It | 2515 // Checks whether an address is in the object area in this space. It |
| 2260 // iterates over all objects in the space, so it may be slow. | 2516 // iterates over all objects in the space, so it may be slow. |
| 2261 bool SlowContains(Address addr) { return !FindObject(addr)->IsFailure(); } | 2517 bool SlowContains(Address addr) { return !FindObject(addr)->IsFailure(); } |
| 2262 | 2518 |
| 2263 private: | 2519 private: |
| 2264 // The head of the linked list of large object chunks. | 2520 // The head of the linked list of large object pages. |
| 2265 LargeObjectChunk* first_chunk_; | 2521 LargePage* first_page_; |
| 2266 intptr_t size_; // allocated bytes | 2522 intptr_t size_; // allocated bytes |
| 2267 int page_count_; // number of chunks | 2523 int page_count_; // number of chunks |
| 2268 intptr_t objects_size_; // size of objects | 2524 intptr_t objects_size_; // size of objects |
| 2269 | 2525 |
| 2270 // Shared implementation of AllocateRaw, AllocateRawCode and | |
| 2271 // AllocateRawFixedArray. | |
| 2272 MUST_USE_RESULT MaybeObject* AllocateRawInternal(int requested_size, | |
| 2273 int object_size, | |
| 2274 Executability executable); | |
| 2275 | |
| 2276 friend class LargeObjectIterator; | 2526 friend class LargeObjectIterator; |
| 2277 | 2527 |
| 2278 public: | 2528 public: |
| 2279 TRACK_MEMORY("LargeObjectSpace") | 2529 TRACK_MEMORY("LargeObjectSpace") |
| 2280 }; | 2530 }; |
| 2281 | 2531 |
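On the new side the three per-type allocation entry points collapse into the single AllocateRaw(object_size, executable) shown above. The fragment below is only a usage sketch, not a standalone program; it assumes the space is reached through Heap::lo_space() and that Executability's EXECUTABLE / NOT_EXECUTABLE values pick the memory protection, as elsewhere in V8:

    // Usage sketch (not standalone): allocate a large pointer-containing object,
    // e.g. the backing store of a big FixedArray.
    Object* result;
    MaybeObject* maybe = heap->lo_space()->AllocateRaw(object_size, NOT_EXECUTABLE);
    if (!maybe->ToObject(&result)) return maybe;  // allocation failed; propagate it

    // A large Code object asks for executable backing memory instead.
    MaybeObject* maybe_code = heap->lo_space()->AllocateRaw(code_size, EXECUTABLE);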
| 2282 | 2532 |
| 2283 class LargeObjectIterator: public ObjectIterator { | 2533 class LargeObjectIterator: public ObjectIterator { |
| 2284 public: | 2534 public: |
| 2285 explicit LargeObjectIterator(LargeObjectSpace* space); | 2535 explicit LargeObjectIterator(LargeObjectSpace* space); |
| 2286 LargeObjectIterator(LargeObjectSpace* space, HeapObjectCallback size_func); | 2536 LargeObjectIterator(LargeObjectSpace* space, HeapObjectCallback size_func); |
| 2287 | 2537 |
| 2288 HeapObject* next(); | 2538 HeapObject* Next(); |
| 2289 | 2539 |
| 2290 // implementation of ObjectIterator. | 2540 // implementation of ObjectIterator. |
| 2291 virtual HeapObject* next_object() { return next(); } | 2541 virtual HeapObject* next_object() { return Next(); } |
| 2292 | 2542 |
| 2293 private: | 2543 private: |
| 2294 LargeObjectChunk* current_; | 2544 LargePage* current_; |
| 2295 HeapObjectCallback size_func_; | 2545 HeapObjectCallback size_func_; |
| 2296 }; | 2546 }; |
| 2297 | 2547 |
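LargeObjectIterator::Next() (renamed from next() on the old side) yields each large object in turn and returns NULL once the space is exhausted, so callers typically loop like this (sketch, assuming a LargeObjectSpace* lo_space is at hand):

    LargeObjectIterator it(lo_space);
    for (HeapObject* obj = it.Next(); obj != NULL; obj = it.Next()) {
      // ... visit obj ...
    }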
| 2298 | 2548 |
| 2549 // Iterates over the chunks (pages and large object pages) that can contain |
| 2550 // pointers to new space. |
| 2551 class PointerChunkIterator BASE_EMBEDDED { |
| 2552 public: |
| 2553 inline explicit PointerChunkIterator(Heap* heap); |
| 2554 |
| 2555 // Return NULL when the iterator is done. |
| 2556 MemoryChunk* next() { |
| 2557 switch (state_) { |
| 2558 case kOldPointerState: { |
| 2559 if (old_pointer_iterator_.has_next()) { |
| 2560 return old_pointer_iterator_.next(); |
| 2561 } |
| 2562 state_ = kMapState; |
| 2563 // Fall through. |
| 2564 } |
| 2565 case kMapState: { |
| 2566 if (map_iterator_.has_next()) { |
| 2567 return map_iterator_.next(); |
| 2568 } |
| 2569 state_ = kLargeObjectState; |
| 2570 // Fall through. |
| 2571 } |
| 2572 case kLargeObjectState: { |
| 2573 HeapObject* heap_object; |
| 2574 do { |
| 2575 heap_object = lo_iterator_.Next(); |
| 2576 if (heap_object == NULL) { |
| 2577 state_ = kFinishedState; |
| 2578 return NULL; |
| 2579 } |
| 2580 // Fixed arrays are the only pointer-containing objects in large |
| 2581 // object space. |
| 2582 } while (!heap_object->IsFixedArray()); |
| 2583 MemoryChunk* answer = MemoryChunk::FromAddress(heap_object->address()); |
| 2584 return answer; |
| 2585 } |
| 2586 case kFinishedState: |
| 2587 return NULL; |
| 2588 default: |
| 2589 break; |
| 2590 } |
| 2591 UNREACHABLE(); |
| 2592 return NULL; |
| 2593 } |
| 2594 |
| 2595 |
| 2596 private: |
| 2597 enum State { |
| 2598 kOldPointerState, |
| 2599 kMapState, |
| 2600 kLargeObjectState, |
| 2601 kFinishedState |
| 2602 }; |
| 2603 State state_; |
| 2604 PageIterator old_pointer_iterator_; |
| 2605 PageIterator map_iterator_; |
| 2606 LargeObjectIterator lo_iterator_; |
| 2607 }; |
| 2608 |
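PointerChunkIterator::next() likewise returns NULL when it runs out of candidates, after walking old-pointer pages, map pages, and the large-object pages that hold FixedArrays. A minimal usage sketch, assuming a Heap* heap:

    PointerChunkIterator it(heap);
    MemoryChunk* chunk;
    while ((chunk = it.next()) != NULL) {
      // chunk is a page that may contain pointers into new space;
      // scan whatever pointer-recording structure it carries.
    }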
| 2609 |
| 2299 #ifdef DEBUG | 2610 #ifdef DEBUG |
| 2300 struct CommentStatistic { | 2611 struct CommentStatistic { |
| 2301 const char* comment; | 2612 const char* comment; |
| 2302 int size; | 2613 int size; |
| 2303 int count; | 2614 int count; |
| 2304 void Clear() { | 2615 void Clear() { |
| 2305 comment = NULL; | 2616 comment = NULL; |
| 2306 size = 0; | 2617 size = 0; |
| 2307 count = 0; | 2618 count = 0; |
| 2308 } | 2619 } |
| 2309 // Must be small, since an iteration is used for lookup. | 2620 // Must be small, since an iteration is used for lookup. |
| 2310 static const int kMaxComments = 64; | 2621 static const int kMaxComments = 64; |
| 2311 }; | 2622 }; |
| 2312 #endif | 2623 #endif |
| 2313 | 2624 |
| 2314 | 2625 |
| 2315 } } // namespace v8::internal | 2626 } } // namespace v8::internal |
| 2316 | 2627 |
| 2317 #endif // V8_SPACES_H_ | 2628 #endif // V8_SPACES_H_ |