OLD | NEW |
1 // Copyright 2011 the V8 project authors. All rights reserved. | 1 // Copyright 2011 the V8 project authors. All rights reserved. |
2 // Use of this source code is governed by a BSD-style license that can be | 2 // Use of this source code is governed by a BSD-style license that can be |
3 // found in the LICENSE file. | 3 // found in the LICENSE file. |
4 | 4 |
5 #ifndef V8_SPACES_H_ | 5 #ifndef V8_HEAP_SPACES_H_ |
6 #define V8_SPACES_H_ | 6 #define V8_HEAP_SPACES_H_ |
7 | 7 |
8 #include "src/allocation.h" | 8 #include "src/allocation.h" |
9 #include "src/base/atomicops.h" | 9 #include "src/base/atomicops.h" |
10 #include "src/base/platform/mutex.h" | 10 #include "src/base/platform/mutex.h" |
11 #include "src/hashmap.h" | 11 #include "src/hashmap.h" |
12 #include "src/list.h" | 12 #include "src/list.h" |
13 #include "src/log.h" | 13 #include "src/log.h" |
14 #include "src/utils.h" | 14 #include "src/utils.h" |
15 | 15 |
16 namespace v8 { | 16 namespace v8 { |
(...skipping 50 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... |
67 // is to enable linear allocation without having to constantly update the byte | 67 // is to enable linear allocation without having to constantly update the byte |
68 // array every time the top field is updated and a new object is created. The | 68 // array every time the top field is updated and a new object is created. The |
69 // special garbage section is not in the chain of garbage sections. | 69 // special garbage section is not in the chain of garbage sections. |
70 // | 70 // |
71 // Since the top and limit fields are in the space, not the page, only one page | 71 // Since the top and limit fields are in the space, not the page, only one page |
72 // has a special garbage section, and if the top and limit are equal then there | 72 // has a special garbage section, and if the top and limit are equal then there |
73 // is no special garbage section. | 73 // is no special garbage section. |
74 | 74 |
75 // Some assertion macros used in the debugging mode. | 75 // Some assertion macros used in the debugging mode. |
76 | 76 |
77 #define DCHECK_PAGE_ALIGNED(address) \ | 77 #define DCHECK_PAGE_ALIGNED(address) \ |
78 DCHECK((OffsetFrom(address) & Page::kPageAlignmentMask) == 0) | 78 DCHECK((OffsetFrom(address) & Page::kPageAlignmentMask) == 0) |
79 | 79 |
80 #define DCHECK_OBJECT_ALIGNED(address) \ | 80 #define DCHECK_OBJECT_ALIGNED(address) \ |
81 DCHECK((OffsetFrom(address) & kObjectAlignmentMask) == 0) | 81 DCHECK((OffsetFrom(address) & kObjectAlignmentMask) == 0) |
82 | 82 |
83 #define DCHECK_OBJECT_SIZE(size) \ | 83 #define DCHECK_OBJECT_SIZE(size) \ |
84 DCHECK((0 < size) && (size <= Page::kMaxRegularHeapObjectSize)) | 84 DCHECK((0 < size) && (size <= Page::kMaxRegularHeapObjectSize)) |
85 | 85 |
86 #define DCHECK_PAGE_OFFSET(offset) \ | 86 #define DCHECK_PAGE_OFFSET(offset) \ |
87 DCHECK((Page::kObjectStartOffset <= offset) \ | 87 DCHECK((Page::kObjectStartOffset <= offset) && (offset <= Page::kPageSize)) |
88 && (offset <= Page::kPageSize)) | |
89 | 88 |
90 #define DCHECK_MAP_PAGE_INDEX(index) \ | 89 #define DCHECK_MAP_PAGE_INDEX(index) \ |
91 DCHECK((0 <= index) && (index <= MapSpace::kMaxMapPageIndex)) | 90 DCHECK((0 <= index) && (index <= MapSpace::kMaxMapPageIndex)) |
92 | 91 |
93 | 92 |
94 class PagedSpace; | 93 class PagedSpace; |
95 class MemoryAllocator; | 94 class MemoryAllocator; |
96 class AllocationInfo; | 95 class AllocationInfo; |
97 class Space; | 96 class Space; |
98 class FreeList; | 97 class FreeList; |
99 class MemoryChunk; | 98 class MemoryChunk; |
100 | 99 |
101 class MarkBit { | 100 class MarkBit { |
102 public: | 101 public: |
103 typedef uint32_t CellType; | 102 typedef uint32_t CellType; |
104 | 103 |
105 inline MarkBit(CellType* cell, CellType mask, bool data_only) | 104 inline MarkBit(CellType* cell, CellType mask, bool data_only) |
106 : cell_(cell), mask_(mask), data_only_(data_only) { } | 105 : cell_(cell), mask_(mask), data_only_(data_only) {} |
107 | 106 |
108 inline CellType* cell() { return cell_; } | 107 inline CellType* cell() { return cell_; } |
109 inline CellType mask() { return mask_; } | 108 inline CellType mask() { return mask_; } |
110 | 109 |
111 #ifdef DEBUG | 110 #ifdef DEBUG |
112 bool operator==(const MarkBit& other) { | 111 bool operator==(const MarkBit& other) { |
113 return cell_ == other.cell_ && mask_ == other.mask_; | 112 return cell_ == other.cell_ && mask_ == other.mask_; |
114 } | 113 } |
115 #endif | 114 #endif |
116 | 115 |
(...skipping 25 matching lines...) Expand all Loading... |
142 | 141 |
143 // Bitmap is a sequence of cells each containing fixed number of bits. | 142 // Bitmap is a sequence of cells each containing fixed number of bits. |
144 class Bitmap { | 143 class Bitmap { |
145 public: | 144 public: |
146 static const uint32_t kBitsPerCell = 32; | 145 static const uint32_t kBitsPerCell = 32; |
147 static const uint32_t kBitsPerCellLog2 = 5; | 146 static const uint32_t kBitsPerCellLog2 = 5; |
148 static const uint32_t kBitIndexMask = kBitsPerCell - 1; | 147 static const uint32_t kBitIndexMask = kBitsPerCell - 1; |
149 static const uint32_t kBytesPerCell = kBitsPerCell / kBitsPerByte; | 148 static const uint32_t kBytesPerCell = kBitsPerCell / kBitsPerByte; |
150 static const uint32_t kBytesPerCellLog2 = kBitsPerCellLog2 - kBitsPerByteLog2; | 149 static const uint32_t kBytesPerCellLog2 = kBitsPerCellLog2 - kBitsPerByteLog2; |
151 | 150 |
152 static const size_t kLength = | 151 static const size_t kLength = (1 << kPageSizeBits) >> (kPointerSizeLog2); |
153 (1 << kPageSizeBits) >> (kPointerSizeLog2); | |
154 | 152 |
155 static const size_t kSize = | 153 static const size_t kSize = |
156 (1 << kPageSizeBits) >> (kPointerSizeLog2 + kBitsPerByteLog2); | 154 (1 << kPageSizeBits) >> (kPointerSizeLog2 + kBitsPerByteLog2); |
157 | 155 |
158 | 156 |
159 static int CellsForLength(int length) { | 157 static int CellsForLength(int length) { |
160 return (length + kBitsPerCell - 1) >> kBitsPerCellLog2; | 158 return (length + kBitsPerCell - 1) >> kBitsPerCellLog2; |
161 } | 159 } |
162 | 160 |
163 int CellsCount() { | 161 int CellsCount() { return CellsForLength(kLength); } |
164 return CellsForLength(kLength); | |
165 } | |
166 | 162 |
167 static int SizeFor(int cells_count) { | 163 static int SizeFor(int cells_count) { |
168 return sizeof(MarkBit::CellType) * cells_count; | 164 return sizeof(MarkBit::CellType) * cells_count; |
169 } | 165 } |
170 | 166 |
171 INLINE(static uint32_t IndexToCell(uint32_t index)) { | 167 INLINE(static uint32_t IndexToCell(uint32_t index)) { |
172 return index >> kBitsPerCellLog2; | 168 return index >> kBitsPerCellLog2; |
173 } | 169 } |
174 | 170 |
175 INLINE(static uint32_t CellToIndex(uint32_t index)) { | 171 INLINE(static uint32_t CellToIndex(uint32_t index)) { |
176 return index << kBitsPerCellLog2; | 172 return index << kBitsPerCellLog2; |
177 } | 173 } |
178 | 174 |
179 INLINE(static uint32_t CellAlignIndex(uint32_t index)) { | 175 INLINE(static uint32_t CellAlignIndex(uint32_t index)) { |
180 return (index + kBitIndexMask) & ~kBitIndexMask; | 176 return (index + kBitIndexMask) & ~kBitIndexMask; |
181 } | 177 } |
182 | 178 |
183 INLINE(MarkBit::CellType* cells()) { | 179 INLINE(MarkBit::CellType* cells()) { |
184 return reinterpret_cast<MarkBit::CellType*>(this); | 180 return reinterpret_cast<MarkBit::CellType*>(this); |
185 } | 181 } |
186 | 182 |
187 INLINE(Address address()) { | 183 INLINE(Address address()) { return reinterpret_cast<Address>(this); } |
188 return reinterpret_cast<Address>(this); | |
189 } | |
190 | 184 |
191 INLINE(static Bitmap* FromAddress(Address addr)) { | 185 INLINE(static Bitmap* FromAddress(Address addr)) { |
192 return reinterpret_cast<Bitmap*>(addr); | 186 return reinterpret_cast<Bitmap*>(addr); |
193 } | 187 } |
194 | 188 |
195 inline MarkBit MarkBitFromIndex(uint32_t index, bool data_only = false) { | 189 inline MarkBit MarkBitFromIndex(uint32_t index, bool data_only = false) { |
196 MarkBit::CellType mask = 1 << (index & kBitIndexMask); | 190 MarkBit::CellType mask = 1 << (index & kBitIndexMask); |
197 MarkBit::CellType* cell = this->cells() + (index >> kBitsPerCellLog2); | 191 MarkBit::CellType* cell = this->cells() + (index >> kBitsPerCellLog2); |
198 return MarkBit(cell, mask, data_only); | 192 return MarkBit(cell, mask, data_only); |
199 } | 193 } |
200 | 194 |
201 static inline void Clear(MemoryChunk* chunk); | 195 static inline void Clear(MemoryChunk* chunk); |
202 | 196 |
203 static void PrintWord(uint32_t word, uint32_t himask = 0) { | 197 static void PrintWord(uint32_t word, uint32_t himask = 0) { |
204 for (uint32_t mask = 1; mask != 0; mask <<= 1) { | 198 for (uint32_t mask = 1; mask != 0; mask <<= 1) { |
205 if ((mask & himask) != 0) PrintF("["); | 199 if ((mask & himask) != 0) PrintF("["); |
206 PrintF((mask & word) ? "1" : "0"); | 200 PrintF((mask & word) ? "1" : "0"); |
207 if ((mask & himask) != 0) PrintF("]"); | 201 if ((mask & himask) != 0) PrintF("]"); |
208 } | 202 } |
209 } | 203 } |
210 | 204 |
211 class CellPrinter { | 205 class CellPrinter { |
212 public: | 206 public: |
213 CellPrinter() : seq_start(0), seq_type(0), seq_length(0) { } | 207 CellPrinter() : seq_start(0), seq_type(0), seq_length(0) {} |
214 | 208 |
215 void Print(uint32_t pos, uint32_t cell) { | 209 void Print(uint32_t pos, uint32_t cell) { |
216 if (cell == seq_type) { | 210 if (cell == seq_type) { |
217 seq_length++; | 211 seq_length++; |
218 return; | 212 return; |
219 } | 213 } |
220 | 214 |
221 Flush(); | 215 Flush(); |
222 | 216 |
223 if (IsSeq(cell)) { | 217 if (IsSeq(cell)) { |
224 seq_start = pos; | 218 seq_start = pos; |
225 seq_length = 0; | 219 seq_length = 0; |
226 seq_type = cell; | 220 seq_type = cell; |
227 return; | 221 return; |
228 } | 222 } |
229 | 223 |
230 PrintF("%d: ", pos); | 224 PrintF("%d: ", pos); |
231 PrintWord(cell); | 225 PrintWord(cell); |
232 PrintF("\n"); | 226 PrintF("\n"); |
233 } | 227 } |
234 | 228 |
235 void Flush() { | 229 void Flush() { |
236 if (seq_length > 0) { | 230 if (seq_length > 0) { |
237 PrintF("%d: %dx%d\n", | 231 PrintF("%d: %dx%d\n", seq_start, seq_type == 0 ? 0 : 1, |
238 seq_start, | |
239 seq_type == 0 ? 0 : 1, | |
240 seq_length * kBitsPerCell); | 232 seq_length * kBitsPerCell); |
241 seq_length = 0; | 233 seq_length = 0; |
242 } | 234 } |
243 } | 235 } |
244 | 236 |
245 static bool IsSeq(uint32_t cell) { return cell == 0 || cell == 0xFFFFFFFF; } | 237 static bool IsSeq(uint32_t cell) { return cell == 0 || cell == 0xFFFFFFFF; } |
246 | 238 |
247 private: | 239 private: |
248 uint32_t seq_start; | 240 uint32_t seq_start; |
249 uint32_t seq_type; | 241 uint32_t seq_type; |
(...skipping 27 matching lines...) Expand all Loading... |
277 // It is divided into the header and the body. Chunk start is always | 269 // It is divided into the header and the body. Chunk start is always |
278 // 1MB aligned. Start of the body is aligned so it can accommodate | 270 // 1MB aligned. Start of the body is aligned so it can accommodate |
279 // any heap object. | 271 // any heap object. |
280 class MemoryChunk { | 272 class MemoryChunk { |
281 public: | 273 public: |
282 // Only works if the pointer is in the first kPageSize of the MemoryChunk. | 274 // Only works if the pointer is in the first kPageSize of the MemoryChunk. |
283 static MemoryChunk* FromAddress(Address a) { | 275 static MemoryChunk* FromAddress(Address a) { |
284 return reinterpret_cast<MemoryChunk*>(OffsetFrom(a) & ~kAlignmentMask); | 276 return reinterpret_cast<MemoryChunk*>(OffsetFrom(a) & ~kAlignmentMask); |
285 } | 277 } |
286 static const MemoryChunk* FromAddress(const byte* a) { | 278 static const MemoryChunk* FromAddress(const byte* a) { |
287 return reinterpret_cast<const MemoryChunk*>( | 279 return reinterpret_cast<const MemoryChunk*>(OffsetFrom(a) & |
288 OffsetFrom(a) & ~kAlignmentMask); | 280 ~kAlignmentMask); |
289 } | 281 } |
290 | 282 |
291 // Only works for addresses in pointer spaces, not data or code spaces. | 283 // Only works for addresses in pointer spaces, not data or code spaces. |
292 static inline MemoryChunk* FromAnyPointerAddress(Heap* heap, Address addr); | 284 static inline MemoryChunk* FromAnyPointerAddress(Heap* heap, Address addr); |
293 | 285 |
294 Address address() { return reinterpret_cast<Address>(this); } | 286 Address address() { return reinterpret_cast<Address>(this); } |
295 | 287 |
296 bool is_valid() { return address() != NULL; } | 288 bool is_valid() { return address() != NULL; } |
297 | 289 |
298 MemoryChunk* next_chunk() const { | 290 MemoryChunk* next_chunk() const { |
(...skipping 22 matching lines...) Expand all Loading... |
321 } | 313 } |
322 } | 314 } |
323 | 315 |
324 void set_owner(Space* space) { | 316 void set_owner(Space* space) { |
325 DCHECK((reinterpret_cast<intptr_t>(space) & kPageHeaderTagMask) == 0); | 317 DCHECK((reinterpret_cast<intptr_t>(space) & kPageHeaderTagMask) == 0); |
326 owner_ = reinterpret_cast<Address>(space) + kPageHeaderTag; | 318 owner_ = reinterpret_cast<Address>(space) + kPageHeaderTag; |
327 DCHECK((reinterpret_cast<intptr_t>(owner_) & kPageHeaderTagMask) == | 319 DCHECK((reinterpret_cast<intptr_t>(owner_) & kPageHeaderTagMask) == |
328 kPageHeaderTag); | 320 kPageHeaderTag); |
329 } | 321 } |
330 | 322 |
331 base::VirtualMemory* reserved_memory() { | 323 base::VirtualMemory* reserved_memory() { return &reservation_; } |
332 return &reservation_; | |
333 } | |
334 | 324 |
335 void InitializeReservedMemory() { | 325 void InitializeReservedMemory() { reservation_.Reset(); } |
336 reservation_.Reset(); | |
337 } | |
338 | 326 |
339 void set_reserved_memory(base::VirtualMemory* reservation) { | 327 void set_reserved_memory(base::VirtualMemory* reservation) { |
340 DCHECK_NOT_NULL(reservation); | 328 DCHECK_NOT_NULL(reservation); |
341 reservation_.TakeControl(reservation); | 329 reservation_.TakeControl(reservation); |
342 } | 330 } |
343 | 331 |
344 bool scan_on_scavenge() { return IsFlagSet(SCAN_ON_SCAVENGE); } | 332 bool scan_on_scavenge() { return IsFlagSet(SCAN_ON_SCAVENGE); } |
345 void initialize_scan_on_scavenge(bool scan) { | 333 void initialize_scan_on_scavenge(bool scan) { |
346 if (scan) { | 334 if (scan) { |
347 SetFlag(SCAN_ON_SCAVENGE); | 335 SetFlag(SCAN_ON_SCAVENGE); |
(...skipping 54 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... |
402 NUM_MEMORY_CHUNK_FLAGS | 390 NUM_MEMORY_CHUNK_FLAGS |
403 }; | 391 }; |
404 | 392 |
405 | 393 |
406 static const int kPointersToHereAreInterestingMask = | 394 static const int kPointersToHereAreInterestingMask = |
407 1 << POINTERS_TO_HERE_ARE_INTERESTING; | 395 1 << POINTERS_TO_HERE_ARE_INTERESTING; |
408 | 396 |
409 static const int kPointersFromHereAreInterestingMask = | 397 static const int kPointersFromHereAreInterestingMask = |
410 1 << POINTERS_FROM_HERE_ARE_INTERESTING; | 398 1 << POINTERS_FROM_HERE_ARE_INTERESTING; |
411 | 399 |
412 static const int kEvacuationCandidateMask = | 400 static const int kEvacuationCandidateMask = 1 << EVACUATION_CANDIDATE; |
413 1 << EVACUATION_CANDIDATE; | |
414 | 401 |
415 static const int kSkipEvacuationSlotsRecordingMask = | 402 static const int kSkipEvacuationSlotsRecordingMask = |
416 (1 << EVACUATION_CANDIDATE) | | 403 (1 << EVACUATION_CANDIDATE) | (1 << RESCAN_ON_EVACUATION) | |
417 (1 << RESCAN_ON_EVACUATION) | | 404 (1 << IN_FROM_SPACE) | (1 << IN_TO_SPACE); |
418 (1 << IN_FROM_SPACE) | | |
419 (1 << IN_TO_SPACE); | |
420 | 405 |
421 | 406 |
422 void SetFlag(int flag) { | 407 void SetFlag(int flag) { flags_ |= static_cast<uintptr_t>(1) << flag; } |
423 flags_ |= static_cast<uintptr_t>(1) << flag; | |
424 } | |
425 | 408 |
426 void ClearFlag(int flag) { | 409 void ClearFlag(int flag) { flags_ &= ~(static_cast<uintptr_t>(1) << flag); } |
427 flags_ &= ~(static_cast<uintptr_t>(1) << flag); | |
428 } | |
429 | 410 |
430 void SetFlagTo(int flag, bool value) { | 411 void SetFlagTo(int flag, bool value) { |
431 if (value) { | 412 if (value) { |
432 SetFlag(flag); | 413 SetFlag(flag); |
433 } else { | 414 } else { |
434 ClearFlag(flag); | 415 ClearFlag(flag); |
435 } | 416 } |
436 } | 417 } |
437 | 418 |
438 bool IsFlagSet(int flag) { | 419 bool IsFlagSet(int flag) { |
(...skipping 27 matching lines...) Expand all Loading... |
466 ParallelSweepingState parallel_sweeping() { | 447 ParallelSweepingState parallel_sweeping() { |
467 return static_cast<ParallelSweepingState>( | 448 return static_cast<ParallelSweepingState>( |
468 base::Acquire_Load(¶llel_sweeping_)); | 449 base::Acquire_Load(¶llel_sweeping_)); |
469 } | 450 } |
470 | 451 |
471 void set_parallel_sweeping(ParallelSweepingState state) { | 452 void set_parallel_sweeping(ParallelSweepingState state) { |
472 base::Release_Store(¶llel_sweeping_, state); | 453 base::Release_Store(¶llel_sweeping_, state); |
473 } | 454 } |
474 | 455 |
475 bool TryParallelSweeping() { | 456 bool TryParallelSweeping() { |
476 return base::Acquire_CompareAndSwap( | 457 return base::Acquire_CompareAndSwap(¶llel_sweeping_, SWEEPING_PENDING, |
477 ¶llel_sweeping_, SWEEPING_PENDING, SWEEPING_IN_PROGRESS) == | 458 SWEEPING_IN_PROGRESS) == |
478 SWEEPING_PENDING; | 459 SWEEPING_PENDING; |
479 } | 460 } |
480 | 461 |
481 bool SweepingCompleted() { return parallel_sweeping() <= SWEEPING_FINALIZE; } | 462 bool SweepingCompleted() { return parallel_sweeping() <= SWEEPING_FINALIZE; } |
482 | 463 |
483 // Manage live byte count (count of bytes known to be live, | 464 // Manage live byte count (count of bytes known to be live, |
484 // because they are marked black). | 465 // because they are marked black). |
485 void ResetLiveBytes() { | 466 void ResetLiveBytes() { |
486 if (FLAG_gc_verbose) { | 467 if (FLAG_gc_verbose) { |
487 PrintF("ResetLiveBytes:%p:%x->0\n", | 468 PrintF("ResetLiveBytes:%p:%x->0\n", static_cast<void*>(this), |
488 static_cast<void*>(this), live_byte_count_); | 469 live_byte_count_); |
489 } | 470 } |
490 live_byte_count_ = 0; | 471 live_byte_count_ = 0; |
491 } | 472 } |
492 void IncrementLiveBytes(int by) { | 473 void IncrementLiveBytes(int by) { |
493 if (FLAG_gc_verbose) { | 474 if (FLAG_gc_verbose) { |
494 printf("UpdateLiveBytes:%p:%x%c=%x->%x\n", | 475 printf("UpdateLiveBytes:%p:%x%c=%x->%x\n", static_cast<void*>(this), |
495 static_cast<void*>(this), live_byte_count_, | 476 live_byte_count_, ((by < 0) ? '-' : '+'), ((by < 0) ? -by : by), |
496 ((by < 0) ? '-' : '+'), ((by < 0) ? -by : by), | |
497 live_byte_count_ + by); | 477 live_byte_count_ + by); |
498 } | 478 } |
499 live_byte_count_ += by; | 479 live_byte_count_ += by; |
500 DCHECK_LE(static_cast<unsigned>(live_byte_count_), size_); | 480 DCHECK_LE(static_cast<unsigned>(live_byte_count_), size_); |
501 } | 481 } |
502 int LiveBytes() { | 482 int LiveBytes() { |
503 DCHECK(static_cast<unsigned>(live_byte_count_) <= size_); | 483 DCHECK(static_cast<unsigned>(live_byte_count_) <= size_); |
504 return live_byte_count_; | 484 return live_byte_count_; |
505 } | 485 } |
506 | 486 |
(...skipping 36 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... |
543 static void IncrementLiveBytesFromMutator(Address address, int by); | 523 static void IncrementLiveBytesFromMutator(Address address, int by); |
544 | 524 |
545 static const intptr_t kAlignment = | 525 static const intptr_t kAlignment = |
546 (static_cast<uintptr_t>(1) << kPageSizeBits); | 526 (static_cast<uintptr_t>(1) << kPageSizeBits); |
547 | 527 |
548 static const intptr_t kAlignmentMask = kAlignment - 1; | 528 static const intptr_t kAlignmentMask = kAlignment - 1; |
549 | 529 |
550 static const intptr_t kSizeOffset = 0; | 530 static const intptr_t kSizeOffset = 0; |
551 | 531 |
552 static const intptr_t kLiveBytesOffset = | 532 static const intptr_t kLiveBytesOffset = |
553 kSizeOffset + kPointerSize + kPointerSize + kPointerSize + | 533 kSizeOffset + kPointerSize + kPointerSize + kPointerSize + kPointerSize + |
554 kPointerSize + kPointerSize + | 534 kPointerSize + kPointerSize + kPointerSize + kPointerSize + kIntSize; |
555 kPointerSize + kPointerSize + kPointerSize + kIntSize; | |
556 | 535 |
557 static const size_t kSlotsBufferOffset = kLiveBytesOffset + kIntSize; | 536 static const size_t kSlotsBufferOffset = kLiveBytesOffset + kIntSize; |
558 | 537 |
559 static const size_t kWriteBarrierCounterOffset = | 538 static const size_t kWriteBarrierCounterOffset = |
560 kSlotsBufferOffset + kPointerSize + kPointerSize; | 539 kSlotsBufferOffset + kPointerSize + kPointerSize; |
561 | 540 |
562 static const size_t kHeaderSize = kWriteBarrierCounterOffset + kPointerSize + | 541 static const size_t kHeaderSize = |
563 kIntSize + kIntSize + kPointerSize + | 542 kWriteBarrierCounterOffset + kPointerSize + kIntSize + kIntSize + |
564 5 * kPointerSize + | 543 kPointerSize + 5 * kPointerSize + kPointerSize + kPointerSize; |
565 kPointerSize + kPointerSize; | |
566 | 544 |
567 static const int kBodyOffset = | 545 static const int kBodyOffset = |
568 CODE_POINTER_ALIGN(kHeaderSize + Bitmap::kSize); | 546 CODE_POINTER_ALIGN(kHeaderSize + Bitmap::kSize); |
569 | 547 |
570 // The start offset of the object area in a page. Aligned to both maps and | 548 // The start offset of the object area in a page. Aligned to both maps and |
571 // code alignment to be suitable for both. Also aligned to 32 words because | 549 // code alignment to be suitable for both. Also aligned to 32 words because |
572 // the marking bitmap is arranged in 32 bit chunks. | 550 // the marking bitmap is arranged in 32 bit chunks. |
573 static const int kObjectStartAlignment = 32 * kPointerSize; | 551 static const int kObjectStartAlignment = 32 * kPointerSize; |
574 static const int kObjectStartOffset = kBodyOffset - 1 + | 552 static const int kObjectStartOffset = |
| 553 kBodyOffset - 1 + |
575 (kObjectStartAlignment - (kBodyOffset - 1) % kObjectStartAlignment); | 554 (kObjectStartAlignment - (kBodyOffset - 1) % kObjectStartAlignment); |
576 | 555 |
577 size_t size() const { return size_; } | 556 size_t size() const { return size_; } |
578 | 557 |
579 void set_size(size_t size) { | 558 void set_size(size_t size) { size_ = size; } |
580 size_ = size; | |
581 } | |
582 | 559 |
583 void SetArea(Address area_start, Address area_end) { | 560 void SetArea(Address area_start, Address area_end) { |
584 area_start_ = area_start; | 561 area_start_ = area_start; |
585 area_end_ = area_end; | 562 area_end_ = area_end; |
586 } | 563 } |
587 | 564 |
588 Executability executable() { | 565 Executability executable() { |
589 return IsFlagSet(IS_EXECUTABLE) ? EXECUTABLE : NOT_EXECUTABLE; | 566 return IsFlagSet(IS_EXECUTABLE) ? EXECUTABLE : NOT_EXECUTABLE; |
590 } | 567 } |
591 | 568 |
592 bool ContainsOnlyData() { | 569 bool ContainsOnlyData() { return IsFlagSet(CONTAINS_ONLY_DATA); } |
593 return IsFlagSet(CONTAINS_ONLY_DATA); | |
594 } | |
595 | 570 |
596 bool InNewSpace() { | 571 bool InNewSpace() { |
597 return (flags_ & ((1 << IN_FROM_SPACE) | (1 << IN_TO_SPACE))) != 0; | 572 return (flags_ & ((1 << IN_FROM_SPACE) | (1 << IN_TO_SPACE))) != 0; |
598 } | 573 } |
599 | 574 |
600 bool InToSpace() { | 575 bool InToSpace() { return IsFlagSet(IN_TO_SPACE); } |
601 return IsFlagSet(IN_TO_SPACE); | |
602 } | |
603 | 576 |
604 bool InFromSpace() { | 577 bool InFromSpace() { return IsFlagSet(IN_FROM_SPACE); } |
605 return IsFlagSet(IN_FROM_SPACE); | |
606 } | |
607 | 578 |
608 // --------------------------------------------------------------------- | 579 // --------------------------------------------------------------------- |
609 // Markbits support | 580 // Markbits support |
610 | 581 |
611 inline Bitmap* markbits() { | 582 inline Bitmap* markbits() { |
612 return Bitmap::FromAddress(address() + kHeaderSize); | 583 return Bitmap::FromAddress(address() + kHeaderSize); |
613 } | 584 } |
614 | 585 |
615 void PrintMarkbits() { markbits()->Print(); } | 586 void PrintMarkbits() { markbits()->Print(); } |
616 | 587 |
617 inline uint32_t AddressToMarkbitIndex(Address addr) { | 588 inline uint32_t AddressToMarkbitIndex(Address addr) { |
618 return static_cast<uint32_t>(addr - this->address()) >> kPointerSizeLog2; | 589 return static_cast<uint32_t>(addr - this->address()) >> kPointerSizeLog2; |
619 } | 590 } |
620 | 591 |
621 inline static uint32_t FastAddressToMarkbitIndex(Address addr) { | 592 inline static uint32_t FastAddressToMarkbitIndex(Address addr) { |
622 const intptr_t offset = | 593 const intptr_t offset = reinterpret_cast<intptr_t>(addr) & kAlignmentMask; |
623 reinterpret_cast<intptr_t>(addr) & kAlignmentMask; | |
624 | 594 |
625 return static_cast<uint32_t>(offset) >> kPointerSizeLog2; | 595 return static_cast<uint32_t>(offset) >> kPointerSizeLog2; |
626 } | 596 } |
627 | 597 |
628 inline Address MarkbitIndexToAddress(uint32_t index) { | 598 inline Address MarkbitIndexToAddress(uint32_t index) { |
629 return this->address() + (index << kPointerSizeLog2); | 599 return this->address() + (index << kPointerSizeLog2); |
630 } | 600 } |
631 | 601 |
632 void InsertAfter(MemoryChunk* other); | 602 void InsertAfter(MemoryChunk* other); |
633 void Unlink(); | 603 void Unlink(); |
634 | 604 |
635 inline Heap* heap() const { return heap_; } | 605 inline Heap* heap() const { return heap_; } |
636 | 606 |
637 static const int kFlagsOffset = kPointerSize; | 607 static const int kFlagsOffset = kPointerSize; |
638 | 608 |
639 bool IsEvacuationCandidate() { return IsFlagSet(EVACUATION_CANDIDATE); } | 609 bool IsEvacuationCandidate() { return IsFlagSet(EVACUATION_CANDIDATE); } |
640 | 610 |
641 bool ShouldSkipEvacuationSlotRecording() { | 611 bool ShouldSkipEvacuationSlotRecording() { |
642 return (flags_ & kSkipEvacuationSlotsRecordingMask) != 0; | 612 return (flags_ & kSkipEvacuationSlotsRecordingMask) != 0; |
643 } | 613 } |
644 | 614 |
645 inline SkipList* skip_list() { | 615 inline SkipList* skip_list() { return skip_list_; } |
646 return skip_list_; | |
647 } | |
648 | 616 |
649 inline void set_skip_list(SkipList* skip_list) { | 617 inline void set_skip_list(SkipList* skip_list) { skip_list_ = skip_list; } |
650 skip_list_ = skip_list; | |
651 } | |
652 | 618 |
653 inline SlotsBuffer* slots_buffer() { | 619 inline SlotsBuffer* slots_buffer() { return slots_buffer_; } |
654 return slots_buffer_; | |
655 } | |
656 | 620 |
657 inline SlotsBuffer** slots_buffer_address() { | 621 inline SlotsBuffer** slots_buffer_address() { return &slots_buffer_; } |
658 return &slots_buffer_; | |
659 } | |
660 | 622 |
661 void MarkEvacuationCandidate() { | 623 void MarkEvacuationCandidate() { |
662 DCHECK(slots_buffer_ == NULL); | 624 DCHECK(slots_buffer_ == NULL); |
663 SetFlag(EVACUATION_CANDIDATE); | 625 SetFlag(EVACUATION_CANDIDATE); |
664 } | 626 } |
665 | 627 |
666 void ClearEvacuationCandidate() { | 628 void ClearEvacuationCandidate() { |
667 DCHECK(slots_buffer_ == NULL); | 629 DCHECK(slots_buffer_ == NULL); |
668 ClearFlag(EVACUATION_CANDIDATE); | 630 ClearFlag(EVACUATION_CANDIDATE); |
669 } | 631 } |
670 | 632 |
671 Address area_start() { return area_start_; } | 633 Address area_start() { return area_start_; } |
672 Address area_end() { return area_end_; } | 634 Address area_end() { return area_end_; } |
673 int area_size() { | 635 int area_size() { return static_cast<int>(area_end() - area_start()); } |
674 return static_cast<int>(area_end() - area_start()); | |
675 } | |
676 bool CommitArea(size_t requested); | 636 bool CommitArea(size_t requested); |
677 | 637 |
678 // Approximate amount of physical memory committed for this chunk. | 638 // Approximate amount of physical memory committed for this chunk. |
679 size_t CommittedPhysicalMemory() { | 639 size_t CommittedPhysicalMemory() { return high_water_mark_; } |
680 return high_water_mark_; | |
681 } | |
682 | 640 |
683 static inline void UpdateHighWaterMark(Address mark); | 641 static inline void UpdateHighWaterMark(Address mark); |
684 | 642 |
685 protected: | 643 protected: |
686 size_t size_; | 644 size_t size_; |
687 intptr_t flags_; | 645 intptr_t flags_; |
688 | 646 |
689 // Start and end of allocatable memory on this chunk. | 647 // Start and end of allocatable memory on this chunk. |
690 Address area_start_; | 648 Address area_start_; |
691 Address area_end_; | 649 Address area_end_; |
(...skipping 22 matching lines...) Expand all Loading... |
714 | 672 |
715 base::AtomicWord parallel_sweeping_; | 673 base::AtomicWord parallel_sweeping_; |
716 | 674 |
717 // PagedSpace free-list statistics. | 675 // PagedSpace free-list statistics. |
718 intptr_t available_in_small_free_list_; | 676 intptr_t available_in_small_free_list_; |
719 intptr_t available_in_medium_free_list_; | 677 intptr_t available_in_medium_free_list_; |
720 intptr_t available_in_large_free_list_; | 678 intptr_t available_in_large_free_list_; |
721 intptr_t available_in_huge_free_list_; | 679 intptr_t available_in_huge_free_list_; |
722 intptr_t non_available_small_blocks_; | 680 intptr_t non_available_small_blocks_; |
723 | 681 |
724 static MemoryChunk* Initialize(Heap* heap, | 682 static MemoryChunk* Initialize(Heap* heap, Address base, size_t size, |
725 Address base, | 683 Address area_start, Address area_end, |
726 size_t size, | 684 Executability executable, Space* owner); |
727 Address area_start, | |
728 Address area_end, | |
729 Executability executable, | |
730 Space* owner); | |
731 | 685 |
732 private: | 686 private: |
733 // next_chunk_ holds a pointer of type MemoryChunk | 687 // next_chunk_ holds a pointer of type MemoryChunk |
734 base::AtomicWord next_chunk_; | 688 base::AtomicWord next_chunk_; |
735 // prev_chunk_ holds a pointer of type MemoryChunk | 689 // prev_chunk_ holds a pointer of type MemoryChunk |
736 base::AtomicWord prev_chunk_; | 690 base::AtomicWord prev_chunk_; |
737 | 691 |
738 friend class MemoryAllocator; | 692 friend class MemoryAllocator; |
739 }; | 693 }; |
740 | 694 |
(...skipping 58 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... |
799 // are allocated in large object space and are never moved in memory. This | 753 // are allocated in large object space and are never moved in memory. This |
800 // also applies to new space allocation, since objects are never migrated | 754 // also applies to new space allocation, since objects are never migrated |
801 // from new space to large object space. Takes double alignment into account. | 755 // from new space to large object space. Takes double alignment into account. |
802 static const int kMaxRegularHeapObjectSize = kPageSize - kObjectStartOffset; | 756 static const int kMaxRegularHeapObjectSize = kPageSize - kObjectStartOffset; |
803 | 757 |
804 // Page size mask. | 758 // Page size mask. |
805 static const intptr_t kPageAlignmentMask = (1 << kPageSizeBits) - 1; | 759 static const intptr_t kPageAlignmentMask = (1 << kPageSizeBits) - 1; |
806 | 760 |
807 inline void ClearGCFields(); | 761 inline void ClearGCFields(); |
808 | 762 |
809 static inline Page* Initialize(Heap* heap, | 763 static inline Page* Initialize(Heap* heap, MemoryChunk* chunk, |
810 MemoryChunk* chunk, | 764 Executability executable, PagedSpace* owner); |
811 Executability executable, | |
812 PagedSpace* owner); | |
813 | 765 |
814 void InitializeAsAnchor(PagedSpace* owner); | 766 void InitializeAsAnchor(PagedSpace* owner); |
815 | 767 |
816 bool WasSweptPrecisely() { return IsFlagSet(WAS_SWEPT_PRECISELY); } | 768 bool WasSweptPrecisely() { return IsFlagSet(WAS_SWEPT_PRECISELY); } |
817 bool WasSweptConservatively() { return IsFlagSet(WAS_SWEPT_CONSERVATIVELY); } | 769 bool WasSweptConservatively() { return IsFlagSet(WAS_SWEPT_CONSERVATIVELY); } |
818 bool WasSwept() { return WasSweptPrecisely() || WasSweptConservatively(); } | 770 bool WasSwept() { return WasSweptPrecisely() || WasSweptConservatively(); } |
819 | 771 |
820 void MarkSweptPrecisely() { SetFlag(WAS_SWEPT_PRECISELY); } | 772 void MarkSweptPrecisely() { SetFlag(WAS_SWEPT_PRECISELY); } |
821 void MarkSweptConservatively() { SetFlag(WAS_SWEPT_CONSERVATIVELY); } | 773 void MarkSweptConservatively() { SetFlag(WAS_SWEPT_CONSERVATIVELY); } |
822 | 774 |
(...skipping 21 matching lines...) Expand all Loading... |
844 | 796 |
845 friend class MemoryAllocator; | 797 friend class MemoryAllocator; |
846 }; | 798 }; |
847 | 799 |
848 | 800 |
// Page must fit inside the space reserved for the MemoryChunk header.
STATIC_ASSERT(sizeof(Page) <= MemoryChunk::kHeaderSize);
850 | 802 |
851 | 803 |
// Page header for the large object space. The heap object managed by this
// page starts at area_start().
class LargePage : public MemoryChunk {
 public:
  // Returns the heap object stored at the start of this page's area.
  HeapObject* GetObject() { return HeapObject::FromAddress(area_start()); }

  // Large pages are chained through the underlying chunk links.
  inline LargePage* next_page() const {
    return static_cast<LargePage*>(next_chunk());
  }

  inline void set_next_page(LargePage* page) { set_next_chunk(page); }

 private:
  // Turns a freshly allocated chunk into a LargePage; only the
  // MemoryAllocator may do this.
  static inline LargePage* Initialize(Heap* heap, MemoryChunk* chunk);

  friend class MemoryAllocator;
};

// LargePage must fit inside the space reserved for the MemoryChunk header.
STATIC_ASSERT(sizeof(LargePage) <= MemoryChunk::kHeaderSize);
872 | 821 |
873 // ---------------------------------------------------------------------------- | 822 // ---------------------------------------------------------------------------- |
874 // Space is the abstract superclass for all allocation spaces. | 823 // Space is the abstract superclass for all allocation spaces. |
(...skipping 120 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... |
995 // Compares the start addresses of two free blocks. | 944 // Compares the start addresses of two free blocks. |
996 static int CompareFreeBlockAddress(const FreeBlock* left, | 945 static int CompareFreeBlockAddress(const FreeBlock* left, |
997 const FreeBlock* right); | 946 const FreeBlock* right); |
998 | 947 |
999 DISALLOW_COPY_AND_ASSIGN(CodeRange); | 948 DISALLOW_COPY_AND_ASSIGN(CodeRange); |
1000 }; | 949 }; |
1001 | 950 |
1002 | 951 |
1003 class SkipList { | 952 class SkipList { |
1004 public: | 953 public: |
1005 SkipList() { | 954 SkipList() { Clear(); } |
1006 Clear(); | |
1007 } | |
1008 | 955 |
1009 void Clear() { | 956 void Clear() { |
1010 for (int idx = 0; idx < kSize; idx++) { | 957 for (int idx = 0; idx < kSize; idx++) { |
1011 starts_[idx] = reinterpret_cast<Address>(-1); | 958 starts_[idx] = reinterpret_cast<Address>(-1); |
1012 } | 959 } |
1013 } | 960 } |
1014 | 961 |
1015 Address StartFor(Address addr) { | 962 Address StartFor(Address addr) { return starts_[RegionNumber(addr)]; } |
1016 return starts_[RegionNumber(addr)]; | |
1017 } | |
1018 | 963 |
1019 void AddObject(Address addr, int size) { | 964 void AddObject(Address addr, int size) { |
1020 int start_region = RegionNumber(addr); | 965 int start_region = RegionNumber(addr); |
1021 int end_region = RegionNumber(addr + size - kPointerSize); | 966 int end_region = RegionNumber(addr + size - kPointerSize); |
1022 for (int idx = start_region; idx <= end_region; idx++) { | 967 for (int idx = start_region; idx <= end_region; idx++) { |
1023 if (starts_[idx] > addr) starts_[idx] = addr; | 968 if (starts_[idx] > addr) starts_[idx] = addr; |
1024 } | 969 } |
1025 } | 970 } |
1026 | 971 |
1027 static inline int RegionNumber(Address addr) { | 972 static inline int RegionNumber(Address addr) { |
(...skipping 32 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... |
// A space acquires chunks of memory from the operating system. The memory
// allocator manages the reservation, commitment and release of those chunks
// on behalf of the spaces, and keeps global size/capacity bookkeeping.
class MemoryAllocator {
 public:
  explicit MemoryAllocator(Isolate* isolate);

  // Initializes its internal bookkeeping structures.
  // Max capacity of the total space and executable memory limit.
  bool SetUp(intptr_t max_capacity, intptr_t capacity_executable);

  // Counterpart of SetUp(); releases the allocator's internal state.
  void TearDown();

  // Allocates a page of |size| bytes owned by |owner|.
  Page* AllocatePage(intptr_t size, PagedSpace* owner,
                     Executability executable);

  // Allocates a large-object page able to hold |object_size| bytes.
  LargePage* AllocateLargePage(intptr_t object_size, Space* owner,
                               Executability executable);

  // Releases a chunk previously allocated by this allocator.
  void Free(MemoryChunk* chunk);

  // Returns the maximum available bytes of heaps.
  intptr_t Available() { return capacity_ < size_ ? 0 : capacity_ - size_; }

  // Returns allocated spaces in bytes.
  intptr_t Size() { return size_; }

  // Returns the maximum available executable bytes of heaps.
  intptr_t AvailableExecutable() {
    if (capacity_executable_ < size_executable_) return 0;
    return capacity_executable_ - size_executable_;
  }

  // Returns allocated executable spaces in bytes.
  intptr_t SizeExecutable() { return size_executable_; }

  // Returns maximum available bytes that the old space can have.
  intptr_t MaxAvailable() {
    return (Available() / Page::kPageSize) * Page::kMaxRegularHeapObjectSize;
  }

  // Returns an indication of whether a pointer is in a space that has
  // been allocated by this MemoryAllocator.
  V8_INLINE bool IsOutsideAllocatedSpace(const void* address) const {
    return address < lowest_ever_allocated_ ||
           address >= highest_ever_allocated_;
  }

#ifdef DEBUG
  // Reports statistic info of the space.
  void ReportStatistics();
#endif

  // Returns a MemoryChunk in which the memory region from commit_area_size to
  // reserve_area_size of the chunk area is reserved but not committed, it
  // could be committed later by calling MemoryChunk::CommitArea.
  MemoryChunk* AllocateChunk(intptr_t reserve_area_size,
                             intptr_t commit_area_size,
                             Executability executable, Space* space);

  // Reserves |requested| bytes of virtual memory aligned to |alignment|;
  // the backing reservation is handed back through |controller|.
  Address ReserveAlignedMemory(size_t requested, size_t alignment,
                               base::VirtualMemory* controller);
  // Like ReserveAlignedMemory(), but additionally commits the first
  // |commit_size| bytes of the reservation.
  Address AllocateAlignedMemory(size_t reserve_size, size_t commit_size,
                                size_t alignment, Executability executable,
                                base::VirtualMemory* controller);

  // Commits the memory range [addr, addr + size), with permissions chosen
  // according to |executable|.
  bool CommitMemory(Address addr, size_t size, Executability executable);

  // Releases memory, either via its VirtualMemory reservation or by raw
  // address and size.
  void FreeMemory(base::VirtualMemory* reservation, Executability executable);
  void FreeMemory(Address addr, size_t size, Executability executable);

  // Commit a contiguous block of memory from the initial chunk.  Assumes that
  // the address is not NULL, the size is greater than zero, and that the
  // block is contained in the initial chunk.  Returns true if it succeeded
  // and false otherwise.
  bool CommitBlock(Address start, size_t size, Executability executable);

  // Uncommit a contiguous block of memory [start..(start+size)[.
  // start is not NULL, the size is greater than zero, and the
  // block is contained in the initial chunk.  Returns true if it succeeded
  // and false otherwise.
  bool UncommitBlock(Address start, size_t size);

  // Zaps a contiguous block of memory [start..(start+size)[ thus
  // filling it up with a recognizable non-NULL bit pattern.
  void ZapBlock(Address start, size_t size);

  // Invokes every registered callback matching |space| and |action|.
  void PerformAllocationCallback(ObjectSpace space, AllocationAction action,
                                 size_t size);

  // Registers |callback| to be run for allocations in |space| that match
  // |action|.
  void AddMemoryAllocationCallback(MemoryAllocationCallback callback,
                                   ObjectSpace space, AllocationAction action);

  void RemoveMemoryAllocationCallback(MemoryAllocationCallback callback);

  bool MemoryAllocationCallbackRegistered(MemoryAllocationCallback callback);

  // Layout of a code page: offsets of the leading guard region and of the
  // usable code area within the page.
  static int CodePageGuardStartOffset();

  static int CodePageGuardSize();

  static int CodePageAreaStartOffset();

  static int CodePageAreaEndOffset();

  static int CodePageAreaSize() {
    return CodePageAreaEndOffset() - CodePageAreaStartOffset();
  }

  // Commits |commit_size| bytes of executable memory inside the reservation
  // |vm| starting at |start|; |reserved_size| is the total reserved extent.
  MUST_USE_RESULT bool CommitExecutableMemory(base::VirtualMemory* vm,
                                              Address start, size_t commit_size,
                                              size_t reserved_size);

 private:
  Isolate* isolate_;

  // Maximum space size in bytes.
  size_t capacity_;
  // Maximum subset of capacity_ that can be executable
  size_t capacity_executable_;

  // Allocated space size in bytes.
  size_t size_;
  // Allocated executable space size in bytes.
  size_t size_executable_;

  // We keep the lowest and highest addresses allocated as a quick way
  // of determining that pointers are outside the heap. The estimate is
  // conservative, i.e. not all addresses in 'allocated' space are allocated
  // to our heap. The range is [lowest, highest[, inclusive on the low end
  // and exclusive on the high end.
  void* lowest_ever_allocated_;
  void* highest_ever_allocated_;

  // One registered allocation callback together with the space/action filter
  // it was registered for.
  struct MemoryAllocationCallbackRegistration {
    MemoryAllocationCallbackRegistration(MemoryAllocationCallback callback,
                                         ObjectSpace space,
                                         AllocationAction action)
        : callback(callback), space(space), action(action) {}
    MemoryAllocationCallback callback;
    ObjectSpace space;
    AllocationAction action;
  };

  // A List of callback that are triggered when memory is allocated or free'd
  List<MemoryAllocationCallbackRegistration> memory_allocation_callbacks_;

  // Initializes pages in a chunk. Returns the first page address.
  // This function and GetChunkId() are provided for the mark-compact
  // collector to rebuild page headers in the from space, which is
  // used as a marking stack and its page headers are destroyed.
  Page* InitializePagesInChunk(int chunk_id, int pages_in_chunk,
                               PagedSpace* owner);

  // Widens the [lowest, highest[ estimate used by IsOutsideAllocatedSpace().
  void UpdateAllocatedSpaceLimits(void* low, void* high) {
    lowest_ever_allocated_ = Min(lowest_ever_allocated_, low);
    highest_ever_allocated_ = Max(highest_ever_allocated_, high);
  }

  DISALLOW_IMPLICIT_CONSTRUCTORS(MemoryAllocator);
};
1229 | 1163 |
1230 | 1164 |
1231 // ----------------------------------------------------------------------------- | 1165 // ----------------------------------------------------------------------------- |
1232 // Interface for heap object iterator to be implemented by all object space | 1166 // Interface for heap object iterator to be implemented by all object space |
1233 // object iterators. | 1167 // object iterators. |
1234 // | 1168 // |
// NOTE: The space-specific object iterators also implement their own next()
// method, which is used to avoid virtual function calls when iterating over a
// specific space.
1238 | 1172 |
// Abstract base class for the space-specific object iterators below; exposes
// iteration over heap objects behind a uniform virtual interface.
class ObjectIterator : public Malloced {
 public:
  virtual ~ObjectIterator() {}

  // Returns the next object, or NULL when the iteration has ended.
  virtual HeapObject* next_object() = 0;
};
1245 | 1179 |
1246 | 1180 |
1247 // ----------------------------------------------------------------------------- | 1181 // ----------------------------------------------------------------------------- |
1248 // Heap object iterator in new/old/map spaces. | 1182 // Heap object iterator in new/old/map spaces. |
1249 // | 1183 // |
1250 // A HeapObjectIterator iterates objects from the bottom of the given space | 1184 // A HeapObjectIterator iterates objects from the bottom of the given space |
1251 // to its top or from the bottom of the given page to its top. | 1185 // to its top or from the bottom of the given page to its top. |
1252 // | 1186 // |
1253 // If objects are allocated in the page during iteration the iterator may | 1187 // If objects are allocated in the page during iteration the iterator may |
1254 // or may not iterate over those objects. The caller must create a new | 1188 // or may not iterate over those objects. The caller must create a new |
1255 // iterator in order to be sure to visit these new objects. | 1189 // iterator in order to be sure to visit these new objects. |
class HeapObjectIterator : public ObjectIterator {
 public:
  // Creates a new object iterator in a given space.
  // If the size function is not given, the iterator calls the default
  // Object::Size().
  explicit HeapObjectIterator(PagedSpace* space);
  HeapObjectIterator(PagedSpace* space, HeapObjectCallback size_func);
  // Restricts the iteration to a single page instead of a whole space.
  HeapObjectIterator(Page* page, HeapObjectCallback size_func);

  // Advance to the next object, skipping free spaces and other fillers and
  // skipping the special garbage section of which there is one per space.
  // Returns NULL when the iteration has ended.
  inline HeapObject* Next() {
    do {
      // Fast path: take the next object from the current page.
      HeapObject* next_obj = FromCurrentPage();
      if (next_obj != NULL) return next_obj;
      // Slow path: advance to the next page until none are left.
    } while (AdvanceToNextPage());
    return NULL;
  }

  virtual HeapObject* next_object() { return Next(); }

 private:
  // Whether the iterator walks one page only or all pages of the space.
  enum PageMode { kOnePageOnly, kAllPagesInSpace };

  Address cur_addr_;              // Current iteration point.
  Address cur_end_;               // End iteration point.
  HeapObjectCallback size_func_;  // Size function or NULL.
  PagedSpace* space_;
  PageMode page_mode_;

  // Fast (inlined) path of next().
  inline HeapObject* FromCurrentPage();

  // Slow path of next(), goes into the next page.  Returns false if the
  // iteration has ended.
  bool AdvanceToNextPage();

  // Initializes fields.
  inline void Initialize(PagedSpace* owner, Address start, Address end,
                         PageMode mode, HeapObjectCallback size_func);
};
1303 | 1232 |
1304 | 1233 |
1305 // ----------------------------------------------------------------------------- | 1234 // ----------------------------------------------------------------------------- |
1306 // A PageIterator iterates the pages in a paged space. | 1235 // A PageIterator iterates the pages in a paged space. |
1307 | 1236 |
1308 class PageIterator BASE_EMBEDDED { | 1237 class PageIterator BASE_EMBEDDED { |
1309 public: | 1238 public: |
1310 explicit inline PageIterator(PagedSpace* space); | 1239 explicit inline PageIterator(PagedSpace* space); |
1311 | 1240 |
(...skipping 10 matching lines...) Expand all Loading... |
1322 | 1251 |
1323 | 1252 |
1324 // ----------------------------------------------------------------------------- | 1253 // ----------------------------------------------------------------------------- |
1325 // A space has a circular list of pages. The next page can be accessed via | 1254 // A space has a circular list of pages. The next page can be accessed via |
1326 // Page::next_page() call. | 1255 // Page::next_page() call. |
1327 | 1256 |
1328 // An abstraction of allocation and relocation pointers in a page-structured | 1257 // An abstraction of allocation and relocation pointers in a page-structured |
1329 // space. | 1258 // space. |
// An abstraction of allocation and relocation pointers in a page-structured
// space. Holds the current linear-allocation top and limit; both must be
// pointer-tag-free addresses (checked in debug builds).
class AllocationInfo {
 public:
  AllocationInfo() : top_(NULL), limit_(NULL) {}

  // Sets the allocation top. |top| must be NULL or carry no heap-object tag
  // bits.
  INLINE(void set_top(Address top)) {
    SLOW_DCHECK(top == NULL ||
                (reinterpret_cast<intptr_t>(top) & HeapObjectTagMask()) == 0);
    top_ = top;
  }

  INLINE(Address top()) const {
    SLOW_DCHECK(top_ == NULL ||
                (reinterpret_cast<intptr_t>(top_) & HeapObjectTagMask()) == 0);
    return top_;
  }

  // Address of the top_ field itself, for callers that update it in place.
  Address* top_address() { return &top_; }

  // Sets the allocation limit. |limit| must be NULL or carry no heap-object
  // tag bits.
  INLINE(void set_limit(Address limit)) {
    SLOW_DCHECK(limit == NULL ||
                (reinterpret_cast<intptr_t>(limit) & HeapObjectTagMask()) == 0);
    limit_ = limit;
  }

  INLINE(Address limit()) const {
    SLOW_DCHECK(limit_ == NULL ||
                (reinterpret_cast<intptr_t>(limit_) & HeapObjectTagMask()) ==
                    0);
    return limit_;
  }

  // Address of the limit_ field itself, for callers that update it in place.
  Address* limit_address() { return &limit_; }

#ifdef DEBUG
  // Checks that top and limit lie on the same page and are ordered.
  bool VerifyPagedAllocation() {
    return (Page::FromAllocationTop(top_) == Page::FromAllocationTop(limit_)) &&
           (top_ <= limit_);
  }
#endif

 private:
  // Current allocation top.
  Address top_;
  // Current allocation limit.
  Address limit_;
};
1380 | 1305 |
(...skipping 89 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... |
1470 }; | 1395 }; |
1471 | 1396 |
1472 | 1397 |
1473 // ----------------------------------------------------------------------------- | 1398 // ----------------------------------------------------------------------------- |
1474 // Free lists for old object spaces | 1399 // Free lists for old object spaces |
1475 // | 1400 // |
1476 // Free-list nodes are free blocks in the heap. They look like heap objects | 1401 // Free-list nodes are free blocks in the heap. They look like heap objects |
1477 // (free-list node pointers have the heap object tag, and they have a map like | 1402 // (free-list node pointers have the heap object tag, and they have a map like |
1478 // a heap object). They have a size and a next pointer. The next pointer is | 1403 // a heap object). They have a size and a next pointer. The next pointer is |
1479 // the raw address of the next free list node (or NULL). | 1404 // the raw address of the next free list node (or NULL). |
1480 class FreeListNode: public HeapObject { | 1405 class FreeListNode : public HeapObject { |
1481 public: | 1406 public: |
1482 // Obtain a free-list node from a raw address. This is not a cast because | 1407 // Obtain a free-list node from a raw address. This is not a cast because |
1483 // it does not check nor require that the first word at the address is a map | 1408 // it does not check nor require that the first word at the address is a map |
1484 // pointer. | 1409 // pointer. |
1485 static FreeListNode* FromAddress(Address address) { | 1410 static FreeListNode* FromAddress(Address address) { |
1486 return reinterpret_cast<FreeListNode*>(HeapObject::FromAddress(address)); | 1411 return reinterpret_cast<FreeListNode*>(HeapObject::FromAddress(address)); |
1487 } | 1412 } |
1488 | 1413 |
1489 static inline bool IsFreeListNode(HeapObject* object); | 1414 static inline bool IsFreeListNode(HeapObject* object); |
1490 | 1415 |
(...skipping 18 matching lines...) Expand all Loading... |
1509 static const int kNextOffset = POINTER_SIZE_ALIGN(FreeSpace::kHeaderSize); | 1434 static const int kNextOffset = POINTER_SIZE_ALIGN(FreeSpace::kHeaderSize); |
1510 | 1435 |
1511 DISALLOW_IMPLICIT_CONSTRUCTORS(FreeListNode); | 1436 DISALLOW_IMPLICIT_CONSTRUCTORS(FreeListNode); |
1512 }; | 1437 }; |
1513 | 1438 |
1514 | 1439 |
1515 // The free list category holds a pointer to the top element and a pointer to | 1440 // The free list category holds a pointer to the top element and a pointer to |
1516 // the end element of the linked list of free memory blocks. | 1441 // the end element of the linked list of free memory blocks. |
1517 class FreeListCategory { | 1442 class FreeListCategory { |
1518 public: | 1443 public: |
1519 FreeListCategory() : | 1444 FreeListCategory() : top_(0), end_(NULL), available_(0) {} |
1520 top_(0), | |
1521 end_(NULL), | |
1522 available_(0) {} | |
1523 | 1445 |
1524 intptr_t Concatenate(FreeListCategory* category); | 1446 intptr_t Concatenate(FreeListCategory* category); |
1525 | 1447 |
1526 void Reset(); | 1448 void Reset(); |
1527 | 1449 |
1528 void Free(FreeListNode* node, int size_in_bytes); | 1450 void Free(FreeListNode* node, int size_in_bytes); |
1529 | 1451 |
1530 FreeListNode* PickNodeFromList(int *node_size); | 1452 FreeListNode* PickNodeFromList(int* node_size); |
1531 FreeListNode* PickNodeFromList(int size_in_bytes, int *node_size); | 1453 FreeListNode* PickNodeFromList(int size_in_bytes, int* node_size); |
1532 | 1454 |
1533 intptr_t EvictFreeListItemsInList(Page* p); | 1455 intptr_t EvictFreeListItemsInList(Page* p); |
1534 bool ContainsPageFreeListItemsInList(Page* p); | 1456 bool ContainsPageFreeListItemsInList(Page* p); |
1535 | 1457 |
1536 void RepairFreeList(Heap* heap); | 1458 void RepairFreeList(Heap* heap); |
1537 | 1459 |
1538 FreeListNode* top() const { | 1460 FreeListNode* top() const { |
1539 return reinterpret_cast<FreeListNode*>(base::NoBarrier_Load(&top_)); | 1461 return reinterpret_cast<FreeListNode*>(base::NoBarrier_Load(&top_)); |
1540 } | 1462 } |
1541 | 1463 |
1542 void set_top(FreeListNode* top) { | 1464 void set_top(FreeListNode* top) { |
1543 base::NoBarrier_Store(&top_, reinterpret_cast<base::AtomicWord>(top)); | 1465 base::NoBarrier_Store(&top_, reinterpret_cast<base::AtomicWord>(top)); |
1544 } | 1466 } |
1545 | 1467 |
1546 FreeListNode** GetEndAddress() { return &end_; } | 1468 FreeListNode** GetEndAddress() { return &end_; } |
1547 FreeListNode* end() const { return end_; } | 1469 FreeListNode* end() const { return end_; } |
1548 void set_end(FreeListNode* end) { end_ = end; } | 1470 void set_end(FreeListNode* end) { end_ = end; } |
1549 | 1471 |
1550 int* GetAvailableAddress() { return &available_; } | 1472 int* GetAvailableAddress() { return &available_; } |
1551 int available() const { return available_; } | 1473 int available() const { return available_; } |
1552 void set_available(int available) { available_ = available; } | 1474 void set_available(int available) { available_ = available; } |
1553 | 1475 |
1554 base::Mutex* mutex() { return &mutex_; } | 1476 base::Mutex* mutex() { return &mutex_; } |
1555 | 1477 |
1556 bool IsEmpty() { | 1478 bool IsEmpty() { return top() == 0; } |
1557 return top() == 0; | |
1558 } | |
1559 | 1479 |
1560 #ifdef DEBUG | 1480 #ifdef DEBUG |
1561 intptr_t SumFreeList(); | 1481 intptr_t SumFreeList(); |
1562 int FreeListLength(); | 1482 int FreeListLength(); |
1563 #endif | 1483 #endif |
1564 | 1484 |
1565 private: | 1485 private: |
1566 // top_ points to the top FreeListNode* in the free list category. | 1486 // top_ points to the top FreeListNode* in the free list category. |
1567 base::AtomicWord top_; | 1487 base::AtomicWord top_; |
1568 FreeListNode* end_; | 1488 FreeListNode* end_; |
(...skipping 115 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... |
1684 FreeListCategory large_list_; | 1604 FreeListCategory large_list_; |
1685 FreeListCategory huge_list_; | 1605 FreeListCategory huge_list_; |
1686 | 1606 |
1687 DISALLOW_IMPLICIT_CONSTRUCTORS(FreeList); | 1607 DISALLOW_IMPLICIT_CONSTRUCTORS(FreeList); |
1688 }; | 1608 }; |
1689 | 1609 |
1690 | 1610 |
1691 class AllocationResult { | 1611 class AllocationResult { |
1692 public: | 1612 public: |
1693 // Implicit constructor from Object*. | 1613 // Implicit constructor from Object*. |
1694 AllocationResult(Object* object) : object_(object), // NOLINT | 1614 explicit AllocationResult(Object* object) |
1695 retry_space_(INVALID_SPACE) { } | 1615 : object_(object), // NOLINT |
| 1616 retry_space_(INVALID_SPACE) {} |
1696 | 1617 |
1697 AllocationResult() : object_(NULL), | 1618 AllocationResult() : object_(NULL), retry_space_(INVALID_SPACE) {} |
1698 retry_space_(INVALID_SPACE) { } | |
1699 | 1619 |
1700 static inline AllocationResult Retry(AllocationSpace space = NEW_SPACE) { | 1620 static inline AllocationResult Retry(AllocationSpace space = NEW_SPACE) { |
1701 return AllocationResult(space); | 1621 return AllocationResult(space); |
1702 } | 1622 } |
1703 | 1623 |
1704 inline bool IsRetry() { return retry_space_ != INVALID_SPACE; } | 1624 inline bool IsRetry() { return retry_space_ != INVALID_SPACE; } |
1705 | 1625 |
1706 template <typename T> | 1626 template <typename T> |
1707 bool To(T** obj) { | 1627 bool To(T** obj) { |
1708 if (IsRetry()) return false; | 1628 if (IsRetry()) return false; |
1709 *obj = T::cast(object_); | 1629 *obj = T::cast(object_); |
1710 return true; | 1630 return true; |
1711 } | 1631 } |
1712 | 1632 |
1713 Object* ToObjectChecked() { | 1633 Object* ToObjectChecked() { |
1714 CHECK(!IsRetry()); | 1634 CHECK(!IsRetry()); |
1715 return object_; | 1635 return object_; |
1716 } | 1636 } |
1717 | 1637 |
1718 AllocationSpace RetrySpace() { | 1638 AllocationSpace RetrySpace() { |
1719 DCHECK(IsRetry()); | 1639 DCHECK(IsRetry()); |
1720 return retry_space_; | 1640 return retry_space_; |
1721 } | 1641 } |
1722 | 1642 |
1723 private: | 1643 private: |
1724 explicit AllocationResult(AllocationSpace space) : object_(NULL), | 1644 explicit AllocationResult(AllocationSpace space) |
1725 retry_space_(space) { } | 1645 : object_(NULL), retry_space_(space) {} |
1726 | 1646 |
1727 Object* object_; | 1647 Object* object_; |
1728 AllocationSpace retry_space_; | 1648 AllocationSpace retry_space_; |
1729 }; | 1649 }; |
1730 | 1650 |
1731 | 1651 |
1732 class PagedSpace : public Space { | 1652 class PagedSpace : public Space { |
1733 public: | 1653 public: |
1734 // Creates a space with a maximum capacity, and an id. | 1654 // Creates a space with a maximum capacity, and an id. |
1735 PagedSpace(Heap* heap, | 1655 PagedSpace(Heap* heap, intptr_t max_capacity, AllocationSpace id, |
1736 intptr_t max_capacity, | |
1737 AllocationSpace id, | |
1738 Executability executable); | 1656 Executability executable); |
1739 | 1657 |
1740 virtual ~PagedSpace() {} | 1658 virtual ~PagedSpace() {} |
1741 | 1659 |
1742 // Set up the space using the given address range of virtual memory (from | 1660 // Set up the space using the given address range of virtual memory (from |
1743 // the memory allocator's initial chunk) if possible. If the block of | 1661 // the memory allocator's initial chunk) if possible. If the block of |
1744 // addresses is not big enough to contain a single page-aligned page, a | 1662 // addresses is not big enough to contain a single page-aligned page, a |
1745 // fresh chunk will be allocated. | 1663 // fresh chunk will be allocated. |
1746 bool SetUp(); | 1664 bool SetUp(); |
1747 | 1665 |
(...skipping 83 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... |
1831 // Wasted bytes in this space. These are just the bytes that were thrown away | 1749 // Wasted bytes in this space. These are just the bytes that were thrown away |
1832 // due to being too small to use for allocation. They do not include the | 1750 // due to being too small to use for allocation. They do not include the |
1833 // free bytes that were not found at all due to lazy sweeping. | 1751 // free bytes that were not found at all due to lazy sweeping. |
1834 virtual intptr_t Waste() { return accounting_stats_.Waste(); } | 1752 virtual intptr_t Waste() { return accounting_stats_.Waste(); } |
1835 | 1753 |
1836 // Returns the allocation pointer in this space. | 1754 // Returns the allocation pointer in this space. |
1837 Address top() { return allocation_info_.top(); } | 1755 Address top() { return allocation_info_.top(); } |
1838 Address limit() { return allocation_info_.limit(); } | 1756 Address limit() { return allocation_info_.limit(); } |
1839 | 1757 |
1840 // The allocation top address. | 1758 // The allocation top address. |
1841 Address* allocation_top_address() { | 1759 Address* allocation_top_address() { return allocation_info_.top_address(); } |
1842 return allocation_info_.top_address(); | |
1843 } | |
1844 | 1760 |
1845 // The allocation limit address. | 1761 // The allocation limit address. |
1846 Address* allocation_limit_address() { | 1762 Address* allocation_limit_address() { |
1847 return allocation_info_.limit_address(); | 1763 return allocation_info_.limit_address(); |
1848 } | 1764 } |
1849 | 1765 |
1850 // Allocate the requested number of bytes in the space if possible, return a | 1766 // Allocate the requested number of bytes in the space if possible, return a |
1851 // failure object if not. | 1767 // failure object if not. |
1852 MUST_USE_RESULT inline AllocationResult AllocateRaw(int size_in_bytes); | 1768 MUST_USE_RESULT inline AllocationResult AllocateRaw(int size_in_bytes); |
1853 | 1769 |
1854 // Give a block of memory to the space's free list. It might be added to | 1770 // Give a block of memory to the space's free list. It might be added to |
1855 // the free list or accounted as waste. | 1771 // the free list or accounted as waste. |
1856 // The memory is either added to the free list or accounted as waste; | 1772 // The memory is either added to the free list or accounted as waste; |
1857 // the number of usable (non-wasted) bytes is returned. | 1773 // the number of usable (non-wasted) bytes is returned. |
1858 int Free(Address start, int size_in_bytes) { | 1774 int Free(Address start, int size_in_bytes) { |
1859 int wasted = free_list_.Free(start, size_in_bytes); | 1775 int wasted = free_list_.Free(start, size_in_bytes); |
1860 accounting_stats_.DeallocateBytes(size_in_bytes); | 1776 accounting_stats_.DeallocateBytes(size_in_bytes); |
1861 accounting_stats_.WasteBytes(wasted); | 1777 accounting_stats_.WasteBytes(wasted); |
1862 return size_in_bytes - wasted; | 1778 return size_in_bytes - wasted; |
1863 } | 1779 } |
1864 | 1780 |
1865 void ResetFreeList() { | 1781 void ResetFreeList() { free_list_.Reset(); } |
1866 free_list_.Reset(); | |
1867 } | |
1868 | 1782 |
1869 // Set space allocation info. | 1783 // Set space allocation info. |
1870 void SetTopAndLimit(Address top, Address limit) { | 1784 void SetTopAndLimit(Address top, Address limit) { |
1871 DCHECK(top == limit || | 1785 DCHECK(top == limit || |
1872 Page::FromAddress(top) == Page::FromAddress(limit - 1)); | 1786 Page::FromAddress(top) == Page::FromAddress(limit - 1)); |
1873 MemoryChunk::UpdateHighWaterMark(allocation_info_.top()); | 1787 MemoryChunk::UpdateHighWaterMark(allocation_info_.top()); |
1874 allocation_info_.set_top(top); | 1788 allocation_info_.set_top(top); |
1875 allocation_info_.set_limit(limit); | 1789 allocation_info_.set_limit(limit); |
1876 } | 1790 } |
1877 | 1791 |
1878 // Empty space allocation info, returning unused area to free list. | 1792 // Empty space allocation info, returning unused area to free list. |
1879 void EmptyAllocationInfo() { | 1793 void EmptyAllocationInfo() { |
1880 // Mark the old linear allocation area with a free space map so it can be | 1794 // Mark the old linear allocation area with a free space map so it can be |
1881 // skipped when scanning the heap. | 1795 // skipped when scanning the heap. |
1882 int old_linear_size = static_cast<int>(limit() - top()); | 1796 int old_linear_size = static_cast<int>(limit() - top()); |
1883 Free(top(), old_linear_size); | 1797 Free(top(), old_linear_size); |
1884 SetTopAndLimit(NULL, NULL); | 1798 SetTopAndLimit(NULL, NULL); |
1885 } | 1799 } |
1886 | 1800 |
1887 void Allocate(int bytes) { | 1801 void Allocate(int bytes) { accounting_stats_.AllocateBytes(bytes); } |
1888 accounting_stats_.AllocateBytes(bytes); | |
1889 } | |
1890 | 1802 |
1891 void IncreaseCapacity(int size); | 1803 void IncreaseCapacity(int size); |
1892 | 1804 |
1893 // Releases an unused page and shrinks the space. | 1805 // Releases an unused page and shrinks the space. |
1894 void ReleasePage(Page* page); | 1806 void ReleasePage(Page* page); |
1895 | 1807 |
1896 // The dummy page that anchors the linked list of pages. | 1808 // The dummy page that anchors the linked list of pages. |
1897 Page* anchor() { return &anchor_; } | 1809 Page* anchor() { return &anchor_; } |
1898 | 1810 |
1899 #ifdef VERIFY_HEAP | 1811 #ifdef VERIFY_HEAP |
(...skipping 18 matching lines...) Expand all Loading... |
1918 static void ResetCodeStatistics(Isolate* isolate); | 1830 static void ResetCodeStatistics(Isolate* isolate); |
1919 #endif | 1831 #endif |
1920 | 1832 |
1921 bool swept_precisely() { return swept_precisely_; } | 1833 bool swept_precisely() { return swept_precisely_; } |
1922 void set_swept_precisely(bool b) { swept_precisely_ = b; } | 1834 void set_swept_precisely(bool b) { swept_precisely_ = b; } |
1923 | 1835 |
1924 // Evacuation candidates are swept by evacuator. Needs to return a valid | 1836 // Evacuation candidates are swept by evacuator. Needs to return a valid |
1925 // result before _and_ after evacuation has finished. | 1837 // result before _and_ after evacuation has finished. |
1926 static bool ShouldBeSweptBySweeperThreads(Page* p) { | 1838 static bool ShouldBeSweptBySweeperThreads(Page* p) { |
1927 return !p->IsEvacuationCandidate() && | 1839 return !p->IsEvacuationCandidate() && |
1928 !p->IsFlagSet(Page::RESCAN_ON_EVACUATION) && | 1840 !p->IsFlagSet(Page::RESCAN_ON_EVACUATION) && !p->WasSweptPrecisely(); |
1929 !p->WasSweptPrecisely(); | |
1930 } | 1841 } |
1931 | 1842 |
1932 void IncrementUnsweptFreeBytes(intptr_t by) { | 1843 void IncrementUnsweptFreeBytes(intptr_t by) { unswept_free_bytes_ += by; } |
1933 unswept_free_bytes_ += by; | |
1934 } | |
1935 | 1844 |
1936 void IncreaseUnsweptFreeBytes(Page* p) { | 1845 void IncreaseUnsweptFreeBytes(Page* p) { |
1937 DCHECK(ShouldBeSweptBySweeperThreads(p)); | 1846 DCHECK(ShouldBeSweptBySweeperThreads(p)); |
1938 unswept_free_bytes_ += (p->area_size() - p->LiveBytes()); | 1847 unswept_free_bytes_ += (p->area_size() - p->LiveBytes()); |
1939 } | 1848 } |
1940 | 1849 |
1941 void DecrementUnsweptFreeBytes(intptr_t by) { | 1850 void DecrementUnsweptFreeBytes(intptr_t by) { unswept_free_bytes_ -= by; } |
1942 unswept_free_bytes_ -= by; | |
1943 } | |
1944 | 1851 |
1945 void DecreaseUnsweptFreeBytes(Page* p) { | 1852 void DecreaseUnsweptFreeBytes(Page* p) { |
1946 DCHECK(ShouldBeSweptBySweeperThreads(p)); | 1853 DCHECK(ShouldBeSweptBySweeperThreads(p)); |
1947 unswept_free_bytes_ -= (p->area_size() - p->LiveBytes()); | 1854 unswept_free_bytes_ -= (p->area_size() - p->LiveBytes()); |
1948 } | 1855 } |
1949 | 1856 |
1950 void ResetUnsweptFreeBytes() { | 1857 void ResetUnsweptFreeBytes() { unswept_free_bytes_ = 0; } |
1951 unswept_free_bytes_ = 0; | |
1952 } | |
1953 | 1858 |
1954 // This function tries to steal size_in_bytes memory from the sweeper threads | 1859 // This function tries to steal size_in_bytes memory from the sweeper threads |
1955 // free-lists. If it does not succeed stealing enough memory, it will wait | 1860 // free-lists. If it does not succeed stealing enough memory, it will wait |
1956 // for the sweeper threads to finish sweeping. | 1861 // for the sweeper threads to finish sweeping. |
1957 // It returns true when sweeping is completed and false otherwise. | 1862 // It returns true when sweeping is completed and false otherwise. |
1958 bool EnsureSweeperProgress(intptr_t size_in_bytes); | 1863 bool EnsureSweeperProgress(intptr_t size_in_bytes); |
1959 | 1864 |
1960 void set_end_of_unswept_pages(Page* page) { | 1865 void set_end_of_unswept_pages(Page* page) { end_of_unswept_pages_ = page; } |
1961 end_of_unswept_pages_ = page; | |
1962 } | |
1963 | 1866 |
1964 Page* end_of_unswept_pages() { | 1867 Page* end_of_unswept_pages() { return end_of_unswept_pages_; } |
1965 return end_of_unswept_pages_; | |
1966 } | |
1967 | 1868 |
1968 Page* FirstPage() { return anchor_.next_page(); } | 1869 Page* FirstPage() { return anchor_.next_page(); } |
1969 Page* LastPage() { return anchor_.prev_page(); } | 1870 Page* LastPage() { return anchor_.prev_page(); } |
1970 | 1871 |
1971 void EvictEvacuationCandidatesFromFreeLists(); | 1872 void EvictEvacuationCandidatesFromFreeLists(); |
1972 | 1873 |
1973 bool CanExpand(); | 1874 bool CanExpand(); |
1974 | 1875 |
1975 // Returns the number of total pages in this space. | 1876 // Returns the number of total pages in this space. |
1976 int CountTotalPages(); | 1877 int CountTotalPages(); |
1977 | 1878 |
1978 // Return size of allocatable area on a page in this space. | 1879 // Return size of allocatable area on a page in this space. |
1979 inline int AreaSize() { | 1880 inline int AreaSize() { return area_size_; } |
1980 return area_size_; | |
1981 } | |
1982 | 1881 |
1983 void CreateEmergencyMemory(); | 1882 void CreateEmergencyMemory(); |
1984 void FreeEmergencyMemory(); | 1883 void FreeEmergencyMemory(); |
1985 void UseEmergencyMemory(); | 1884 void UseEmergencyMemory(); |
1986 | 1885 |
1987 bool HasEmergencyMemory() { return emergency_memory_ != NULL; } | 1886 bool HasEmergencyMemory() { return emergency_memory_ != NULL; } |
1988 | 1887 |
1989 protected: | 1888 protected: |
1990 FreeList* free_list() { return &free_list_; } | 1889 FreeList* free_list() { return &free_list_; } |
1991 | 1890 |
(...skipping 74 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... |
2066 } | 1965 } |
2067 | 1966 |
2068 private: | 1967 private: |
2069 int number_; | 1968 int number_; |
2070 int bytes_; | 1969 int bytes_; |
2071 }; | 1970 }; |
2072 | 1971 |
2073 | 1972 |
2074 // HistogramInfo class for recording a single "bar" of a histogram. This | 1973 // HistogramInfo class for recording a single "bar" of a histogram. This |
2075 // class is used for collecting statistics to print to the log file. | 1974 // class is used for collecting statistics to print to the log file. |
2076 class HistogramInfo: public NumberAndSizeInfo { | 1975 class HistogramInfo : public NumberAndSizeInfo { |
2077 public: | 1976 public: |
2078 HistogramInfo() : NumberAndSizeInfo() {} | 1977 HistogramInfo() : NumberAndSizeInfo() {} |
2079 | 1978 |
2080 const char* name() { return name_; } | 1979 const char* name() { return name_; } |
2081 void set_name(const char* name) { name_ = name; } | 1980 void set_name(const char* name) { name_ = name; } |
2082 | 1981 |
2083 private: | 1982 private: |
2084 const char* name_; | 1983 const char* name_; |
2085 }; | 1984 }; |
2086 | 1985 |
2087 | 1986 |
2088 enum SemiSpaceId { | 1987 enum SemiSpaceId { kFromSpace = 0, kToSpace = 1 }; |
2089 kFromSpace = 0, | |
2090 kToSpace = 1 | |
2091 }; | |
2092 | 1988 |
2093 | 1989 |
2094 class SemiSpace; | 1990 class SemiSpace; |
2095 | 1991 |
2096 | 1992 |
2097 class NewSpacePage : public MemoryChunk { | 1993 class NewSpacePage : public MemoryChunk { |
2098 public: | 1994 public: |
2099 // GC related flags copied from from-space to to-space when | 1995 // GC related flags copied from from-space to to-space when |
2100 // flipping semispaces. | 1996 // flipping semispaces. |
2101 static const intptr_t kCopyOnFlipFlagsMask = | 1997 static const intptr_t kCopyOnFlipFlagsMask = |
2102 (1 << MemoryChunk::POINTERS_TO_HERE_ARE_INTERESTING) | | 1998 (1 << MemoryChunk::POINTERS_TO_HERE_ARE_INTERESTING) | |
2103 (1 << MemoryChunk::POINTERS_FROM_HERE_ARE_INTERESTING) | | 1999 (1 << MemoryChunk::POINTERS_FROM_HERE_ARE_INTERESTING) | |
2104 (1 << MemoryChunk::SCAN_ON_SCAVENGE); | 2000 (1 << MemoryChunk::SCAN_ON_SCAVENGE); |
2105 | 2001 |
2106 static const int kAreaSize = Page::kMaxRegularHeapObjectSize; | 2002 static const int kAreaSize = Page::kMaxRegularHeapObjectSize; |
2107 | 2003 |
2108 inline NewSpacePage* next_page() const { | 2004 inline NewSpacePage* next_page() const { |
2109 return static_cast<NewSpacePage*>(next_chunk()); | 2005 return static_cast<NewSpacePage*>(next_chunk()); |
2110 } | 2006 } |
2111 | 2007 |
2112 inline void set_next_page(NewSpacePage* page) { | 2008 inline void set_next_page(NewSpacePage* page) { set_next_chunk(page); } |
2113 set_next_chunk(page); | |
2114 } | |
2115 | 2009 |
2116 inline NewSpacePage* prev_page() const { | 2010 inline NewSpacePage* prev_page() const { |
2117 return static_cast<NewSpacePage*>(prev_chunk()); | 2011 return static_cast<NewSpacePage*>(prev_chunk()); |
2118 } | 2012 } |
2119 | 2013 |
2120 inline void set_prev_page(NewSpacePage* page) { | 2014 inline void set_prev_page(NewSpacePage* page) { set_prev_chunk(page); } |
2121 set_prev_chunk(page); | |
2122 } | |
2123 | 2015 |
2124 SemiSpace* semi_space() { | 2016 SemiSpace* semi_space() { return reinterpret_cast<SemiSpace*>(owner()); } |
2125 return reinterpret_cast<SemiSpace*>(owner()); | |
2126 } | |
2127 | 2017 |
2128 bool is_anchor() { return !this->InNewSpace(); } | 2018 bool is_anchor() { return !this->InNewSpace(); } |
2129 | 2019 |
2130 static bool IsAtStart(Address addr) { | 2020 static bool IsAtStart(Address addr) { |
2131 return (reinterpret_cast<intptr_t>(addr) & Page::kPageAlignmentMask) | 2021 return (reinterpret_cast<intptr_t>(addr) & Page::kPageAlignmentMask) == |
2132 == kObjectStartOffset; | 2022 kObjectStartOffset; |
2133 } | 2023 } |
2134 | 2024 |
2135 static bool IsAtEnd(Address addr) { | 2025 static bool IsAtEnd(Address addr) { |
2136 return (reinterpret_cast<intptr_t>(addr) & Page::kPageAlignmentMask) == 0; | 2026 return (reinterpret_cast<intptr_t>(addr) & Page::kPageAlignmentMask) == 0; |
2137 } | 2027 } |
2138 | 2028 |
2139 Address address() { | 2029 Address address() { return reinterpret_cast<Address>(this); } |
2140 return reinterpret_cast<Address>(this); | |
2141 } | |
2142 | 2030 |
2143 // Finds the NewSpacePage containing the given address. | 2031 // Finds the NewSpacePage containing the given address. |
2144 static inline NewSpacePage* FromAddress(Address address_in_page) { | 2032 static inline NewSpacePage* FromAddress(Address address_in_page) { |
2145 Address page_start = | 2033 Address page_start = |
2146 reinterpret_cast<Address>(reinterpret_cast<uintptr_t>(address_in_page) & | 2034 reinterpret_cast<Address>(reinterpret_cast<uintptr_t>(address_in_page) & |
2147 ~Page::kPageAlignmentMask); | 2035 ~Page::kPageAlignmentMask); |
2148 NewSpacePage* page = reinterpret_cast<NewSpacePage*>(page_start); | 2036 NewSpacePage* page = reinterpret_cast<NewSpacePage*>(page_start); |
2149 return page; | 2037 return page; |
2150 } | 2038 } |
2151 | 2039 |
2152 // Find the page for a limit address. A limit address is either an address | 2040 // Find the page for a limit address. A limit address is either an address |
2153 // inside a page, or the address right after the last byte of a page. | 2041 // inside a page, or the address right after the last byte of a page. |
2154 static inline NewSpacePage* FromLimit(Address address_limit) { | 2042 static inline NewSpacePage* FromLimit(Address address_limit) { |
2155 return NewSpacePage::FromAddress(address_limit - 1); | 2043 return NewSpacePage::FromAddress(address_limit - 1); |
2156 } | 2044 } |
2157 | 2045 |
2158 // Checks if address1 and address2 are on the same new space page. | 2046 // Checks if address1 and address2 are on the same new space page. |
2159 static inline bool OnSamePage(Address address1, Address address2) { | 2047 static inline bool OnSamePage(Address address1, Address address2) { |
2160 return NewSpacePage::FromAddress(address1) == | 2048 return NewSpacePage::FromAddress(address1) == |
2161 NewSpacePage::FromAddress(address2); | 2049 NewSpacePage::FromAddress(address2); |
2162 } | 2050 } |
2163 | 2051 |
2164 private: | 2052 private: |
2165 // Create a NewSpacePage object that is only used as anchor | 2053 // Create a NewSpacePage object that is only used as anchor |
2166 // for the doubly-linked list of real pages. | 2054 // for the doubly-linked list of real pages. |
2167 explicit NewSpacePage(SemiSpace* owner) { | 2055 explicit NewSpacePage(SemiSpace* owner) { InitializeAsAnchor(owner); } |
2168 InitializeAsAnchor(owner); | |
2169 } | |
2170 | 2056 |
2171 static NewSpacePage* Initialize(Heap* heap, | 2057 static NewSpacePage* Initialize(Heap* heap, Address start, |
2172 Address start, | |
2173 SemiSpace* semi_space); | 2058 SemiSpace* semi_space); |
2174 | 2059 |
2175 // Initialize a fake NewSpacePage used as sentinel at the ends | 2060 // Initialize a fake NewSpacePage used as sentinel at the ends |
2176 // of a doubly-linked list of real NewSpacePages. | 2061 // of a doubly-linked list of real NewSpacePages. |
2177 // Only uses the prev/next links, and sets flags to not be in new-space. | 2062 // Only uses the prev/next links, and sets flags to not be in new-space. |
2178 void InitializeAsAnchor(SemiSpace* owner); | 2063 void InitializeAsAnchor(SemiSpace* owner); |
2179 | 2064 |
2180 friend class SemiSpace; | 2065 friend class SemiSpace; |
2181 friend class SemiSpaceIterator; | 2066 friend class SemiSpaceIterator; |
2182 }; | 2067 }; |
2183 | 2068 |
2184 | 2069 |
2185 // ----------------------------------------------------------------------------- | 2070 // ----------------------------------------------------------------------------- |
2186 // SemiSpace in young generation | 2071 // SemiSpace in young generation |
2187 // | 2072 // |
2188 // A semispace is a contiguous chunk of memory holding page-like memory | 2073 // A semispace is a contiguous chunk of memory holding page-like memory |
2189 // chunks. The mark-compact collector uses the memory of the first page in | 2074 // chunks. The mark-compact collector uses the memory of the first page in |
2190 // the from space as a marking stack when tracing live objects. | 2075 // the from space as a marking stack when tracing live objects. |
2191 | 2076 |
2192 class SemiSpace : public Space { | 2077 class SemiSpace : public Space { |
2193 public: | 2078 public: |
2194 // Constructor. | 2079 // Constructor. |
2195 SemiSpace(Heap* heap, SemiSpaceId semispace) | 2080 SemiSpace(Heap* heap, SemiSpaceId semispace) |
2196 : Space(heap, NEW_SPACE, NOT_EXECUTABLE), | 2081 : Space(heap, NEW_SPACE, NOT_EXECUTABLE), |
2197 start_(NULL), | 2082 start_(NULL), |
2198 age_mark_(NULL), | 2083 age_mark_(NULL), |
2199 id_(semispace), | 2084 id_(semispace), |
2200 anchor_(this), | 2085 anchor_(this), |
2201 current_page_(NULL) { } | 2086 current_page_(NULL) {} |
2202 | 2087 |
2203 // Sets up the semispace using the given chunk. | 2088 // Sets up the semispace using the given chunk. |
2204 void SetUp(Address start, int initial_capacity, int maximum_capacity); | 2089 void SetUp(Address start, int initial_capacity, int maximum_capacity); |
2205 | 2090 |
2206 // Tear down the space. Heap memory was not allocated by the space, so it | 2091 // Tear down the space. Heap memory was not allocated by the space, so it |
2207 // is not deallocated here. | 2092 // is not deallocated here. |
2208 void TearDown(); | 2093 void TearDown(); |
2209 | 2094 |
2210 // True if the space has been set up but not torn down. | 2095 // True if the space has been set up but not torn down. |
2211 bool HasBeenSetUp() { return start_ != NULL; } | 2096 bool HasBeenSetUp() { return start_ != NULL; } |
2212 | 2097 |
2213 // Grow the semispace to the new capacity. The new capacity | 2098 // Grow the semispace to the new capacity. The new capacity |
2214 // requested must be larger than the current capacity and less than | 2099 // requested must be larger than the current capacity and less than |
2215 // the maximum capacity. | 2100 // the maximum capacity. |
2216 bool GrowTo(int new_capacity); | 2101 bool GrowTo(int new_capacity); |
2217 | 2102 |
2218 // Shrinks the semispace to the new capacity. The new capacity | 2103 // Shrinks the semispace to the new capacity. The new capacity |
2219 // requested must be more than the amount of used memory in the | 2104 // requested must be more than the amount of used memory in the |
2220 // semispace and less than the current capacity. | 2105 // semispace and less than the current capacity. |
2221 bool ShrinkTo(int new_capacity); | 2106 bool ShrinkTo(int new_capacity); |
2222 | 2107 |
2223 // Returns the start address of the first page of the space. | 2108 // Returns the start address of the first page of the space. |
2224 Address space_start() { | 2109 Address space_start() { |
2225 DCHECK(anchor_.next_page() != &anchor_); | 2110 DCHECK(anchor_.next_page() != &anchor_); |
2226 return anchor_.next_page()->area_start(); | 2111 return anchor_.next_page()->area_start(); |
2227 } | 2112 } |
2228 | 2113 |
2229 // Returns the start address of the current page of the space. | 2114 // Returns the start address of the current page of the space. |
2230 Address page_low() { | 2115 Address page_low() { return current_page_->area_start(); } |
2231 return current_page_->area_start(); | |
2232 } | |
2233 | 2116 |
2234 // Returns one past the end address of the space. | 2117 // Returns one past the end address of the space. |
2235 Address space_end() { | 2118 Address space_end() { return anchor_.prev_page()->area_end(); } |
2236 return anchor_.prev_page()->area_end(); | |
2237 } | |
2238 | 2119 |
2239 // Returns one past the end address of the current page of the space. | 2120 // Returns one past the end address of the current page of the space. |
2240 Address page_high() { | 2121 Address page_high() { return current_page_->area_end(); } |
2241 return current_page_->area_end(); | |
2242 } | |
2243 | 2122 |
2244 bool AdvancePage() { | 2123 bool AdvancePage() { |
2245 NewSpacePage* next_page = current_page_->next_page(); | 2124 NewSpacePage* next_page = current_page_->next_page(); |
2246 if (next_page == anchor()) return false; | 2125 if (next_page == anchor()) return false; |
2247 current_page_ = next_page; | 2126 current_page_ = next_page; |
2248 return true; | 2127 return true; |
2249 } | 2128 } |
2250 | 2129 |
2251 // Resets the space to using the first page. | 2130 // Resets the space to using the first page. |
2252 void Reset(); | 2131 void Reset(); |
2253 | 2132 |
2254 // Age mark accessors. | 2133 // Age mark accessors. |
2255 Address age_mark() { return age_mark_; } | 2134 Address age_mark() { return age_mark_; } |
2256 void set_age_mark(Address mark); | 2135 void set_age_mark(Address mark); |
2257 | 2136 |
2258 // True if the address is in the address range of this semispace (not | 2137 // True if the address is in the address range of this semispace (not |
2259 // necessarily below the allocation pointer). | 2138 // necessarily below the allocation pointer). |
2260 bool Contains(Address a) { | 2139 bool Contains(Address a) { |
2261 return (reinterpret_cast<uintptr_t>(a) & address_mask_) | 2140 return (reinterpret_cast<uintptr_t>(a) & address_mask_) == |
2262 == reinterpret_cast<uintptr_t>(start_); | 2141 reinterpret_cast<uintptr_t>(start_); |
2263 } | 2142 } |
2264 | 2143 |
2265 // True if the object is a heap object in the address range of this | 2144 // True if the object is a heap object in the address range of this |
2266 // semispace (not necessarily below the allocation pointer). | 2145 // semispace (not necessarily below the allocation pointer). |
2267 bool Contains(Object* o) { | 2146 bool Contains(Object* o) { |
2268 return (reinterpret_cast<uintptr_t>(o) & object_mask_) == object_expected_; | 2147 return (reinterpret_cast<uintptr_t>(o) & object_mask_) == object_expected_; |
2269 } | 2148 } |
2270 | 2149 |
2271 // If we don't have these here then SemiSpace will be abstract. However | 2150 // If we don't have these here then SemiSpace will be abstract. However |
2272 // they should never be called. | 2151 // they should never be called. |
(...skipping 71 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... |
2344 uintptr_t object_expected_; | 2223 uintptr_t object_expected_; |
2345 | 2224 |
2346 bool committed_; | 2225 bool committed_; |
2347 SemiSpaceId id_; | 2226 SemiSpaceId id_; |
2348 | 2227 |
2349 NewSpacePage anchor_; | 2228 NewSpacePage anchor_; |
2350 NewSpacePage* current_page_; | 2229 NewSpacePage* current_page_; |
2351 | 2230 |
2352 friend class SemiSpaceIterator; | 2231 friend class SemiSpaceIterator; |
2353 friend class NewSpacePageIterator; | 2232 friend class NewSpacePageIterator; |
| 2233 |
2354 public: | 2234 public: |
2355 TRACK_MEMORY("SemiSpace") | 2235 TRACK_MEMORY("SemiSpace") |
2356 }; | 2236 }; |
2357 | 2237 |
2358 | 2238 |
2359 // A SemiSpaceIterator is an ObjectIterator that iterates over the active | 2239 // A SemiSpaceIterator is an ObjectIterator that iterates over the active |
2360 // semispace of the heap's new space. It iterates over the objects in the | 2240 // semispace of the heap's new space. It iterates over the objects in the |
2361 // semispace from a given start address (defaulting to the bottom of the | 2241 // semispace from a given start address (defaulting to the bottom of the |
2362 // semispace) to the top of the semispace. New objects allocated after the | 2242 // semispace) to the top of the semispace. New objects allocated after the |
2363 // iterator is created are not iterated. | 2243 // iterator is created are not iterated. |
(...skipping 27 matching lines...) Expand all Loading... |
2391 int size = (size_func_ == NULL) ? object->Size() : size_func_(object); | 2271 int size = (size_func_ == NULL) ? object->Size() : size_func_(object); |
2392 | 2272 |
2393 current_ += size; | 2273 current_ += size; |
2394 return object; | 2274 return object; |
2395 } | 2275 } |
2396 | 2276 |
2397 // Implementation of the ObjectIterator functions. | 2277 // Implementation of the ObjectIterator functions. |
2398 virtual HeapObject* next_object() { return Next(); } | 2278 virtual HeapObject* next_object() { return Next(); } |
2399 | 2279 |
2400 private: | 2280 private: |
2401 void Initialize(Address start, | 2281 void Initialize(Address start, Address end, HeapObjectCallback size_func); |
2402 Address end, | |
2403 HeapObjectCallback size_func); | |
2404 | 2282 |
2405 // The current iteration point. | 2283 // The current iteration point. |
2406 Address current_; | 2284 Address current_; |
2407 // The end of iteration. | 2285 // The end of iteration. |
2408 Address limit_; | 2286 Address limit_; |
2409 // The callback function. | 2287 // The callback function. |
2410 HeapObjectCallback size_func_; | 2288 HeapObjectCallback size_func_; |
2411 }; | 2289 }; |
2412 | 2290 |
2413 | 2291 |
(...skipping 28 matching lines...) Expand all Loading... |
2442 // ----------------------------------------------------------------------------- | 2320 // ----------------------------------------------------------------------------- |
2443 // The young generation space. | 2321 // The young generation space. |
2444 // | 2322 // |
2445 // The new space consists of a contiguous pair of semispaces. It simply | 2323 // The new space consists of a contiguous pair of semispaces. It simply |
2446 // forwards most functions to the appropriate semispace. | 2324 // forwards most functions to the appropriate semispace. |
2447 | 2325 |
2448 class NewSpace : public Space { | 2326 class NewSpace : public Space { |
2449 public: | 2327 public: |
2450 // Constructor. | 2328 // Constructor. |
2451 explicit NewSpace(Heap* heap) | 2329 explicit NewSpace(Heap* heap) |
2452 : Space(heap, NEW_SPACE, NOT_EXECUTABLE), | 2330 : Space(heap, NEW_SPACE, NOT_EXECUTABLE), |
2453 to_space_(heap, kToSpace), | 2331 to_space_(heap, kToSpace), |
2454 from_space_(heap, kFromSpace), | 2332 from_space_(heap, kFromSpace), |
2455 reservation_(), | 2333 reservation_(), |
2456 inline_allocation_limit_step_(0) {} | 2334 inline_allocation_limit_step_(0) {} |
2457 | 2335 |
2458 // Sets up the new space using the given chunk. | 2336 // Sets up the new space using the given chunk. |
2459 bool SetUp(int reserved_semispace_size_, int max_semi_space_size); | 2337 bool SetUp(int reserved_semispace_size_, int max_semi_space_size); |
2460 | 2338 |
2461 // Tears down the space. Heap memory was not allocated by the space, so it | 2339 // Tears down the space. Heap memory was not allocated by the space, so it |
2462 // is not deallocated here. | 2340 // is not deallocated here. |
2463 void TearDown(); | 2341 void TearDown(); |
2464 | 2342 |
2465 // True if the space has been set up but not torn down. | 2343 // True if the space has been set up but not torn down. |
2466 bool HasBeenSetUp() { | 2344 bool HasBeenSetUp() { |
2467 return to_space_.HasBeenSetUp() && from_space_.HasBeenSetUp(); | 2345 return to_space_.HasBeenSetUp() && from_space_.HasBeenSetUp(); |
2468 } | 2346 } |
2469 | 2347 |
2470 // Flip the pair of spaces. | 2348 // Flip the pair of spaces. |
2471 void Flip(); | 2349 void Flip(); |
2472 | 2350 |
2473 // Grow the capacity of the semispaces. Assumes that they are not at | 2351 // Grow the capacity of the semispaces. Assumes that they are not at |
2474 // their maximum capacity. | 2352 // their maximum capacity. |
2475 void Grow(); | 2353 void Grow(); |
2476 | 2354 |
2477 // Shrink the capacity of the semispaces. | 2355 // Shrink the capacity of the semispaces. |
2478 void Shrink(); | 2356 void Shrink(); |
2479 | 2357 |
2480 // True if the address or object lies in the address range of either | 2358 // True if the address or object lies in the address range of either |
2481 // semispace (not necessarily below the allocation pointer). | 2359 // semispace (not necessarily below the allocation pointer). |
2482 bool Contains(Address a) { | 2360 bool Contains(Address a) { |
2483 return (reinterpret_cast<uintptr_t>(a) & address_mask_) | 2361 return (reinterpret_cast<uintptr_t>(a) & address_mask_) == |
2484 == reinterpret_cast<uintptr_t>(start_); | 2362 reinterpret_cast<uintptr_t>(start_); |
2485 } | 2363 } |
2486 | 2364 |
2487 bool Contains(Object* o) { | 2365 bool Contains(Object* o) { |
2488 Address a = reinterpret_cast<Address>(o); | 2366 Address a = reinterpret_cast<Address>(o); |
2489 return (reinterpret_cast<uintptr_t>(a) & object_mask_) == object_expected_; | 2367 return (reinterpret_cast<uintptr_t>(a) & object_mask_) == object_expected_; |
2490 } | 2368 } |
2491 | 2369 |
2492 // Return the allocated bytes in the active semispace. | 2370 // Return the allocated bytes in the active semispace. |
2493 virtual intptr_t Size() { | 2371 virtual intptr_t Size() { |
2494 return pages_used_ * NewSpacePage::kAreaSize + | 2372 return pages_used_ * NewSpacePage::kAreaSize + |
2495 static_cast<int>(top() - to_space_.page_low()); | 2373 static_cast<int>(top() - to_space_.page_low()); |
2496 } | 2374 } |
2497 | 2375 |
2498 // The same, but returning an int. We have to have the one that returns | 2376 // The same, but returning an int. We have to have the one that returns |
2499 // intptr_t because it is inherited, but if we know we are dealing with the | 2377 // intptr_t because it is inherited, but if we know we are dealing with the |
2500 // new space, which can't get as big as the other spaces then this is useful: | 2378 // new space, which can't get as big as the other spaces then this is useful: |
2501 int SizeAsInt() { return static_cast<int>(Size()); } | 2379 int SizeAsInt() { return static_cast<int>(Size()); } |
2502 | 2380 |
2503 // Return the current capacity of a semispace. | 2381 // Return the current capacity of a semispace. |
2504 intptr_t EffectiveCapacity() { | 2382 intptr_t EffectiveCapacity() { |
2505 SLOW_DCHECK(to_space_.Capacity() == from_space_.Capacity()); | 2383 SLOW_DCHECK(to_space_.Capacity() == from_space_.Capacity()); |
2506 return (to_space_.Capacity() / Page::kPageSize) * NewSpacePage::kAreaSize; | 2384 return (to_space_.Capacity() / Page::kPageSize) * NewSpacePage::kAreaSize; |
2507 } | 2385 } |
2508 | 2386 |
2509 // Return the current capacity of a semispace. | 2387 // Return the current capacity of a semispace. |
2510 intptr_t Capacity() { | 2388 intptr_t Capacity() { |
2511 DCHECK(to_space_.Capacity() == from_space_.Capacity()); | 2389 DCHECK(to_space_.Capacity() == from_space_.Capacity()); |
2512 return to_space_.Capacity(); | 2390 return to_space_.Capacity(); |
2513 } | 2391 } |
2514 | 2392 |
2515 // Return the total amount of memory committed for new space. | 2393 // Return the total amount of memory committed for new space. |
2516 intptr_t CommittedMemory() { | 2394 intptr_t CommittedMemory() { |
2517 if (from_space_.is_committed()) return 2 * Capacity(); | 2395 if (from_space_.is_committed()) return 2 * Capacity(); |
2518 return Capacity(); | 2396 return Capacity(); |
2519 } | 2397 } |
2520 | 2398 |
2521 // Return the total amount of memory committed for new space. | 2399 // Return the total amount of memory committed for new space. |
2522 intptr_t MaximumCommittedMemory() { | 2400 intptr_t MaximumCommittedMemory() { |
2523 return to_space_.MaximumCommittedMemory() + | 2401 return to_space_.MaximumCommittedMemory() + |
2524 from_space_.MaximumCommittedMemory(); | 2402 from_space_.MaximumCommittedMemory(); |
2525 } | 2403 } |
2526 | 2404 |
2527 // Approximate amount of physical memory committed for this space. | 2405 // Approximate amount of physical memory committed for this space. |
2528 size_t CommittedPhysicalMemory(); | 2406 size_t CommittedPhysicalMemory(); |
2529 | 2407 |
2530 // Return the available bytes without growing. | 2408 // Return the available bytes without growing. |
2531 intptr_t Available() { | 2409 intptr_t Available() { return Capacity() - Size(); } |
2532 return Capacity() - Size(); | |
2533 } | |
2534 | 2410 |
2535 // Return the maximum capacity of a semispace. | 2411 // Return the maximum capacity of a semispace. |
2536 int MaximumCapacity() { | 2412 int MaximumCapacity() { |
2537 DCHECK(to_space_.MaximumCapacity() == from_space_.MaximumCapacity()); | 2413 DCHECK(to_space_.MaximumCapacity() == from_space_.MaximumCapacity()); |
2538 return to_space_.MaximumCapacity(); | 2414 return to_space_.MaximumCapacity(); |
2539 } | 2415 } |
2540 | 2416 |
2541 bool IsAtMaximumCapacity() { | 2417 bool IsAtMaximumCapacity() { return Capacity() == MaximumCapacity(); } |
2542 return Capacity() == MaximumCapacity(); | |
2543 } | |
2544 | 2418 |
2545 // Returns the initial capacity of a semispace. | 2419 // Returns the initial capacity of a semispace. |
2546 int InitialCapacity() { | 2420 int InitialCapacity() { |
2547 DCHECK(to_space_.InitialCapacity() == from_space_.InitialCapacity()); | 2421 DCHECK(to_space_.InitialCapacity() == from_space_.InitialCapacity()); |
2548 return to_space_.InitialCapacity(); | 2422 return to_space_.InitialCapacity(); |
2549 } | 2423 } |
2550 | 2424 |
2551 // Return the address of the allocation pointer in the active semispace. | 2425 // Return the address of the allocation pointer in the active semispace. |
2552 Address top() { | 2426 Address top() { |
2553 DCHECK(to_space_.current_page()->ContainsLimit(allocation_info_.top())); | 2427 DCHECK(to_space_.current_page()->ContainsLimit(allocation_info_.top())); |
(...skipping 29 matching lines...) Expand all Loading... |
2583 DCHECK(IsAligned(OffsetFrom(addr), kPointerSize) || | 2457 DCHECK(IsAligned(OffsetFrom(addr), kPointerSize) || |
2584 IsAligned(OffsetFrom(addr) - 1, kPointerSize)); | 2458 IsAligned(OffsetFrom(addr) - 1, kPointerSize)); |
2585 return static_cast<uint32_t>(addr - start_) >> kPointerSizeLog2; | 2459 return static_cast<uint32_t>(addr - start_) >> kPointerSizeLog2; |
2586 } | 2460 } |
2587 | 2461 |
2588 INLINE(Address MarkbitIndexToAddress(uint32_t index)) { | 2462 INLINE(Address MarkbitIndexToAddress(uint32_t index)) { |
2589 return reinterpret_cast<Address>(index << kPointerSizeLog2); | 2463 return reinterpret_cast<Address>(index << kPointerSizeLog2); |
2590 } | 2464 } |
2591 | 2465 |
2592 // The allocation top and limit address. | 2466 // The allocation top and limit address. |
2593 Address* allocation_top_address() { | 2467 Address* allocation_top_address() { return allocation_info_.top_address(); } |
2594 return allocation_info_.top_address(); | |
2595 } | |
2596 | 2468 |
2597 // The allocation limit address. | 2469 // The allocation limit address. |
2598 Address* allocation_limit_address() { | 2470 Address* allocation_limit_address() { |
2599 return allocation_info_.limit_address(); | 2471 return allocation_info_.limit_address(); |
2600 } | 2472 } |
2601 | 2473 |
2602 MUST_USE_RESULT INLINE(AllocationResult AllocateRaw(int size_in_bytes)); | 2474 MUST_USE_RESULT INLINE(AllocationResult AllocateRaw(int size_in_bytes)); |
2603 | 2475 |
2604 // Reset the allocation pointer to the beginning of the active semispace. | 2476 // Reset the allocation pointer to the beginning of the active semispace. |
2605 void ResetAllocationInfo(); | 2477 void ResetAllocationInfo(); |
(...skipping 119 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... |
2725 }; | 2597 }; |
2726 | 2598 |
2727 | 2599 |
2728 // ----------------------------------------------------------------------------- | 2600 // ----------------------------------------------------------------------------- |
2729 // Old object space (excluding map objects) | 2601 // Old object space (excluding map objects) |
2730 | 2602 |
2731 class OldSpace : public PagedSpace { | 2603 class OldSpace : public PagedSpace { |
2732 public: | 2604 public: |
2733 // Creates an old space object with a given maximum capacity. | 2605 // Creates an old space object with a given maximum capacity. |
2734 // The constructor does not allocate pages from OS. | 2606 // The constructor does not allocate pages from OS. |
2735 OldSpace(Heap* heap, | 2607 OldSpace(Heap* heap, intptr_t max_capacity, AllocationSpace id, |
2736 intptr_t max_capacity, | |
2737 AllocationSpace id, | |
2738 Executability executable) | 2608 Executability executable) |
2739 : PagedSpace(heap, max_capacity, id, executable) { | 2609 : PagedSpace(heap, max_capacity, id, executable) {} |
2740 } | |
2741 | 2610 |
2742 public: | 2611 public: |
2743 TRACK_MEMORY("OldSpace") | 2612 TRACK_MEMORY("OldSpace") |
2744 }; | 2613 }; |
2745 | 2614 |
2746 | 2615 |
2747 // For contiguous spaces, top should be in the space (or at the end) and limit | 2616 // For contiguous spaces, top should be in the space (or at the end) and limit |
2748 // should be the end of the space. | 2617 // should be the end of the space. |
2749 #define DCHECK_SEMISPACE_ALLOCATION_INFO(info, space) \ | 2618 #define DCHECK_SEMISPACE_ALLOCATION_INFO(info, space) \ |
2750 SLOW_DCHECK((space).page_low() <= (info).top() \ | 2619 SLOW_DCHECK((space).page_low() <= (info).top() && \ |
2751 && (info).top() <= (space).page_high() \ | 2620 (info).top() <= (space).page_high() && \ |
2752 && (info).limit() <= (space).page_high()) | 2621 (info).limit() <= (space).page_high()) |
2753 | 2622 |
2754 | 2623 |
2755 // ----------------------------------------------------------------------------- | 2624 // ----------------------------------------------------------------------------- |
2756 // Old space for all map objects | 2625 // Old space for all map objects |
2757 | 2626 |
2758 class MapSpace : public PagedSpace { | 2627 class MapSpace : public PagedSpace { |
2759 public: | 2628 public: |
2760 // Creates a map space object with a maximum capacity. | 2629 // Creates a map space object with a maximum capacity. |
2761 MapSpace(Heap* heap, intptr_t max_capacity, AllocationSpace id) | 2630 MapSpace(Heap* heap, intptr_t max_capacity, AllocationSpace id) |
2762 : PagedSpace(heap, max_capacity, id, NOT_EXECUTABLE), | 2631 : PagedSpace(heap, max_capacity, id, NOT_EXECUTABLE), |
2763 max_map_space_pages_(kMaxMapPageIndex - 1) { | 2632 max_map_space_pages_(kMaxMapPageIndex - 1) {} |
2764 } | |
2765 | 2633 |
2766 // Given an index, returns the page address. | 2634 // Given an index, returns the page address. |
2767 // TODO(1600): this limit is artificial just to keep code compilable | 2635 // TODO(1600): this limit is artificial just to keep code compilable |
2768 static const int kMaxMapPageIndex = 1 << 16; | 2636 static const int kMaxMapPageIndex = 1 << 16; |
2769 | 2637 |
2770 virtual int RoundSizeDownToObjectAlignment(int size) { | 2638 virtual int RoundSizeDownToObjectAlignment(int size) { |
2771 if (IsPowerOf2(Map::kSize)) { | 2639 if (IsPowerOf2(Map::kSize)) { |
2772 return RoundDown(size, Map::kSize); | 2640 return RoundDown(size, Map::kSize); |
2773 } else { | 2641 } else { |
2774 return (size / Map::kSize) * Map::kSize; | 2642 return (size / Map::kSize) * Map::kSize; |
(...skipping 18 matching lines...) Expand all Loading... |
2793 }; | 2661 }; |
2794 | 2662 |
2795 | 2663 |
2796 // ----------------------------------------------------------------------------- | 2664 // ----------------------------------------------------------------------------- |
2797 // Old space for simple property cell objects | 2665 // Old space for simple property cell objects |
2798 | 2666 |
2799 class CellSpace : public PagedSpace { | 2667 class CellSpace : public PagedSpace { |
2800 public: | 2668 public: |
2801 // Creates a property cell space object with a maximum capacity. | 2669 // Creates a property cell space object with a maximum capacity. |
2802 CellSpace(Heap* heap, intptr_t max_capacity, AllocationSpace id) | 2670 CellSpace(Heap* heap, intptr_t max_capacity, AllocationSpace id) |
2803 : PagedSpace(heap, max_capacity, id, NOT_EXECUTABLE) { | 2671 : PagedSpace(heap, max_capacity, id, NOT_EXECUTABLE) {} |
2804 } | |
2805 | 2672 |
2806 virtual int RoundSizeDownToObjectAlignment(int size) { | 2673 virtual int RoundSizeDownToObjectAlignment(int size) { |
2807 if (IsPowerOf2(Cell::kSize)) { | 2674 if (IsPowerOf2(Cell::kSize)) { |
2808 return RoundDown(size, Cell::kSize); | 2675 return RoundDown(size, Cell::kSize); |
2809 } else { | 2676 } else { |
2810 return (size / Cell::kSize) * Cell::kSize; | 2677 return (size / Cell::kSize) * Cell::kSize; |
2811 } | 2678 } |
2812 } | 2679 } |
2813 | 2680 |
2814 protected: | 2681 protected: |
2815 virtual void VerifyObject(HeapObject* obj); | 2682 virtual void VerifyObject(HeapObject* obj); |
2816 | 2683 |
2817 public: | 2684 public: |
2818 TRACK_MEMORY("CellSpace") | 2685 TRACK_MEMORY("CellSpace") |
2819 }; | 2686 }; |
2820 | 2687 |
2821 | 2688 |
2822 // ----------------------------------------------------------------------------- | 2689 // ----------------------------------------------------------------------------- |
2823 // Old space for all global object property cell objects | 2690 // Old space for all global object property cell objects |
2824 | 2691 |
2825 class PropertyCellSpace : public PagedSpace { | 2692 class PropertyCellSpace : public PagedSpace { |
2826 public: | 2693 public: |
2827 // Creates a property cell space object with a maximum capacity. | 2694 // Creates a property cell space object with a maximum capacity. |
2828 PropertyCellSpace(Heap* heap, intptr_t max_capacity, | 2695 PropertyCellSpace(Heap* heap, intptr_t max_capacity, AllocationSpace id) |
2829 AllocationSpace id) | 2696 : PagedSpace(heap, max_capacity, id, NOT_EXECUTABLE) {} |
2830 : PagedSpace(heap, max_capacity, id, NOT_EXECUTABLE) { | |
2831 } | |
2832 | 2697 |
2833 virtual int RoundSizeDownToObjectAlignment(int size) { | 2698 virtual int RoundSizeDownToObjectAlignment(int size) { |
2834 if (IsPowerOf2(PropertyCell::kSize)) { | 2699 if (IsPowerOf2(PropertyCell::kSize)) { |
2835 return RoundDown(size, PropertyCell::kSize); | 2700 return RoundDown(size, PropertyCell::kSize); |
2836 } else { | 2701 } else { |
2837 return (size / PropertyCell::kSize) * PropertyCell::kSize; | 2702 return (size / PropertyCell::kSize) * PropertyCell::kSize; |
2838 } | 2703 } |
2839 } | 2704 } |
2840 | 2705 |
2841 protected: | 2706 protected: |
(...skipping 22 matching lines...) Expand all Loading... |
2864 // Releases internal resources, frees objects in this space. | 2729 // Releases internal resources, frees objects in this space. |
2865 void TearDown(); | 2730 void TearDown(); |
2866 | 2731 |
2867 static intptr_t ObjectSizeFor(intptr_t chunk_size) { | 2732 static intptr_t ObjectSizeFor(intptr_t chunk_size) { |
2868 if (chunk_size <= (Page::kPageSize + Page::kObjectStartOffset)) return 0; | 2733 if (chunk_size <= (Page::kPageSize + Page::kObjectStartOffset)) return 0; |
2869 return chunk_size - Page::kPageSize - Page::kObjectStartOffset; | 2734 return chunk_size - Page::kPageSize - Page::kObjectStartOffset; |
2870 } | 2735 } |
2871 | 2736 |
2872 // Shared implementation of AllocateRaw, AllocateRawCode and | 2737 // Shared implementation of AllocateRaw, AllocateRawCode and |
2873 // AllocateRawFixedArray. | 2738 // AllocateRawFixedArray. |
2874 MUST_USE_RESULT AllocationResult AllocateRaw(int object_size, | 2739 MUST_USE_RESULT AllocationResult |
2875 Executability executable); | 2740 AllocateRaw(int object_size, Executability executable); |
2876 | 2741 |
2877 // Available bytes for objects in this space. | 2742 // Available bytes for objects in this space. |
2878 inline intptr_t Available(); | 2743 inline intptr_t Available(); |
2879 | 2744 |
2880 virtual intptr_t Size() { | 2745 virtual intptr_t Size() { return size_; } |
2881 return size_; | |
2882 } | |
2883 | 2746 |
2884 virtual intptr_t SizeOfObjects() { | 2747 virtual intptr_t SizeOfObjects() { return objects_size_; } |
2885 return objects_size_; | |
2886 } | |
2887 | 2748 |
2888 intptr_t MaximumCommittedMemory() { | 2749 intptr_t MaximumCommittedMemory() { return maximum_committed_; } |
2889 return maximum_committed_; | |
2890 } | |
2891 | 2750 |
2892 intptr_t CommittedMemory() { | 2751 intptr_t CommittedMemory() { return Size(); } |
2893 return Size(); | |
2894 } | |
2895 | 2752 |
2896 // Approximate amount of physical memory committed for this space. | 2753 // Approximate amount of physical memory committed for this space. |
2897 size_t CommittedPhysicalMemory(); | 2754 size_t CommittedPhysicalMemory(); |
2898 | 2755 |
2899 int PageCount() { | 2756 int PageCount() { return page_count_; } |
2900 return page_count_; | |
2901 } | |
2902 | 2757 |
2903 // Finds an object for a given address, returns a Smi if it is not found. | 2758 // Finds an object for a given address, returns a Smi if it is not found. |
2904 // The function iterates through all objects in this space, may be slow. | 2759 // The function iterates through all objects in this space, may be slow. |
2905 Object* FindObject(Address a); | 2760 Object* FindObject(Address a); |
2906 | 2761 |
2907 // Finds a large object page containing the given address, returns NULL | 2762 // Finds a large object page containing the given address, returns NULL |
2908 // if such a page doesn't exist. | 2763 // if such a page doesn't exist. |
2909 LargePage* FindPage(Address a); | 2764 LargePage* FindPage(Address a); |
2910 | 2765 |
2911 // Frees unmarked objects. | 2766 // Frees unmarked objects. |
(...skipping 18 matching lines...) Expand all Loading... |
2930 #endif | 2785 #endif |
2931 // Checks whether an address is in the object area in this space. It | 2786 // Checks whether an address is in the object area in this space. It |
2932 // iterates all objects in the space. May be slow. | 2787 // iterates all objects in the space. May be slow. |
2933 bool SlowContains(Address addr) { return FindObject(addr)->IsHeapObject(); } | 2788 bool SlowContains(Address addr) { return FindObject(addr)->IsHeapObject(); } |
2934 | 2789 |
2935 private: | 2790 private: |
2936 intptr_t max_capacity_; | 2791 intptr_t max_capacity_; |
2937 intptr_t maximum_committed_; | 2792 intptr_t maximum_committed_; |
2938 // The head of the linked list of large object chunks. | 2793 // The head of the linked list of large object chunks. |
2939 LargePage* first_page_; | 2794 LargePage* first_page_; |
2940 intptr_t size_; // allocated bytes | 2795 intptr_t size_; // allocated bytes |
2941 int page_count_; // number of chunks | 2796 int page_count_; // number of chunks |
2942 intptr_t objects_size_; // size of objects | 2797 intptr_t objects_size_; // size of objects |
2943 // Map MemoryChunk::kAlignment-aligned chunks to large pages covering them | 2798 // Map MemoryChunk::kAlignment-aligned chunks to large pages covering them |
2944 HashMap chunk_map_; | 2799 HashMap chunk_map_; |
2945 | 2800 |
2946 friend class LargeObjectIterator; | 2801 friend class LargeObjectIterator; |
2947 | 2802 |
2948 public: | 2803 public: |
2949 TRACK_MEMORY("LargeObjectSpace") | 2804 TRACK_MEMORY("LargeObjectSpace") |
2950 }; | 2805 }; |
2951 | 2806 |
2952 | 2807 |
2953 class LargeObjectIterator: public ObjectIterator { | 2808 class LargeObjectIterator : public ObjectIterator { |
2954 public: | 2809 public: |
2955 explicit LargeObjectIterator(LargeObjectSpace* space); | 2810 explicit LargeObjectIterator(LargeObjectSpace* space); |
2956 LargeObjectIterator(LargeObjectSpace* space, HeapObjectCallback size_func); | 2811 LargeObjectIterator(LargeObjectSpace* space, HeapObjectCallback size_func); |
2957 | 2812 |
2958 HeapObject* Next(); | 2813 HeapObject* Next(); |
2959 | 2814 |
2960 // implementation of ObjectIterator. | 2815 // implementation of ObjectIterator. |
2961 virtual HeapObject* next_object() { return Next(); } | 2816 virtual HeapObject* next_object() { return Next(); } |
2962 | 2817 |
2963 private: | 2818 private: |
(...skipping 43 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... |
3007 return NULL; | 2862 return NULL; |
3008 default: | 2863 default: |
3009 break; | 2864 break; |
3010 } | 2865 } |
3011 UNREACHABLE(); | 2866 UNREACHABLE(); |
3012 return NULL; | 2867 return NULL; |
3013 } | 2868 } |
3014 | 2869 |
3015 | 2870 |
3016 private: | 2871 private: |
3017 enum State { | 2872 enum State { kOldPointerState, kMapState, kLargeObjectState, kFinishedState }; |
3018 kOldPointerState, | |
3019 kMapState, | |
3020 kLargeObjectState, | |
3021 kFinishedState | |
3022 }; | |
3023 State state_; | 2873 State state_; |
3024 PageIterator old_pointer_iterator_; | 2874 PageIterator old_pointer_iterator_; |
3025 PageIterator map_iterator_; | 2875 PageIterator map_iterator_; |
3026 LargeObjectIterator lo_iterator_; | 2876 LargeObjectIterator lo_iterator_; |
3027 }; | 2877 }; |
3028 | 2878 |
3029 | 2879 |
3030 #ifdef DEBUG | 2880 #ifdef DEBUG |
3031 struct CommentStatistic { | 2881 struct CommentStatistic { |
3032 const char* comment; | 2882 const char* comment; |
3033 int size; | 2883 int size; |
3034 int count; | 2884 int count; |
3035 void Clear() { | 2885 void Clear() { |
3036 comment = NULL; | 2886 comment = NULL; |
3037 size = 0; | 2887 size = 0; |
3038 count = 0; | 2888 count = 0; |
3039 } | 2889 } |
3040 // Must be small, since an iteration is used for lookup. | 2890 // Must be small, since an iteration is used for lookup. |
3041 static const int kMaxComments = 64; | 2891 static const int kMaxComments = 64; |
3042 }; | 2892 }; |
3043 #endif | 2893 #endif |
| 2894 } |
| 2895 } // namespace v8::internal |
3044 | 2896 |
3045 | 2897 #endif // V8_HEAP_SPACES_H_ |
3046 } } // namespace v8::internal | |
3047 | |
3048 #endif // V8_SPACES_H_ | |
OLD | NEW |