Chromium Code Reviews

Side by Side Diff: src/spaces.h

Issue 6321008: Introduce conservative sweeping. (Closed) Base URL: https://v8.googlecode.com/svn/branches/experimental/gc
Patch Set: Created 9 years, 11 months ago
1 // Copyright 2006-2010 the V8 project authors. All rights reserved. 1 // Copyright 2006-2010 the V8 project authors. All rights reserved.
2 // Redistribution and use in source and binary forms, with or without 2 // Redistribution and use in source and binary forms, with or without
3 // modification, are permitted provided that the following conditions are 3 // modification, are permitted provided that the following conditions are
4 // met: 4 // met:
5 // 5 //
6 // * Redistributions of source code must retain the above copyright 6 // * Redistributions of source code must retain the above copyright
7 // notice, this list of conditions and the following disclaimer. 7 // notice, this list of conditions and the following disclaimer.
8 // * Redistributions in binary form must reproduce the above 8 // * Redistributions in binary form must reproduce the above
9 // copyright notice, this list of conditions and the following 9 // copyright notice, this list of conditions and the following
10 // disclaimer in the documentation and/or other materials provided 10 // disclaimer in the documentation and/or other materials provided
(...skipping 117 matching lines...)
128 } 128 }
129 129
130 int CellsCount() { 130 int CellsCount() {
131 return StorageDescriptor::CellsCount(this->address()); 131 return StorageDescriptor::CellsCount(this->address());
132 } 132 }
133 133
134 static int SizeFor(int cells_count) { 134 static int SizeFor(int cells_count) {
135 return sizeof(CellType)*cells_count; 135 return sizeof(CellType)*cells_count;
136 } 136 }
137 137
138 INLINE(static uint32_t Index2Cell(uint32_t index)) {
139 return index >> kBitsPerCellLog2;
140 }
141
142 INLINE(static uint32_t Index2Bit(uint32_t index)) {
143 return index & kBitIndexMask;
144 }
145
146 INLINE(static uint32_t Cell2Index(uint32_t index)) {
147 return index << kBitsPerCellLog2;
148 }
149
150 INLINE(static uint32_t CellAlignIndex(uint32_t index)) {
151 return (index + kBitIndexMask) & ~kBitIndexMask;
152 }
153
138 INLINE(CellType* cells()) { 154 INLINE(CellType* cells()) {
139 return reinterpret_cast<CellType*>(this); 155 return reinterpret_cast<CellType*>(this);
140 } 156 }
141 157
142 INLINE(Address address()) { 158 INLINE(Address address()) {
143 return reinterpret_cast<Address>(this); 159 return reinterpret_cast<Address>(this);
144 } 160 }
145 161
146 INLINE(static Bitmap* FromAddress(Address addr)) { 162 INLINE(static Bitmap* FromAddress(Address addr)) {
147 return reinterpret_cast<Bitmap*>(addr); 163 return reinterpret_cast<Bitmap*>(addr);
(...skipping 30 matching lines...)
178 194
179 INLINE(void ClearRange(uint32_t start, uint32_t size)) { 195 INLINE(void ClearRange(uint32_t start, uint32_t size)) {
180 const uint32_t end = start + size; 196 const uint32_t end = start + size;
181 const uint32_t start_cell = start >> kBitsPerCellLog2; 197 const uint32_t start_cell = start >> kBitsPerCellLog2;
182 const uint32_t end_cell = end >> kBitsPerCellLog2; 198 const uint32_t end_cell = end >> kBitsPerCellLog2;
183 199
184 const uint32_t start_mask = (-1) << (start & kBitIndexMask); 200 const uint32_t start_mask = (-1) << (start & kBitIndexMask);
185 const uint32_t end_mask = (1 << (end & kBitIndexMask)) - 1; 201 const uint32_t end_mask = (1 << (end & kBitIndexMask)) - 1;
186 202
187 ASSERT(static_cast<int>(start_cell) < CellsCount()); 203 ASSERT(static_cast<int>(start_cell) < CellsCount());
188 ASSERT(static_cast<int>(end_cell) < CellsCount()); 204 ASSERT(static_cast<int>(end_cell) < CellsCount() ||
205 (end_mask == 0 && static_cast<int>(end_cell) == CellsCount()));
189 206
190 if (start_cell == end_cell) { 207 if (start_cell == end_cell) {
191 cells()[start_cell] &= ~(start_mask & end_mask); 208 cells()[start_cell] &= ~(start_mask & end_mask);
192 } else { 209 } else {
193 cells()[start_cell] &= ~start_mask; 210 cells()[start_cell] &= ~start_mask;
194 if (end_mask != 0) cells()[end_cell] &= ~end_mask; 211 if (end_mask != 0) cells()[end_cell] &= ~end_mask;
195 212
196 for (uint32_t cell = start_cell + 1, last_cell = end_cell - 1; 213 for (uint32_t cell = start_cell + 1, last_cell = end_cell - 1;
197 cell <= last_cell; 214 cell <= last_cell;
198 cell++) { 215 cell++) {
199 cells()[cell] = 0; 216 cells()[cell] = 0;
200 } 217 }
201 } 218 }
202 } 219 }
203 220
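ClearRange clears bits [start, start + size): start_mask covers the bits from start up to the top of its cell, end_mask covers the bits of the end cell below end, and whole cells in between are zeroed. A small self-contained rendering of that masking path (32-bit cells assumed, values purely illustrative):

#include <cstdint>
#include <cstdio>

int main() {
  uint32_t cells[4] = { ~0u, ~0u, ~0u, ~0u };              // all bits set
  uint32_t start = 10, size = 80, end = start + size;      // clear bits [10, 90)

  uint32_t start_cell = start >> 5, end_cell = end >> 5;   // cells 0 and 2
  uint32_t start_mask = ~0u << (start & 31);               // bits 10..31
  uint32_t end_mask = (1u << (end & 31)) - 1;              // bits 0..25

  cells[start_cell] &= ~start_mask;                        // keep bits 0..9 of cell 0
  if (end_mask != 0) cells[end_cell] &= ~end_mask;         // keep bits 26..31 of cell 2
  for (uint32_t c = start_cell + 1; c < end_cell; c++) cells[c] = 0;  // zero cell 1

  // Prints: 000003ff 00000000 fc000000 ffffffff
  printf("%08x %08x %08x %08x\n", cells[0], cells[1], cells[2], cells[3]);
  return 0;
}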
204 INLINE(void Clear()) { 221 INLINE(void Clear()) {
205 for (int i = 0; i < CellsCount(); i++) cells()[i] = 0; 222 for (int i = 0; i < CellsCount(); i++) cells()[i] = 0;
206 } 223 }
207 224
208 static void PrintWord(const uint32_t& word, const char* sep = " ") { 225 static void PrintWord(uint32_t word, uint32_t himask = 0) {
209 for (uint32_t mask = 1; mask != 0; mask <<= 1) { 226 for (uint32_t mask = 1; mask != 0; mask <<= 1) {
227 if ((mask & himask) != 0) PrintF("[");
210 PrintF((mask & word) ? "1" : "0"); 228 PrintF((mask & word) ? "1" : "0");
229 if ((mask & himask) != 0) PrintF("]");
211 } 230 }
212 PrintF("%s", sep);
213 } 231 }
214 232
233 class CellPrinter {
234 public:
235 CellPrinter() : seq_start(0), seq_type(0), seq_length(0) { }
236
237 void Print(uint32_t pos, uint32_t cell) {
238 if (cell == seq_type) {
239 seq_length++;
240 return;
241 }
242
243 Flush();
244
245 if (IsSeq(cell)) {
246 seq_start = pos;
247 seq_length = 0;
248 seq_type = cell;
249 return;
250 }
251
252 PrintF("%d: ", pos);
253 PrintWord(cell);
254 PrintF("\n");
255 }
256
257 void Flush() {
258 if (seq_length > 0) {
259 PrintF("%d: %dx%d\n",
260 seq_start,
261 seq_type == 0 ? 0 : 1,
262 seq_length * kBitsPerCell);
263 seq_length = 0;
264 }
265 }
266
267 static bool IsSeq(uint32_t cell) { return cell == 0 || cell == 0xFFFFFFFF; }
268 private:
269 uint32_t seq_start;
270 uint32_t seq_type;
271 uint32_t seq_length;
272 };
273
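CellPrinter compresses the printout: runs of all-zero or all-one cells are reported as a single "start: valueXbits" line (via Flush) instead of one word per cell. A toy illustration of the idea, not a transcription of the class, assuming 32-bit cells:

#include <cstdint>
#include <cstdio>

int main() {
  uint32_t cells[] = { 0, 0, 0x000000F0u, ~0u, ~0u, ~0u };  // 2 empty, 1 mixed, 3 full
  const int kBitsPerCell = 32;

  int run_start = 0, run_len = 0;
  uint32_t run_type = 0;
  for (int i = 0; i < 6; i++) {
    uint32_t c = cells[i];
    if (c == run_type && (c == 0 || c == ~0u)) { run_len++; continue; }
    if (run_len > 0)  // flush the previous run
      printf("%d: %dx%d\n", run_start, run_type ? 1 : 0, run_len * kBitsPerCell);
    if (c == 0 || c == ~0u) { run_start = i; run_type = c; run_len = 1; }
    else { run_len = 0; printf("%d: mixed cell %08x\n", i, c); }
  }
  if (run_len > 0)
    printf("%d: %dx%d\n", run_start, run_type ? 1 : 0, run_len * kBitsPerCell);
  return 0;
}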
215 void Print() { 274 void Print() {
275 CellPrinter printer;
216 for (int i = 0; i < CellsCount(); i++) { 276 for (int i = 0; i < CellsCount(); i++) {
217 PrintWord(cells()[i]); 277 printer.Print(i, cells()[i]);
218 } 278 }
279 printer.Flush();
219 PrintF("\n"); 280 PrintF("\n");
220 } 281 }
221 282
222 bool IsClean() { 283 bool IsClean() {
223 for (int i = 0; i < CellsCount(); i++) { 284 for (int i = 0; i < CellsCount(); i++) {
224 if (cells()[i] != 0) return false; 285 if (cells()[i] != 0) return false;
225 } 286 }
226 return true; 287 return true;
227 } 288 }
228 }; 289 };
(...skipping 21 matching lines...)
250 311
251 Address body() { return address() + kBodyOffset; } 312 Address body() { return address() + kBodyOffset; }
252 313
253 int body_size() { return size() - kBodyOffset; } 314 int body_size() { return size() - kBodyOffset; }
254 315
255 enum MemoryChunkFlags { 316 enum MemoryChunkFlags {
256 IS_EXECUTABLE, 317 IS_EXECUTABLE,
257 NUM_MEMORY_CHUNK_FLAGS 318 NUM_MEMORY_CHUNK_FLAGS
258 }; 319 };
259 320
260 void SetFlag(MemoryChunkFlags flag) { 321 void SetFlag(int flag) {
261 flags_ |= 1 << flag; 322 flags_ |= 1 << flag;
262 } 323 }
263 324
264 void ClearFlag(MemoryChunkFlags flag) { 325 void ClearFlag(int flag) {
265 flags_ &= ~(1 << flag); 326 flags_ &= ~(1 << flag);
266 } 327 }
267 328
268 bool IsFlagSet(MemoryChunkFlags flag) { 329 bool IsFlagSet(int flag) {
269 return (flags_ & (1 << flag)) != 0; 330 return (flags_ & (1 << flag)) != 0;
270 } 331 }
271 332
272 static const intptr_t kAlignment = (1 << kPageSizeBits); 333 static const intptr_t kAlignment = (1 << kPageSizeBits);
273 334
274 static const intptr_t kAlignmentMask = kAlignment - 1; 335 static const intptr_t kAlignmentMask = kAlignment - 1;
275 336
276 static const size_t kHeaderSize = kPointerSize + kPointerSize + kPointerSize + 337 static const size_t kHeaderSize = kPointerSize + kPointerSize + kPointerSize +
277 kPointerSize + kPointerSize; 338 kPointerSize + kPointerSize + kPointerSize;
278 339
279 static const size_t kMarksBitmapLength = 340 static const size_t kMarksBitmapLength =
280 (1 << kPageSizeBits) >> (kPointerSizeLog2); 341 (1 << kPageSizeBits) >> (kPointerSizeLog2);
281 342
282 static const size_t kMarksBitmapSize = 343 static const size_t kMarksBitmapSize =
283 (1 << kPageSizeBits) >> (kPointerSizeLog2 + kBitsPerByteLog2); 344 (1 << kPageSizeBits) >> (kPointerSizeLog2 + kBitsPerByteLog2);
284 345
285 static const int kBodyOffset = 346 static const int kBodyOffset =
286 CODE_POINTER_ALIGN(MAP_POINTER_ALIGN(kHeaderSize + kMarksBitmapSize)); 347 CODE_POINTER_ALIGN(MAP_POINTER_ALIGN(kHeaderSize + kMarksBitmapSize));
287 348
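With the extra pointer field, a chunk now lays out as six pointer-sized header fields, then the marks bitmap (one bit per pointer-sized word of the page), then the object body rounded up to map/code pointer alignment. A back-of-the-envelope sketch of those sizes under purely illustrative assumptions (8 KB pages, 4-byte pointers, 32-byte code alignment; none of these numbers are asserted by the patch itself):

#include <cstdio>

int main() {
  const int kPageSize = 1 << 13;                      // assumed 8 KB page
  const int kPointerSize = 4;                         // assumed 32-bit build
  const int kHeaderSize = 6 * kPointerSize;           // six header pointers, as above

  const int kMarksBitmapLength = kPageSize / kPointerSize;   // bits: one per word
  const int kMarksBitmapSize = kMarksBitmapLength / 8;       // bytes

  const int kCodeAlignment = 32;                      // assumed CODE_POINTER_ALIGN unit
  const int kBodyOffset =
      (kHeaderSize + kMarksBitmapSize + kCodeAlignment - 1) & ~(kCodeAlignment - 1);

  // Prints: bitmap=256 bytes, body offset=288, object area=7904 bytes
  printf("bitmap=%d bytes, body offset=%d, object area=%d bytes\n",
         kMarksBitmapSize, kBodyOffset, kPageSize - kBodyOffset);
  return 0;
}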
288 size_t size() const { return size_; } 349 size_t size() const { return size_; }
289 350
290 Executability executable() { 351 Executability executable() {
291 return IsFlagSet(IS_EXECUTABLE) ? EXECUTABLE : NOT_EXECUTABLE; 352 return IsFlagSet(IS_EXECUTABLE) ? EXECUTABLE : NOT_EXECUTABLE;
292 } 353 }
293 354
294 // --------------------------------------------------------------------- 355 // ---------------------------------------------------------------------
295 // Markbits support 356 // Markbits support
357
296 class BitmapStorageDescriptor { 358 class BitmapStorageDescriptor {
297 public: 359 public:
298 INLINE(static int CellsCount(Address addr)) { 360 INLINE(static int CellsCount(Address addr)) {
299 return Bitmap<BitmapStorageDescriptor>::CellsForLength( 361 return Bitmap<BitmapStorageDescriptor>::CellsForLength(
300 kMarksBitmapLength); 362 kMarksBitmapLength);
301 } 363 }
302 }; 364 };
303 365
304 typedef Bitmap<BitmapStorageDescriptor> MarkbitsBitmap; 366 typedef Bitmap<BitmapStorageDescriptor> MarkbitsBitmap;
305 367
306 inline MarkbitsBitmap* markbits() { 368 inline MarkbitsBitmap* markbits() {
307 return MarkbitsBitmap::FromAddress(address() + kHeaderSize); 369 return MarkbitsBitmap::FromAddress(address() + kHeaderSize);
308 } 370 }
309 371
372 void PrintMarkbits() { markbits()->Print(); }
373
310 inline uint32_t Address2Markbit(Address addr) { 374 inline uint32_t Address2Markbit(Address addr) {
311 return static_cast<uint32_t>(addr - this->address()) >> kPointerSizeLog2; 375 return static_cast<uint32_t>(addr - this->address()) >> kPointerSizeLog2;
312 } 376 }
313 377
314 inline static uint32_t FastAddress2Markbit(Address addr) { 378 inline static uint32_t FastAddress2Markbit(Address addr) {
315 const intptr_t offset = 379 const intptr_t offset =
316 reinterpret_cast<intptr_t>(addr) & kAlignmentMask; 380 reinterpret_cast<intptr_t>(addr) & kAlignmentMask;
317 381
318 return static_cast<uint32_t>(offset) >> kPointerSizeLog2; 382 return static_cast<uint32_t>(offset) >> kPointerSizeLog2;
319 } 383 }
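FastAddress2Markbit exploits the fact that chunks are kAlignment-aligned: masking any interior address with kAlignmentMask yields its offset within the chunk without dereferencing the chunk header, and shifting by kPointerSizeLog2 turns that byte offset into a mark-bit index. A hedged sketch of the trick (the 8 KB alignment and 4-byte pointers are illustrative assumptions):

#include <cstdint>
#include <cstdio>

static const int kPageSizeBits = 13;                           // assumed
static const uintptr_t kAlignmentMask =
    (static_cast<uintptr_t>(1) << kPageSizeBits) - 1;
static const int kPointerSizeLog2 = 2;                         // assumed 32-bit pointers

static uint32_t FastAddress2Markbit(uintptr_t addr) {
  // Offset inside the aligned chunk, in pointer-sized words.
  return static_cast<uint32_t>(addr & kAlignmentMask) >> kPointerSizeLog2;
}

int main() {
  uintptr_t chunk = 0x40000;                 // some kAlignment-aligned chunk
  uintptr_t object = chunk + 0x130;          // an object 0x130 bytes into it
  printf("markbit index = %u\n", FastAddress2Markbit(object));  // 0x130 / 4 = 76
  return 0;
}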
(...skipping 142 matching lines...)
462 // The start offset of the object area in a page. Aligned to both maps and 526 // The start offset of the object area in a page. Aligned to both maps and
463 // code alignment to be suitable for both. 527 // code alignment to be suitable for both.
464 static const int kObjectStartOffset = kBodyOffset; 528 static const int kObjectStartOffset = kBodyOffset;
465 529
466 // Object area size in bytes. 530 // Object area size in bytes.
467 static const int kObjectAreaSize = kPageSize - kObjectStartOffset; 531 static const int kObjectAreaSize = kPageSize - kObjectStartOffset;
468 532
469 // Maximum object size that fits in a page. 533 // Maximum object size that fits in a page.
470 static const int kMaxHeapObjectSize = kObjectAreaSize; 534 static const int kMaxHeapObjectSize = kObjectAreaSize;
471 535
536 static const int kFirstUsedCell =
537 (kBodyOffset/kPointerSize) >> MarkbitsBitmap::kBitsPerCellLog2;
538
539 static const int kLastUsedCell =
540 ((kPageSize - kPointerSize)/kPointerSize) >>
541 MarkbitsBitmap::kBitsPerCellLog2;
542
543
472 #ifdef ENABLE_CARDMARKING_WRITE_BARRIER 544 #ifdef ENABLE_CARDMARKING_WRITE_BARRIER
473 static const int kDirtyFlagOffset = 2 * kPointerSize; 545 static const int kDirtyFlagOffset = 2 * kPointerSize;
474 static const int kRegionSizeLog2 = 8; 546 static const int kRegionSizeLog2 = 8;
475 static const int kRegionSize = 1 << kRegionSizeLog2; 547 static const int kRegionSize = 1 << kRegionSizeLog2;
476 static const intptr_t kRegionAlignmentMask = (kRegionSize - 1); 548 static const intptr_t kRegionAlignmentMask = (kRegionSize - 1);
477 549
478 STATIC_CHECK(kRegionSize == kPageSize / kBitsPerInt); 550 STATIC_CHECK(kRegionSize == kPageSize / kBitsPerInt);
479 #endif 551 #endif
480 552
481 enum PageFlag { 553 enum PageFlag {
482 // Page allocation watermark was bumped by preallocation during scavenge. 554 // Page allocation watermark was bumped by preallocation during scavenge.
483 // Correct watermark can be retrieved by CachedAllocationWatermark() method 555 // Correct watermark can be retrieved by CachedAllocationWatermark() method
484 WATERMARK_INVALIDATED = NUM_MEMORY_CHUNK_FLAGS, 556 WATERMARK_INVALIDATED = NUM_MEMORY_CHUNK_FLAGS,
557 IS_CONTINIOUS,
485 NUM_PAGE_FLAGS // Must be last 558 NUM_PAGE_FLAGS // Must be last
486 }; 559 };
487 560
488 static const int kPageFlagMask = (1 << NUM_PAGE_FLAGS) - 1;
489
490 // To avoid an additional WATERMARK_INVALIDATED flag clearing pass during 561 // To avoid an additional WATERMARK_INVALIDATED flag clearing pass during
491 // scavenge we just invalidate the watermark on each old space page after 562 // scavenge we just invalidate the watermark on each old space page after
492 // processing it. And then we flip the meaning of the WATERMARK_INVALIDATED 563 // processing it. And then we flip the meaning of the WATERMARK_INVALIDATED
493 // flag at the beginning of the next scavenge and each page becomes marked as 564 // flag at the beginning of the next scavenge and each page becomes marked as
494 // having a valid watermark. 565 // having a valid watermark.
495 // 566 //
496 // The following invariant must hold for pages in old pointer and map spaces: 567 // The following invariant must hold for pages in old pointer and map spaces:
497 // If page is in use then page is marked as having invalid watermark at 568 // If page is in use then page is marked as having invalid watermark at
498 // the beginning and at the end of any GC. 569 // the beginning and at the end of any GC.
499 // 570 //
500 // This invariant guarantees that after flipping flag meaning at the 571 // This invariant guarantees that after flipping flag meaning at the
501 // beginning of scavenge all pages in use will be marked as having valid 572 // beginning of scavenge all pages in use will be marked as having valid
502 // watermark. 573 // watermark.
503 static inline void FlipMeaningOfInvalidatedWatermarkFlag(); 574 static inline void FlipMeaningOfInvalidatedWatermarkFlag();
504 575
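The scheme above stores one WATERMARK_INVALIDATED bit per page plus a single process-wide value, watermark_invalidated_mark_, that says which polarity of that bit currently means "invalidated"; flipping the global value revalidates every page at once. The functions are only declared in this header, so the following is a guessed minimal sketch of the mechanism the comment describes, not the real bodies:

#include <cstdint>
#include <cstdio>

struct PageSketch {                                    // hypothetical stand-in for Page
  static const int WATERMARK_INVALIDATED = 1;          // bit position, as the enums above imply
  static uint32_t watermark_invalidated_mark_;         // polarity that means "invalidated"
  uint32_t flags_;

  PageSketch() : flags_(0) {}

  bool IsWatermarkValid() {
    return (flags_ & (1u << WATERMARK_INVALIDATED)) != watermark_invalidated_mark_;
  }
  void InvalidateWatermark(bool value) {               // true => mark as invalidated
    uint32_t bit = 1u << WATERMARK_INVALIDATED;
    flags_ = (flags_ & ~bit) |
             (value ? watermark_invalidated_mark_ : (watermark_invalidated_mark_ ^ bit));
  }
  static void FlipMeaningOfInvalidatedWatermarkFlag() {
    // Every page that was "invalidated" becomes "valid" in O(1), without being touched.
    watermark_invalidated_mark_ ^= 1u << WATERMARK_INVALIDATED;
  }
};
uint32_t PageSketch::watermark_invalidated_mark_ = 0;

int main() {
  PageSketch p;
  p.InvalidateWatermark(true);                              // bumped during scavenge
  printf("%d\n", p.IsWatermarkValid());                     // 0: invalid
  PageSketch::FlipMeaningOfInvalidatedWatermarkFlag();      // start of next scavenge
  printf("%d\n", p.IsWatermarkValid());                     // 1: valid again
  return 0;
}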
505 // Returns true if the page allocation watermark was not altered during 576 // Returns true if the page allocation watermark was not altered during
506 // scavenge. 577 // scavenge.
507 inline bool IsWatermarkValid(); 578 inline bool IsWatermarkValid();
508 579
509 inline void InvalidateWatermark(bool value); 580 inline void InvalidateWatermark(bool value);
510 581
511 inline void ClearGCFields(); 582 inline void ClearGCFields();
512 583
513 static const int kAllocationWatermarkOffsetShift = WATERMARK_INVALIDATED + 1; 584 static const int kAllocationWatermarkOffsetShift = NUM_PAGE_FLAGS;
514 static const int kAllocationWatermarkOffsetBits = kPageSizeBits + 1; 585 static const int kAllocationWatermarkOffsetBits = kPageSizeBits + 1;
515 static const uint32_t kAllocationWatermarkOffsetMask = 586 static const uint32_t kAllocationWatermarkOffsetMask =
516 ((1 << kAllocationWatermarkOffsetBits) - 1) << 587 ((1 << kAllocationWatermarkOffsetBits) - 1) <<
517 kAllocationWatermarkOffsetShift; 588 kAllocationWatermarkOffsetShift;
518 589
519 static const uint32_t kFlagsMask = 590 static const uint32_t kFlagsMask =
520 ((1 << kAllocationWatermarkOffsetShift) - 1); 591 ((1 << kAllocationWatermarkOffsetShift) - 1);
521 592
522 STATIC_CHECK(kBitsPerInt - kAllocationWatermarkOffsetShift >= 593 STATIC_CHECK(kBitsPerInt - kAllocationWatermarkOffsetShift >=
523 kAllocationWatermarkOffsetBits); 594 kAllocationWatermarkOffsetBits);
524 595
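The allocation watermark is packed into the same flags_ word: the low kAllocationWatermarkOffsetShift bits hold the flags (kFlagsMask), and the next kAllocationWatermarkOffsetBits bits hold the watermark's offset within the page. A sketch of that packing arithmetic; NUM_PAGE_FLAGS == 3 follows from the enums above, while the 8 KB page size is an illustrative assumption:

#include <cstdint>
#include <cstdio>

static const int kNumPageFlags = 3;                 // three flag bits, per the enums above
static const int kPageSizeBits = 13;                // assumed
static const int kAllocationWatermarkOffsetShift = kNumPageFlags;
static const int kAllocationWatermarkOffsetBits = kPageSizeBits + 1;
static const uint32_t kAllocationWatermarkOffsetMask =
    ((1u << kAllocationWatermarkOffsetBits) - 1) << kAllocationWatermarkOffsetShift;
static const uint32_t kFlagsMask = (1u << kAllocationWatermarkOffsetShift) - 1;

int main() {
  uint32_t flags = 0x5;                    // some flag bits already set
  uint32_t watermark_offset = 0x1f60;      // watermark offset within the page

  // Store the offset above the flag bits without disturbing them.
  flags = (flags & kFlagsMask) |
          ((watermark_offset << kAllocationWatermarkOffsetShift) &
           kAllocationWatermarkOffsetMask);

  uint32_t read_back =
      (flags & kAllocationWatermarkOffsetMask) >> kAllocationWatermarkOffsetShift;
  printf("flags=%x offset=%x\n", flags & kFlagsMask, read_back);  // flags=5 offset=1f60
  return 0;
}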
525 // This field contains the meaning of the WATERMARK_INVALIDATED flag. 596 // This field contains the meaning of the WATERMARK_INVALIDATED flag.
526 // Instead of clearing this flag from all pages we just flip 597 // Instead of clearing this flag from all pages we just flip
527 // its meaning at the beginning of a scavenge. 598 // its meaning at the beginning of a scavenge.
528 static intptr_t watermark_invalidated_mark_; 599 static intptr_t watermark_invalidated_mark_;
529 600
601 Address linearity_boundary() { return linearity_boundary_; }
602 void set_linearity_boundary(Address linearity_boundary) {
603 linearity_boundary_ = linearity_boundary;
604 }
605
530 private: 606 private:
531 static Page* Initialize(MemoryChunk* chunk) { 607 static Page* Initialize(MemoryChunk* chunk) {
532 Page* page = static_cast<Page*>(chunk); 608 Page* page = static_cast<Page*>(chunk);
533 page->allocation_watermark_ = page->body(); 609 page->allocation_watermark_ = page->body();
534 page->InvalidateWatermark(true); 610 page->InvalidateWatermark(true);
611 page->SetFlag(IS_CONTINIOUS);
535 return page; 612 return page;
536 } 613 }
537 614
538 Address allocation_watermark_; 615 Address allocation_watermark_;
616 Address linearity_boundary_;
Erik Corry 2011/01/19 13:46:48 Comment?
539 617
540 friend class MemoryAllocator; 618 friend class MemoryAllocator;
541 }; 619 };
542 620
543 STATIC_CHECK(sizeof(Page) <= MemoryChunk::kHeaderSize); 621 STATIC_CHECK(sizeof(Page) <= MemoryChunk::kHeaderSize);
544 622
545 class LargePage : public MemoryChunk { 623 class LargePage : public MemoryChunk {
546 public: 624 public:
547 HeapObject* GetObject() { 625 HeapObject* GetObject() {
548 return HeapObject::FromAddress(body()); 626 return HeapObject::FromAddress(body());
(...skipping 289 matching lines...)
838 public: 916 public:
839 virtual ~ObjectIterator() { } 917 virtual ~ObjectIterator() { }
840 918
841 virtual HeapObject* next_object() = 0; 919 virtual HeapObject* next_object() = 0;
842 }; 920 };
843 921
844 922
845 // ----------------------------------------------------------------------------- 923 // -----------------------------------------------------------------------------
846 // Heap object iterator in new/old/map spaces. 924 // Heap object iterator in new/old/map spaces.
847 // 925 //
848 // A HeapObjectIterator iterates objects from a given address to the 926 // A HeapObjectIterator iterates objects from the bottom of the given space of
Erik Corry 2011/01/19 13:46:48 I can't quite parse this.
Vyacheslav Egorov (Chromium) 2011/01/20 16:40:21 Done.
849 // top of a space. The given address must be below the current 927 // page given address to the top of a space. The given address must be below the
850 // allocation pointer (space top). There are some caveats. 928 // current allocation pointer (space top). There are some caveats.
851 // 929 //
852 // (1) If the space top changes upward during iteration (because of 930 // (1) If the space top changes upward during iteration (because of
853 // allocating new objects), the iterator does not iterate objects 931 // allocating new objects), the iterator does not iterate objects
854 // above the original space top. The caller must create a new 932 // above the original space top. The caller must create a new
855 // iterator starting from the old top in order to visit these new 933 // iterator starting from the old top in order to visit these new
856 // objects. 934 // objects.
857 // 935 //
858 // (2) If new objects are allocated below the original allocation top 936 // (2) If new objects are allocated below the original allocation top
859 // (e.g., free-list allocation in paged spaces), the new objects 937 // (e.g., free-list allocation in paged spaces), the new objects
860 // may or may not be iterated depending on their position with 938 // may or may not be iterated depending on their position with
861 // respect to the current point of iteration. 939 // respect to the current point of iteration.
862 // 940 //
863 // (3) The space top should not change downward during iteration, 941 // (3) The space top should not change downward during iteration,
864 // otherwise the iterator will return not-necessarily-valid 942 // otherwise the iterator will return not-necessarily-valid
865 // objects. 943 // objects.
866 944
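A self-contained toy model of caveat (1), plain C++ rather than V8 code: records are bump-allocated into a buffer and the walk stops at the top captured when iteration began, so a later allocation is not visited.

#include <cstdint>
#include <cstdio>

// Each toy "object" is a one-byte size header followed by payload bytes.
static uint8_t* Allocate(uint8_t** top, uint8_t size) {
  uint8_t* obj = *top;
  obj[0] = size;
  *top += size;
  return obj;
}

int main() {
  uint8_t space[64];
  uint8_t* top = space;
  Allocate(&top, 8);
  Allocate(&top, 16);
  uint8_t* iteration_top = top;            // space top at iterator creation

  Allocate(&top, 4);                       // allocated after the snapshot

  int visited = 0;
  for (uint8_t* cur = space; cur < iteration_top; cur += cur[0]) visited++;
  printf("visited %d objects\n", visited); // 2: the late allocation is skipped
  return 0;
}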
867 class HeapObjectIterator: public ObjectIterator { 945 class HeapObjectIterator: public ObjectIterator {
868 public: 946 public:
869 // Creates a new object iterator in a given space. If a start 947 // Creates a new object iterator in a given space.
870 // address is not given, the iterator starts from the space bottom.
871 // If the size function is not given, the iterator calls the default 948 // If the size function is not given, the iterator calls the default
872 // Object::Size(). 949 // Object::Size().
873 explicit HeapObjectIterator(PagedSpace* space); 950 explicit HeapObjectIterator(PagedSpace* space);
874 HeapObjectIterator(PagedSpace* space, HeapObjectCallback size_func); 951 HeapObjectIterator(PagedSpace* space, HeapObjectCallback size_func);
875 HeapObjectIterator(PagedSpace* space, Address start);
876 HeapObjectIterator(PagedSpace* space,
877 Address start,
878 HeapObjectCallback size_func);
879 HeapObjectIterator(Page* page, HeapObjectCallback size_func); 952 HeapObjectIterator(Page* page, HeapObjectCallback size_func);
880 953
881 inline HeapObject* next() { 954 inline HeapObject* next() {
882 return (cur_addr_ < cur_limit_) ? FromCurrentPage() : FromNextPage(); 955 return (cur_addr_ < cur_limit_) ? FromCurrentPage() : FromNextPage();
883 } 956 }
884 957
885 // implementation of ObjectIterator. 958 // implementation of ObjectIterator.
886 virtual HeapObject* next_object() { return next(); } 959 virtual HeapObject* next_object() { return next(); }
887 960
888 private: 961 private:
889 Address cur_addr_; // current iteration point 962 Address cur_addr_; // current iteration point
890 Address end_addr_; // end iteration point 963 Address end_addr_; // end iteration point
891 Address cur_limit_; // current page limit 964 Address cur_limit_; // current page limit
892 HeapObjectCallback size_func_; // size function 965 HeapObjectCallback size_func_; // size function
893 Page* end_page_; // caches the page of the end address 966 Page* end_page_; // caches the page of the end address
894 967
895 HeapObject* FromCurrentPage() { 968 HeapObject* FromCurrentPage() {
896 ASSERT(cur_addr_ < cur_limit_); 969 ASSERT(cur_addr_ < cur_limit_);
970 HeapObject* obj = HeapObject::FromAddress(cur_addr_);
897 971
898 HeapObject* obj = HeapObject::FromAddress(cur_addr_); 972 Page* p = Page::FromAddress(cur_addr_);
899 int obj_size = (size_func_ == NULL) ? obj->Size() : size_func_(obj); 973 if (p->IsFlagSet(Page::IS_CONTINIOUS)) {
900 ASSERT_OBJECT_SIZE(obj_size); 974 int obj_size = (size_func_ == NULL) ? obj->Size() : size_func_(obj);
975 ASSERT_OBJECT_SIZE(obj_size);
901 976
902 cur_addr_ += obj_size; 977 cur_addr_ += obj_size;
903 ASSERT(cur_addr_ <= cur_limit_); 978 ASSERT(cur_addr_ <= cur_limit_);
979 } else {
980 AdvanceUsingMarkbits();
981 }
904 982
905 return obj; 983 return obj;
906 } 984 }
907 985
986 void AdvanceUsingMarkbits();
987
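AdvanceUsingMarkbits is only declared here; it is needed because a page that is not IS_CONTINIOUS (conservative sweeping leaves unswept garbage between live objects) cannot be walked simply by adding Object::Size() to the current address. A toy model of the underlying idea, finding the next marked word in a bitmap and jumping to it; this is an illustration, not the actual implementation:

#include <cstdint>
#include <cstdio>

// Toy mark bitmap: bit i set means word i starts a live object.
static int NextMarkedWord(const uint32_t* cells, int word, int word_count) {
  for (; word < word_count; word++) {
    if (cells[word >> 5] & (1u << (word & 31))) return word;
  }
  return -1;  // no more live objects on this page
}

int main() {
  uint32_t cells[2] = { 0, 0 };
  cells[0] |= 1u << 3;                       // live object starting at word 3
  cells[1] |= 1u << 8;                       // live object starting at word 40
  const int word_count = 64;

  for (int w = NextMarkedWord(cells, 0, word_count); w != -1;
       w = NextMarkedWord(cells, w + 1, word_count)) {
    printf("live object at word %d\n", w);   // prints 3, then 40
  }
  return 0;
}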
908 // Slow path of next, goes into the next page. 988 // Slow path of next, goes into the next page.
909 HeapObject* FromNextPage(); 989 HeapObject* FromNextPage();
910 990
911 // Initializes fields. 991 // Initializes fields.
912 void Initialize(Address start, Address end, HeapObjectCallback size_func); 992 void Initialize(Address start, Address end, HeapObjectCallback size_func);
913 993
914 #ifdef DEBUG 994 #ifdef DEBUG
915 // Verifies whether fields have valid values. 995 // Verifies whether fields have valid values.
916 void Verify(); 996 void Verify();
917 #endif 997 #endif
(...skipping 1338 matching lines...)
2256 2336
2257 private: 2337 private:
2258 LargePage* current_; 2338 LargePage* current_;
2259 HeapObjectCallback size_func_; 2339 HeapObjectCallback size_func_;
2260 }; 2340 };
2261 2341
2262 2342
2263 } } // namespace v8::internal 2343 } } // namespace v8::internal
2264 2344
2265 #endif // V8_SPACES_H_ 2345 #endif // V8_SPACES_H_