Chromium Code Reviews

Unified Diff: src/spaces.h

Issue 11028027: Revert trunk to bleeding_edge at r12484 (Closed)
Base URL: https://v8.googlecode.com/svn/trunk
Patch Set: Created 8 years, 2 months ago
Lines prefixed with '-' are removed from trunk by this revert; lines prefixed with '+' are restored from bleeding_edge; unprefixed lines are unchanged context.
 // Copyright 2011 the V8 project authors. All rights reserved.
 // Redistribution and use in source and binary forms, with or without
 // modification, are permitted provided that the following conditions are
 // met:
 //
 //     * Redistributions of source code must retain the above copyright
 //       notice, this list of conditions and the following disclaimer.
 //     * Redistributions in binary form must reproduce the above
 //       copyright notice, this list of conditions and the following
 //       disclaimer in the documentation and/or other materials provided
(...skipping 266 matching lines...)
     CellPrinter printer;
     for (int i = 0; i < CellsCount(); i++) {
       printer.Print(i, cells()[i]);
     }
     printer.Flush();
     PrintF("\n");
   }

   bool IsClean() {
     for (int i = 0; i < CellsCount(); i++) {
-      if (cells()[i] != 0) {
-        return false;
-      }
+      if (cells()[i] != 0) return false;
     }
     return true;
   }
 };


 class SkipList;
 class SlotsBuffer;

 // MemoryChunk represents a memory region owned by a specific space.
(...skipping 68 matching lines...)
     return addr >= area_start() && addr < area_end();
   }

   // Checks whether addr can be a limit of addresses in this page.
   // It's a limit if it's in the page, or if it's just after the
   // last byte of the page.
   bool ContainsLimit(Address addr) {
     return addr >= area_start() && addr <= area_end();
   }

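Contains and ContainsLimit differ only at area_end(): the page body is the half-open interval [area_start(), area_end()), while a valid limit may also be the one-past-the-end address. A small illustration with made-up addresses:

    // Sketch: for a page body spanning [0x1000, 0x2000):
    //   page->Contains(0x1000)      -> true   (first byte)
    //   page->Contains(0x1fff)      -> true   (last byte)
    //   page->Contains(0x2000)      -> false  (one past the end)
    //   page->ContainsLimit(0x2000) -> true   (usable as an allocation limit)
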
-  // Every n write barrier invocations we go to runtime even though
-  // we could have handled it in generated code. This lets us check
-  // whether we have hit the limit and should do some more marking.
-  static const int kWriteBarrierCounterGranularity = 500;
-
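The deleted granularity constant implies a countdown scheme: generated write-barrier code decrements a per-chunk counter and only falls through to the runtime on every kWriteBarrierCounterGranularity-th invocation, giving the runtime a periodic chance to advance incremental marking. A minimal sketch of that pattern, with DoSomeIncrementalMarking as a hypothetical runtime hook:

    // Sketch only: the slow path taken once the counter is exhausted.
    void WriteBarrierRuntimePath(MemoryChunk* chunk) {
      DoSomeIncrementalMarking();  // hypothetical: check limits, mark a bit more
      chunk->set_write_barrier_counter(
          MemoryChunk::kWriteBarrierCounterGranularity);  // rearm the countdown
    }
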
   enum MemoryChunkFlags {
     IS_EXECUTABLE,
     ABOUT_TO_BE_FREED,
     POINTERS_TO_HERE_ARE_INTERESTING,
     POINTERS_FROM_HERE_ARE_INTERESTING,
     SCAN_ON_SCAVENGE,
     IN_FROM_SPACE,  // Mutually exclusive with IN_TO_SPACE.
     IN_TO_SPACE,    // All pages in new space have one of these two set.
     NEW_SPACE_BELOW_AGE_MARK,
     CONTAINS_ONLY_DATA,
     EVACUATION_CANDIDATE,
     RESCAN_ON_EVACUATION,

     // Pages swept precisely can be iterated, hitting only the live objects,
     // whereas those swept conservatively cannot be iterated over. Both flags
     // indicate that marking bits have been cleared by the sweeper; otherwise
     // marking bits are still intact.
     WAS_SWEPT_PRECISELY,
     WAS_SWEPT_CONSERVATIVELY,

-    // Used for large objects only. Indicates that the object has been
-    // partially scanned by the incremental mark-sweep GC. Objects that have
-    // been partially scanned are marked black so that the write barrier
-    // triggers for them, and they are counted as live bytes. If the mutator
-    // writes to them they may be turned grey and subtracted from the live byte
-    // list. They move back to the marking deque either by an iteration over
-    // the large object space or in the write barrier.
-    IS_PARTIALLY_SCANNED,
-
     // Last flag, keep at bottom.
     NUM_MEMORY_CHUNK_FLAGS
   };


   static const int kPointersToHereAreInterestingMask =
       1 << POINTERS_TO_HERE_ARE_INTERESTING;

   static const int kPointersFromHereAreInterestingMask =
       1 << POINTERS_FROM_HERE_ARE_INTERESTING;

   static const int kEvacuationCandidateMask =
       1 << EVACUATION_CANDIDATE;

   static const int kSkipEvacuationSlotsRecordingMask =
       (1 << EVACUATION_CANDIDATE) |
       (1 << RESCAN_ON_EVACUATION) |
       (1 << IN_FROM_SPACE) |
       (1 << IN_TO_SPACE);

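Each mask packs one or more flag bits so a single AND over the flags_ word answers a compound question. For example, slot recording can be skipped for pages that are evacuation candidates, scheduled for rescan, or in new space; a sketch in the spirit of the class's other accessors:

    // Sketch: one load and one bitwise test cover all four skip conditions.
    bool ShouldSkipEvacuationSlotRecording() {
      return (flags_ & kSkipEvacuationSlotsRecordingMask) != 0;
    }
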
-  static const int kIsPartiallyScannedMask = 1 << IS_PARTIALLY_SCANNED;
-
-  void SetPartiallyScannedProgress(int progress) {
-    SetFlag(IS_PARTIALLY_SCANNED);
-    partially_scanned_progress_ = progress;
-  }
-
-  bool IsPartiallyScanned() {
-    return IsFlagSet(IS_PARTIALLY_SCANNED);
-  }
-
-  void SetCompletelyScanned() {
-    ClearFlag(IS_PARTIALLY_SCANNED);
-  }
-
-  int PartiallyScannedProgress() {
-    ASSERT(IsPartiallyScanned());
-    return partially_scanned_progress_;
-  }

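Before the revert, an incremental marker could use this progress API to scan a large object across several marking steps. A rough, hypothetical usage sketch (kStepSize and ScanBody are illustrative names, not V8 API):

    // Sketch: resume where the previous marking step left off.
    int start = chunk->IsPartiallyScanned() ? chunk->PartiallyScannedProgress() : 0;
    int end = Min(start + kStepSize, object_size);
    ScanBody(object, start, end);               // hypothetical marking visitor
    if (end < object_size) {
      chunk->SetPartiallyScannedProgress(end);  // finish in a later step
    } else {
      chunk->SetCompletelyScanned();
    }
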
   void SetFlag(int flag) {
     flags_ |= static_cast<uintptr_t>(1) << flag;
   }

   void ClearFlag(int flag) {
     flags_ &= ~(static_cast<uintptr_t>(1) << flag);
   }

   void SetFlagTo(int flag, bool value) {
(...skipping 35 matching lines...)
                 live_byte_count_ + by);
     }
     live_byte_count_ += by;
     ASSERT_LE(static_cast<unsigned>(live_byte_count_), size_);
   }
   int LiveBytes() {
     ASSERT(static_cast<unsigned>(live_byte_count_) <= size_);
     return live_byte_count_;
   }

-  int write_barrier_counter() {
-    return static_cast<int>(write_barrier_counter_);
-  }
-
-  void set_write_barrier_counter(int counter) {
-    write_barrier_counter_ = counter;
-  }
-
-
   static void IncrementLiveBytesFromGC(Address address, int by) {
     MemoryChunk::FromAddress(address)->IncrementLiveBytes(by);
   }

   static void IncrementLiveBytesFromMutator(Address address, int by);

   static const intptr_t kAlignment =
       (static_cast<uintptr_t>(1) << kPageSizeBits);

   static const intptr_t kAlignmentMask = kAlignment - 1;

   static const intptr_t kSizeOffset = kPointerSize + kPointerSize;

   static const intptr_t kLiveBytesOffset =
       kSizeOffset + kPointerSize + kPointerSize + kPointerSize +
       kPointerSize + kPointerSize +
       kPointerSize + kPointerSize + kPointerSize + kIntSize;

   static const size_t kSlotsBufferOffset = kLiveBytesOffset + kIntSize;

-  static const size_t kWriteBarrierCounterOffset =
+  static const size_t kHeaderSize =
       kSlotsBufferOffset + kPointerSize + kPointerSize;
-  static const size_t kPartiallyScannedProgress =
-      kWriteBarrierCounterOffset + kPointerSize;
-
-  // Actually the partially_scanned_progress_ member is only an int, but on
-  // 64 bit the size of MemoryChunk gets rounded up to a 64-bit size, so we
-  // have to have the header start kPointerSize after the
-  // partially_scanned_progress_ member.
-  static const size_t kHeaderSize = kPartiallyScannedProgress + kPointerSize;

   static const int kBodyOffset =
       CODE_POINTER_ALIGN(MAP_POINTER_ALIGN(kHeaderSize + Bitmap::kSize));

   // The start offset of the object area in a page. Aligned to both maps and
   // code alignment to be suitable for both. Also aligned to 32 words because
   // the marking bitmap is arranged in 32 bit chunks.
   static const int kObjectStartAlignment = 32 * kPointerSize;
   static const int kObjectStartOffset = kBodyOffset - 1 +
       (kObjectStartAlignment - (kBodyOffset - 1) % kObjectStartAlignment);
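The kObjectStartOffset expression is the usual round-up idiom: it yields the smallest multiple of kObjectStartAlignment that is greater than or equal to kBodyOffset. A worked example with illustrative numbers (32-bit target, so kPointerSize == 4 and kObjectStartAlignment == 128; kBodyOffset == 144 is assumed):

    //   kBodyOffset - 1                           == 143
    //   (kBodyOffset - 1) % kObjectStartAlignment == 15
    //   143 + (128 - 15)                          == 256
    // 256 is the smallest multiple of 128 that is >= 144, so the object
    // area starts on a 32-word boundary as the comment above requires.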
(...skipping 115 matching lines...)
   // in a fixed array.
   Address owner_;
   Heap* heap_;
   // Used by the store buffer to keep track of which pages to mark scan-on-
   // scavenge.
   int store_buffer_counter_;
   // Count of bytes marked black on page.
   int live_byte_count_;
   SlotsBuffer* slots_buffer_;
   SkipList* skip_list_;
-  intptr_t write_barrier_counter_;
-  int partially_scanned_progress_;

   static MemoryChunk* Initialize(Heap* heap,
                                  Address base,
                                  size_t size,
                                  Address area_start,
                                  Address area_end,
                                  Executability executable,
                                  Space* owner);

   friend class MemoryAllocator;
(...skipping 145 matching lines...)
       return RoundDown(size, kCodeAlignment);
     } else {
       return RoundDown(size, kPointerSize);
     }
   }

 #ifdef DEBUG
   virtual void Print() = 0;
 #endif

+  // After calling this we can allocate a certain number of bytes using only
+  // linear allocation (with a LinearAllocationScope and an AlwaysAllocateScope)
+  // without using freelists or causing a GC. This is used by partial
+  // snapshots. It returns true if space was reserved or false if a GC is
+  // needed. For paged spaces the space requested must include the space wasted
+  // at the end of each page when allocating linearly.
+  virtual bool ReserveSpace(int bytes) = 0;
+
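The restored ReserveSpace contract suggests a call pattern like the following during partial-snapshot creation. This is a hedged sketch only: the two scope classes are named in the comment above, but bytes_needed and PerformGC are illustrative:

    // Sketch: reserve first, collect garbage if that fails, then allocate
    // linearly inside the two scopes without touching free lists.
    if (!space->ReserveSpace(bytes_needed)) {
      PerformGC();  // hypothetical: free enough memory, then retry
      CHECK(space->ReserveSpace(bytes_needed));
    }
    LinearAllocationScope linear_scope;
    AlwaysAllocateScope always_allocate;
    // ... up to bytes_needed bytes can now be allocated without a GC ...
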
  private:
   Heap* heap_;
   AllocationSpace id_;
   Executability executable_;
 };


 // ----------------------------------------------------------------------------
 // All heap objects containing executable code (code objects) must be allocated
 // from a 2 GB range of memory, so that they can call each other using 32-bit
(...skipping 500 matching lines...)
   // functions.
   void set_size(Heap* heap, int size_in_bytes);

   // Accessors for the next field.
   inline FreeListNode* next();
   inline FreeListNode** next_address();
   inline void set_next(FreeListNode* next);

   inline void Zap();

-  static inline FreeListNode* cast(MaybeObject* maybe) {
-    ASSERT(!maybe->IsFailure());
-    return reinterpret_cast<FreeListNode*>(maybe);
-  }
-
  private:
   static const int kNextOffset = POINTER_SIZE_ALIGN(FreeSpace::kHeaderSize);

   DISALLOW_IMPLICIT_CONSTRUCTORS(FreeListNode);
 };


 // The free list for the old space. The free list is organized in such a way
 // as to encourage objects allocated around the same time to be near each
 // other. The normal way to allocate is intended to be by bumping a 'top'
(...skipping 42 matching lines...)
   MUST_USE_RESULT HeapObject* Allocate(int size_in_bytes);

 #ifdef DEBUG
   void Zap();
   static intptr_t SumFreeList(FreeListNode* node);
   static int FreeListLength(FreeListNode* cur);
   intptr_t SumFreeLists();
   bool IsVeryLong();
 #endif

-  // Used after booting the VM.
-  void RepairLists(Heap* heap);
-
   struct SizeStats {
     intptr_t Total() {
       return small_size_ + medium_size_ + large_size_ + huge_size_;
     }

     intptr_t small_size_;
     intptr_t medium_size_;
     intptr_t large_size_;
     intptr_t huge_size_;
   };
(...skipping 60 matching lines...)
   // Checks whether an object/address is in this space.
   inline bool Contains(Address a);
   bool Contains(HeapObject* o) { return Contains(o->address()); }

   // Given an address occupied by a live object, return that object if it is
   // in this space, or Failure::Exception() if it is not. The implementation
   // iterates over objects in the page containing the address, so the cost is
   // linear in the number of objects in the page. It may be slow.
   MUST_USE_RESULT MaybeObject* FindObject(Address addr);

-  // During boot the free_space_map is created, and afterwards we may need
-  // to write it into the free list nodes that were already created.
-  virtual void RepairFreeListsAfterBoot();
-
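This hunk and the RepairLists hunk above are two halves of the same mechanism: free-list nodes created before the free_space_map exists carry the wrong map and must be re-stamped once boot completes. A hedged sketch of what one repair pass could look like (the loop body is illustrative, not the actual implementation):

    // Sketch: walk one list and write the now-available map into each node.
    void RepairList(Heap* heap, FreeListNode* head) {
      for (FreeListNode* node = head; node != NULL; node = node->next()) {
        node->set_map_no_write_barrier(heap->free_space_map());
      }
    }
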
   // Prepares for a mark-compact GC.
   virtual void PrepareForMarkCompact();

   // Current capacity without growing (Size() + Available()).
   intptr_t Capacity() { return accounting_stats_.Capacity(); }

   // Total amount of memory committed for this space. For paged
   // spaces this equals the capacity.
   intptr_t CommittedMemory() { return Capacity(); }

(...skipping 1172 matching lines...)
   }
   // Must be small, since an iteration is used for lookup.
   static const int kMaxComments = 64;
 };
 #endif


 } }  // namespace v8::internal

 #endif  // V8_SPACES_H_
