Chromium Code Reviews
chromiumcodereview-hr@appspot.gserviceaccount.com (chromiumcodereview-hr) | Please choose your nickname with Settings | Help | Chromium Project | Gerrit Changes | Sign out
(1)

Side by Side Diff: src/spaces.h

Issue 316133002: Move atomic ops and related files to base library (Closed) Base URL: https://v8.googlecode.com/svn/branches/bleeding_edge
Patch Set: updates Created 6 years, 6 months ago
Use n/p to move between diff chunks; N/P to move between comments. Draft comments are only viewable by you.
Jump to:
View unified diff | Download patch | Annotate | Revision Log
OLDNEW
1 // Copyright 2011 the V8 project authors. All rights reserved. 1 // Copyright 2011 the V8 project authors. All rights reserved.
2 // Use of this source code is governed by a BSD-style license that can be 2 // Use of this source code is governed by a BSD-style license that can be
3 // found in the LICENSE file. 3 // found in the LICENSE file.
4 4
5 #ifndef V8_SPACES_H_ 5 #ifndef V8_SPACES_H_
6 #define V8_SPACES_H_ 6 #define V8_SPACES_H_
7 7
8 #include "src/allocation.h" 8 #include "src/allocation.h"
9 #include "src/hashmap.h" 9 #include "src/hashmap.h"
10 #include "src/list.h" 10 #include "src/list.h"
(...skipping 273 matching lines...) Expand 10 before | Expand all | Expand 10 after
284 } 284 }
285 285
286 // Only works for addresses in pointer spaces, not data or code spaces. 286 // Only works for addresses in pointer spaces, not data or code spaces.
287 static inline MemoryChunk* FromAnyPointerAddress(Heap* heap, Address addr); 287 static inline MemoryChunk* FromAnyPointerAddress(Heap* heap, Address addr);
288 288
289 Address address() { return reinterpret_cast<Address>(this); } 289 Address address() { return reinterpret_cast<Address>(this); }
290 290
291 bool is_valid() { return address() != NULL; } 291 bool is_valid() { return address() != NULL; }
292 292
293 MemoryChunk* next_chunk() const { 293 MemoryChunk* next_chunk() const {
294 return reinterpret_cast<MemoryChunk*>(Acquire_Load(&next_chunk_)); 294 return reinterpret_cast<MemoryChunk*>(base::Acquire_Load(&next_chunk_));
Jakob Kummerow 2014/06/05 11:49:06 IWYU? (Include What You Use — since this line now calls base::Acquire_Load directly, should this header #include the base atomicops header itself rather than relying on a transitive include?)
295 } 295 }
296 296
297 MemoryChunk* prev_chunk() const { 297 MemoryChunk* prev_chunk() const {
298 return reinterpret_cast<MemoryChunk*>(Acquire_Load(&prev_chunk_)); 298 return reinterpret_cast<MemoryChunk*>(base::Acquire_Load(&prev_chunk_));
299 } 299 }
300 300
301 void set_next_chunk(MemoryChunk* next) { 301 void set_next_chunk(MemoryChunk* next) {
302 Release_Store(&next_chunk_, reinterpret_cast<AtomicWord>(next)); 302 base::Release_Store(&next_chunk_, reinterpret_cast<base::AtomicWord>(next));
303 } 303 }
304 304
305 void set_prev_chunk(MemoryChunk* prev) { 305 void set_prev_chunk(MemoryChunk* prev) {
306 Release_Store(&prev_chunk_, reinterpret_cast<AtomicWord>(prev)); 306 base::Release_Store(&prev_chunk_, reinterpret_cast<base::AtomicWord>(prev));
307 } 307 }
308 308
309 Space* owner() const { 309 Space* owner() const {
310 if ((reinterpret_cast<intptr_t>(owner_) & kFailureTagMask) == 310 if ((reinterpret_cast<intptr_t>(owner_) & kFailureTagMask) ==
311 kFailureTag) { 311 kFailureTag) {
312 return reinterpret_cast<Space*>(reinterpret_cast<intptr_t>(owner_) - 312 return reinterpret_cast<Space*>(reinterpret_cast<intptr_t>(owner_) -
313 kFailureTag); 313 kFailureTag);
314 } else { 314 } else {
315 return NULL; 315 return NULL;
316 } 316 }
(...skipping 137 matching lines...) Expand 10 before | Expand all | Expand 10 after
454 // PARALLEL_SWEEPING_PENDING - This page is ready for parallel sweeping. 454 // PARALLEL_SWEEPING_PENDING - This page is ready for parallel sweeping.
455 enum ParallelSweepingState { 455 enum ParallelSweepingState {
456 PARALLEL_SWEEPING_DONE, 456 PARALLEL_SWEEPING_DONE,
457 PARALLEL_SWEEPING_FINALIZE, 457 PARALLEL_SWEEPING_FINALIZE,
458 PARALLEL_SWEEPING_IN_PROGRESS, 458 PARALLEL_SWEEPING_IN_PROGRESS,
459 PARALLEL_SWEEPING_PENDING 459 PARALLEL_SWEEPING_PENDING
460 }; 460 };
461 461
462 ParallelSweepingState parallel_sweeping() { 462 ParallelSweepingState parallel_sweeping() {
463 return static_cast<ParallelSweepingState>( 463 return static_cast<ParallelSweepingState>(
464 Acquire_Load(&parallel_sweeping_)); 464 base::Acquire_Load(&parallel_sweeping_));
465 } 465 }
466 466
467 void set_parallel_sweeping(ParallelSweepingState state) { 467 void set_parallel_sweeping(ParallelSweepingState state) {
468 Release_Store(&parallel_sweeping_, state); 468 base::Release_Store(&parallel_sweeping_, state);
469 } 469 }
470 470
471 bool TryParallelSweeping() { 471 bool TryParallelSweeping() {
472 return Acquire_CompareAndSwap(&parallel_sweeping_, 472 return base::Acquire_CompareAndSwap(
473 PARALLEL_SWEEPING_PENDING, 473 &parallel_sweeping_, PARALLEL_SWEEPING_PENDING,
474 PARALLEL_SWEEPING_IN_PROGRESS) == 474 PARALLEL_SWEEPING_IN_PROGRESS) == PARALLEL_SWEEPING_PENDING;
475 PARALLEL_SWEEPING_PENDING;
476 } 475 }
477 476
478 // Manage live byte count (count of bytes known to be live, 477 // Manage live byte count (count of bytes known to be live,
479 // because they are marked black). 478 // because they are marked black).
480 void ResetLiveBytes() { 479 void ResetLiveBytes() {
481 if (FLAG_gc_verbose) { 480 if (FLAG_gc_verbose) {
482 PrintF("ResetLiveBytes:%p:%x->0\n", 481 PrintF("ResetLiveBytes:%p:%x->0\n",
483 static_cast<void*>(this), live_byte_count_); 482 static_cast<void*>(this), live_byte_count_);
484 } 483 }
485 live_byte_count_ = 0; 484 live_byte_count_ = 0;
(...skipping 214 matching lines...) Expand 10 before | Expand all | Expand 10 after
700 SlotsBuffer* slots_buffer_; 699 SlotsBuffer* slots_buffer_;
701 SkipList* skip_list_; 700 SkipList* skip_list_;
702 intptr_t write_barrier_counter_; 701 intptr_t write_barrier_counter_;
703 // Used by the incremental marker to keep track of the scanning progress in 702 // Used by the incremental marker to keep track of the scanning progress in
704 // large objects that have a progress bar and are scanned in increments. 703 // large objects that have a progress bar and are scanned in increments.
705 int progress_bar_; 704 int progress_bar_;
706 // Assuming the initial allocation on a page is sequential, 705 // Assuming the initial allocation on a page is sequential,
707 // count highest number of bytes ever allocated on the page. 706 // count highest number of bytes ever allocated on the page.
708 int high_water_mark_; 707 int high_water_mark_;
709 708
710 AtomicWord parallel_sweeping_; 709 base::AtomicWord parallel_sweeping_;
711 710
712 // PagedSpace free-list statistics. 711 // PagedSpace free-list statistics.
713 intptr_t available_in_small_free_list_; 712 intptr_t available_in_small_free_list_;
714 intptr_t available_in_medium_free_list_; 713 intptr_t available_in_medium_free_list_;
715 intptr_t available_in_large_free_list_; 714 intptr_t available_in_large_free_list_;
716 intptr_t available_in_huge_free_list_; 715 intptr_t available_in_huge_free_list_;
717 intptr_t non_available_small_blocks_; 716 intptr_t non_available_small_blocks_;
718 717
719 static MemoryChunk* Initialize(Heap* heap, 718 static MemoryChunk* Initialize(Heap* heap,
720 Address base, 719 Address base,
721 size_t size, 720 size_t size,
722 Address area_start, 721 Address area_start,
723 Address area_end, 722 Address area_end,
724 Executability executable, 723 Executability executable,
725 Space* owner); 724 Space* owner);
726 725
727 private: 726 private:
728 // next_chunk_ holds a pointer of type MemoryChunk 727 // next_chunk_ holds a pointer of type MemoryChunk
729 AtomicWord next_chunk_; 728 base::AtomicWord next_chunk_;
730 // prev_chunk_ holds a pointer of type MemoryChunk 729 // prev_chunk_ holds a pointer of type MemoryChunk
731 AtomicWord prev_chunk_; 730 base::AtomicWord prev_chunk_;
732 731
733 friend class MemoryAllocator; 732 friend class MemoryAllocator;
734 }; 733 };
735 734
736 735
737 STATIC_ASSERT(sizeof(MemoryChunk) <= MemoryChunk::kHeaderSize); 736 STATIC_ASSERT(sizeof(MemoryChunk) <= MemoryChunk::kHeaderSize);
738 737
739 738
740 // ----------------------------------------------------------------------------- 739 // -----------------------------------------------------------------------------
741 // A page is a memory chunk of a size 1MB. Large object pages may be larger. 740 // A page is a memory chunk of a size 1MB. Large object pages may be larger.
(...skipping 783 matching lines...) Expand 10 before | Expand all | Expand 10 after
1525 1524
1526 FreeListNode* PickNodeFromList(int *node_size); 1525 FreeListNode* PickNodeFromList(int *node_size);
1527 FreeListNode* PickNodeFromList(int size_in_bytes, int *node_size); 1526 FreeListNode* PickNodeFromList(int size_in_bytes, int *node_size);
1528 1527
1529 intptr_t EvictFreeListItemsInList(Page* p); 1528 intptr_t EvictFreeListItemsInList(Page* p);
1530 bool ContainsPageFreeListItemsInList(Page* p); 1529 bool ContainsPageFreeListItemsInList(Page* p);
1531 1530
1532 void RepairFreeList(Heap* heap); 1531 void RepairFreeList(Heap* heap);
1533 1532
1534 FreeListNode* top() const { 1533 FreeListNode* top() const {
1535 return reinterpret_cast<FreeListNode*>(NoBarrier_Load(&top_)); 1534 return reinterpret_cast<FreeListNode*>(base::NoBarrier_Load(&top_));
1536 } 1535 }
1537 1536
1538 void set_top(FreeListNode* top) { 1537 void set_top(FreeListNode* top) {
1539 NoBarrier_Store(&top_, reinterpret_cast<AtomicWord>(top)); 1538 base::NoBarrier_Store(&top_, reinterpret_cast<base::AtomicWord>(top));
1540 } 1539 }
1541 1540
1542 FreeListNode** GetEndAddress() { return &end_; } 1541 FreeListNode** GetEndAddress() { return &end_; }
1543 FreeListNode* end() const { return end_; } 1542 FreeListNode* end() const { return end_; }
1544 void set_end(FreeListNode* end) { end_ = end; } 1543 void set_end(FreeListNode* end) { end_ = end; }
1545 1544
1546 int* GetAvailableAddress() { return &available_; } 1545 int* GetAvailableAddress() { return &available_; }
1547 int available() const { return available_; } 1546 int available() const { return available_; }
1548 void set_available(int available) { available_ = available; } 1547 void set_available(int available) { available_ = available; }
1549 1548
1550 Mutex* mutex() { return &mutex_; } 1549 Mutex* mutex() { return &mutex_; }
1551 1550
1552 bool IsEmpty() { 1551 bool IsEmpty() {
1553 return top() == 0; 1552 return top() == 0;
1554 } 1553 }
1555 1554
1556 #ifdef DEBUG 1555 #ifdef DEBUG
1557 intptr_t SumFreeList(); 1556 intptr_t SumFreeList();
1558 int FreeListLength(); 1557 int FreeListLength();
1559 #endif 1558 #endif
1560 1559
1561 private: 1560 private:
1562 // top_ points to the top FreeListNode* in the free list category. 1561 // top_ points to the top FreeListNode* in the free list category.
1563 AtomicWord top_; 1562 base::AtomicWord top_;
1564 FreeListNode* end_; 1563 FreeListNode* end_;
1565 Mutex mutex_; 1564 Mutex mutex_;
1566 1565
1567 // Total available bytes in all blocks of this free list category. 1566 // Total available bytes in all blocks of this free list category.
1568 int available_; 1567 int available_;
1569 }; 1568 };
1570 1569
1571 1570
1572 // The free list for the old space. The free list is organized in such a way 1571 // The free list for the old space. The free list is organized in such a way
1573 // as to encourage objects allocated around the same time to be near each 1572 // as to encourage objects allocated around the same time to be near each
(...skipping 1426 matching lines...) Expand 10 before | Expand all | Expand 10 after
3000 } 2999 }
3001 // Must be small, since an iteration is used for lookup. 3000 // Must be small, since an iteration is used for lookup.
3002 static const int kMaxComments = 64; 3001 static const int kMaxComments = 64;
3003 }; 3002 };
3004 #endif 3003 #endif
3005 3004
3006 3005
3007 } } // namespace v8::internal 3006 } } // namespace v8::internal
3008 3007
3009 #endif // V8_SPACES_H_ 3008 #endif // V8_SPACES_H_
OLDNEW

Powered by Google App Engine
This is Rietveld 408576698