OLD | NEW |
---|---|
1 // Copyright 2011 the V8 project authors. All rights reserved. | 1 // Copyright 2011 the V8 project authors. All rights reserved. |
2 // Use of this source code is governed by a BSD-style license that can be | 2 // Use of this source code is governed by a BSD-style license that can be |
3 // found in the LICENSE file. | 3 // found in the LICENSE file. |
4 | 4 |
5 #ifndef V8_STORE_BUFFER_H_ | 5 #ifndef V8_STORE_BUFFER_H_ |
6 #define V8_STORE_BUFFER_H_ | 6 #define V8_STORE_BUFFER_H_ |
7 | 7 |
8 #include "src/allocation.h" | 8 #include "src/allocation.h" |
9 #include "src/base/logging.h" | 9 #include "src/base/logging.h" |
10 #include "src/base/platform/platform.h" | 10 #include "src/base/platform/platform.h" |
11 #include "src/cancelable-task.h" | |
11 #include "src/globals.h" | 12 #include "src/globals.h" |
12 #include "src/heap/slot-set.h" | 13 #include "src/heap/slot-set.h" |
13 | 14 |
14 namespace v8 { | 15 namespace v8 { |
15 namespace internal { | 16 namespace internal { |
16 | 17 |
17 // Intermediate buffer that accumulates old-to-new stores from the generated | 18 // Intermediate buffer that accumulates old-to-new stores from the generated |
18 // code. On buffer overflow the slots are moved to the remembered set. | 19 // code. Moreover, it stores invalid old-to-new slots with two two entries. |
ulan
2016/10/28 09:18:02
Nit: with _two_ two entries.
Hannes Payer (out of office)
2016/10/28 09:58:32
Done.
| |
20 // The first is a tagged address of the start of the invalid range, the second | |
21 // one is the end address of the invalid range or null if there is just one slot | |
22 // that needs to be removed from the remembered set. On buffer overflow the | |
23 // slots are moved to the remembered set. | |
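[Editor note, not part of the CL] A minimal sketch of how a consumer draining the buffer might interpret the two-entry encoding described in the comment above. Only the kDeletionTag idea from this header is assumed; Address is a stand-in typedef, and DrainSketch plus the printf placeholders are illustrative only, not V8 API.

#include <cstdint>
#include <cstdio>

using Address = uint8_t*;             // stand-in for V8's Address type
constexpr intptr_t kDeletionTag = 1;  // mirrors the constant declared below

inline bool IsDeletionAddress(Address a) {
  return reinterpret_cast<intptr_t>(a) & kDeletionTag;
}
inline Address UnmarkDeletionAddress(Address a) {
  return reinterpret_cast<Address>(reinterpret_cast<intptr_t>(a) & ~kDeletionTag);
}

// Drains the range [start, top). A plain entry is an old-to-new store; a
// tagged entry starts a two-entry record: tagged start address, then either
// the end of the invalid range or nullptr for a single slot. Assumes every
// tagged entry is followed by its partner entry.
void DrainSketch(Address* start, Address* top) {
  for (Address* p = start; p != top; p++) {
    if (IsDeletionAddress(*p)) {
      Address begin = UnmarkDeletionAddress(*p);
      Address end = *(++p);  // second entry of the pair
      if (end == nullptr) {
        std::printf("remove slot %p\n", static_cast<void*>(begin));  // stand-in for a remembered-set removal
      } else {
        std::printf("remove range [%p, %p)\n", static_cast<void*>(begin),
                    static_cast<void*>(end));  // stand-in for a range removal
      }
    } else {
      std::printf("insert slot %p\n", static_cast<void*>(*p));  // ordinary old-to-new store
    }
  }
}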
19 class StoreBuffer { | 24 class StoreBuffer { |
20 public: | 25 public: |
21 static const int kStoreBufferSize = 1 << (14 + kPointerSizeLog2); | 26 static const int kStoreBufferSize = 1 << (14 + kPointerSizeLog2); |
22 static const int kStoreBufferMask = kStoreBufferSize - 1; | 27 static const int kStoreBufferMask = kStoreBufferSize - 1; |
28 static const int kStoreBuffers = 2; | |
29 static const intptr_t kDeletionTag = 1; | |
23 | 30 |
24 static void StoreBufferOverflow(Isolate* isolate); | 31 static void StoreBufferOverflow(Isolate* isolate); |
25 | 32 |
26 explicit StoreBuffer(Heap* heap); | 33 explicit StoreBuffer(Heap* heap); |
27 void SetUp(); | 34 void SetUp(); |
28 void TearDown(); | 35 void TearDown(); |
29 | 36 |
30 // Used to add entries from generated code. | 37 // Used to add entries from generated code. |
31 inline Address* top_address() { return reinterpret_cast<Address*>(&top_); } | 38 inline Address* top_address() { return reinterpret_cast<Address*>(&top_); } |
32 | 39 |
33 void MoveEntriesToRememberedSet(); | 40 // Moves entries from a specific store buffer to the remembered set. This |
41 // method takes a lock. | |
42 void MoveEntriesToRememberedSet(int index); | |
43 | |
44 // This method ensures that all used store buffer entries are transferred to | |
45 // the remembered set. | |
46 void MoveAllEntriesToRememberedSet(); | |
47 | |
48 inline bool IsDeletionAddress(Address address) const { | |
49 return reinterpret_cast<intptr_t>(address) & kDeletionTag; | |
50 } | |
51 | |
52 inline Address MarkDeletionAddress(Address address) { | |
53 return reinterpret_cast<Address>(reinterpret_cast<intptr_t>(address) | | |
54 kDeletionTag); | |
55 } | |
56 | |
57 inline Address UnmarkDeletionAddress(Address address) { | |
58 return reinterpret_cast<Address>(reinterpret_cast<intptr_t>(address) & | |
59 ~kDeletionTag); | |
60 } | |
61 | |
62 // If we only want to delete a single slot, end should be set to null, which | |
63 // will be written into the second field. When processing the store buffer | |
64 // the more efficient Remove method will be called in this case. | |
65 void DeleteEntry(Address start, Address end = nullptr); | |
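[Editor note, not part of the CL] Conversely, a sketch of the producer side matching the comment above: the tagged start goes into one buffer entry and end (or nullptr) into the next. MarkDeletionAddress mirrors the helper declared earlier; Address and DeleteEntrySketch are illustrative stand-ins, and top/limit/overflow handling is elided.

#include <cstdint>

using Address = uint8_t*;             // stand-in for V8's Address type
constexpr intptr_t kDeletionTag = 1;  // mirrors the constant declared above

inline Address MarkDeletionAddress(Address a) {
  return reinterpret_cast<Address>(reinterpret_cast<intptr_t>(a) | kDeletionTag);
}

// Appends a deletion record at *top: first the tagged start address, then end,
// or nullptr when only a single slot is invalid.
void DeleteEntrySketch(Address** top, Address start, Address end = nullptr) {
  *(*top)++ = MarkDeletionAddress(start);  // first entry: tagged start
  *(*top)++ = end;                         // second entry: end, or nullptr
}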
66 | |
67 // Used by the concurrent processing thread to transfer entries from the | |
68 // store buffer to the remembered set. | |
69 void ConcurrentlyProcessStoreBuffer(); | |
70 | |
71 bool Empty() { | |
72 for (int i = 0; i < kStoreBuffers; i++) { | |
73 if (lazy_top_[i].Value()) { | |
74 return false; | |
75 } | |
76 } | |
77 return top_ == start_[current_]; | |
78 } | |
34 | 79 |
35 private: | 80 private: |
81 // There are two store buffers. If one store buffer fills up, the main thread | |
82 // publishes the top pointer of the store buffer that needs processing in its | |
83 // global lazy_top_ field. After that it starts the concurrent processing | |
84 // thread. The concurrent processing thread iterates over the lazy_top_ array | |
85 // and will look for a set top pointer. If one is set, it will grab the given | |
86 // mutex and transfer its entries to the remembered set. If the concurrent | |
87 // thread does not make progress, the main thread will perform the work. | |
88 // Important: there is an ordering constraint. The store buffer with the | |
89 // older entries has to be processed first. | |
90 class Task : public CancelableTask { | |
91 public: | |
92 Task(Isolate* isolate, StoreBuffer* store_buffer) | |
93 : CancelableTask(isolate), store_buffer_(store_buffer) {} | |
94 virtual ~Task() {} | |
95 | |
96 private: | |
97 void RunInternal() override { | |
98 store_buffer_->ConcurrentlyProcessStoreBuffer(); | |
99 } | |
100 StoreBuffer* store_buffer_; | |
101 DISALLOW_COPY_AND_ASSIGN(Task); | |
102 }; | |
103 | |
104 void FlipStoreBuffers(); | |
105 | |
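[Editor note, not part of the CL] To make the double-buffering scheme from the comment above concrete, here is a rough self-contained model of the flip: publish the full buffer's top for the concurrent task, switch to the other buffer, and fall back to main-thread processing if the task makes no progress. It reuses the member names from this header (start_, top_, lazy_top_, current_) but is not the actual FlipStoreBuffers() implementation; locking, task posting and the remembered-set transfer are only indicated in comments.

#include <cstdint>

using Address = uint8_t*;  // stand-in for V8's Address type

struct FlipModel {
  static const int kStoreBuffers = 2;
  Address* start_[kStoreBuffers] = {nullptr, nullptr};     // beginning of each buffer
  Address* top_ = nullptr;                                  // write position in the current buffer
  Address* lazy_top_[kStoreBuffers] = {nullptr, nullptr};   // published full buffer, nullptr if none
  int current_ = 0;

  void Flip() {
    // In the real code this runs under the mutex. Draining the *other* buffer
    // first preserves the ordering constraint (older entries first).
    int other = (current_ + 1) % kStoreBuffers;
    // MoveEntriesToRememberedSet(other) would run here if lazy_top_[other] is set.
    lazy_top_[current_] = top_;   // publish the full buffer for the concurrent task
    current_ = other;             // switch to the empty buffer
    top_ = start_[current_];      // continue writing from its start
    // ...then post a background task (or leave the already-running one) to
    // drain lazy_top_; if it makes no progress, the main thread does the work.
  }
};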
36 Heap* heap_; | 106 Heap* heap_; |
37 | 107 |
38 Address* top_; | 108 Address* top_; |
39 | 109 |
40 // The start and the limit of the buffer that contains store slots | 110 // The start and the limit of the buffer that contains store slots |
41 // added from the generated code. | 111 // added from the generated code. We have to chunks of store buffers. |
ulan
2016/10/28 09:18:02
typo: We have _two_
Hannes Payer (out of office)
2016/10/28 09:58:32
Done.
| |
42 Address* start_; | 112 // Whenever one fills up, it will be put into a concurrent processing queue |
43 Address* limit_; | 113 // and the other empty one will be used in the meantime. |
114 Address* start_[kStoreBuffers]; | |
115 Address* limit_[kStoreBuffers]; | |
116 base::AtomicValue<Address*> lazy_top_[kStoreBuffers]; | |
ulan
2016/10/28 09:18:02
Is it an invariant that at most one lazy_top_ is non-null?
Hannes Payer (out of office)
2016/10/28 09:58:32
Yes, that is correct. Done.
| |
117 base::Mutex mutex_; | |
118 | |
119 base::AtomicValue<bool> task_running_; | |
120 | |
121 // Points to the current buffer in use. | |
122 int current_; | |
44 | 123 |
45 base::VirtualMemory* virtual_memory_; | 124 base::VirtualMemory* virtual_memory_; |
46 }; | 125 }; |
47 | 126 |
48 } // namespace internal | 127 } // namespace internal |
49 } // namespace v8 | 128 } // namespace v8 |
50 | 129 |
51 #endif // V8_STORE_BUFFER_H_ | 130 #endif // V8_STORE_BUFFER_H_ |