OLD | NEW |
1 // Copyright 2016 the V8 project authors. All rights reserved. | 1 // Copyright 2016 the V8 project authors. All rights reserved. |
2 // Use of this source code is governed by a BSD-style license that can be | 2 // Use of this source code is governed by a BSD-style license that can be |
3 // found in the LICENSE file. | 3 // found in the LICENSE file. |
4 | 4 |
5 // Note 1: Any file that includes this one should include object-macros-undef.h | 5 // Note 1: Any file that includes this one should include object-macros-undef.h |
6 // at the bottom. | 6 // at the bottom. |
7 | 7 |
8 // Note 2: This file is deliberately missing the include guards (the undeffing | 8 // Note 2: This file is deliberately missing the include guards (the undeffing |
9 // approach wouldn't work otherwise). | 9 // approach wouldn't work otherwise). |
10 | 10 |
| 11 // The accessors with RELAXED_, ACQUIRE_, and RELEASE_ prefixes should be used |
| 12 // for fields that can be written to and read from multiple threads at the same |
| 13 // time. See comments in src/base/atomicops.h for the memory ordering semantics. |
| 14 |
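A quick illustration of the acquire/release contract these prefixes refer to, written with standard C++ atomics rather than V8's base:: wrappers (the field names below are made up for the example): a release store on the writer side pairs with an acquire load on the reader side, so a reader that observes the released value also observes everything the writer stored before it. Relaxed accesses are atomic but promise no such ordering.

  #include <atomic>

  std::atomic<int> length{0};  // published field (think RELEASE/ACQUIRE accessors)
  int payload = 0;             // plain field, published via `length`

  void Writer() {
    payload = 42;                                // ordinary write
    length.store(1, std::memory_order_release);  // like RELEASE_WRITE_FIELD
  }

  void Reader() {
    if (length.load(std::memory_order_acquire) == 1) {  // like ACQUIRE_READ_FIELD
      // Guaranteed to observe payload == 42 here.
    }
  }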
11 #define DECL_BOOLEAN_ACCESSORS(name) \ | 15 #define DECL_BOOLEAN_ACCESSORS(name) \ |
12 inline bool name() const; \ | 16 inline bool name() const; \ |
13 inline void set_##name(bool value); | 17 inline void set_##name(bool value); |
14 | 18 |
15 #define DECL_INT_ACCESSORS(name) \ | 19 #define DECL_INT_ACCESSORS(name) \ |
16 inline int name() const; \ | 20 inline int name() const; \ |
17 inline void set_##name(int value); | 21 inline void set_##name(int value); |
18 | 22 |
19 #define DECL_ACCESSORS(name, type) \ | 23 #define DECL_ACCESSORS(name, type) \ |
20 inline type* name() const; \ | 24 inline type* name() const; \ |
(...skipping 52 matching lines...)
73 | 77 |
74 #define SYNCHRONIZED_SMI_ACCESSORS(holder, name, offset) \ | 78 #define SYNCHRONIZED_SMI_ACCESSORS(holder, name, offset) \ |
75 int holder::synchronized_##name() const { \ | 79 int holder::synchronized_##name() const { \ |
76 Object* value = ACQUIRE_READ_FIELD(this, offset); \ | 80 Object* value = ACQUIRE_READ_FIELD(this, offset); \ |
77 return Smi::cast(value)->value(); \ | 81 return Smi::cast(value)->value(); \ |
78 } \ | 82 } \ |
79 void holder::synchronized_set_##name(int value) { \ | 83 void holder::synchronized_set_##name(int value) { \ |
80 RELEASE_WRITE_FIELD(this, offset, Smi::FromInt(value)); \ | 84 RELEASE_WRITE_FIELD(this, offset, Smi::FromInt(value)); \ |
81 } | 85 } |
82 | 86 |
83 #define NOBARRIER_SMI_ACCESSORS(holder, name, offset) \ | 87 #define RELAXED_SMI_ACCESSORS(holder, name, offset) \ |
84 int holder::nobarrier_##name() const { \ | 88 int holder::relaxed_read_##name() const { \ |
85 Object* value = NOBARRIER_READ_FIELD(this, offset); \ | 89 Object* value = RELAXED_READ_FIELD(this, offset); \ |
86 return Smi::cast(value)->value(); \ | 90 return Smi::cast(value)->value(); \ |
87 } \ | 91 } \ |
88 void holder::nobarrier_set_##name(int value) { \ | 92 void holder::relaxed_write_##name(int value) { \ |
89 NOBARRIER_WRITE_FIELD(this, offset, Smi::FromInt(value)); \ | 93 RELAXED_WRITE_FIELD(this, offset, Smi::FromInt(value)); \ |
90 } | 94 } |
91 | 95 |
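For concreteness, here is roughly what the new macro expands to for a hypothetical holder class Foo with a hypothetical kLengthOffset (neither appears in this CL):

  // RELAXED_SMI_ACCESSORS(Foo, length, kLengthOffset) expands to roughly:
  int Foo::relaxed_read_length() const {
    Object* value = RELAXED_READ_FIELD(this, kLengthOffset);
    return Smi::cast(value)->value();
  }
  void Foo::relaxed_write_length(int value) {
    RELAXED_WRITE_FIELD(this, kLengthOffset, Smi::FromInt(value));
  }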
92 #define BOOL_GETTER(holder, field, name, offset) \ | 96 #define BOOL_GETTER(holder, field, name, offset) \ |
93 bool holder::name() const { return BooleanBit::get(field(), offset); } | 97 bool holder::name() const { return BooleanBit::get(field(), offset); } |
94 | 98 |
95 #define BOOL_ACCESSORS(holder, field, name, offset) \ | 99 #define BOOL_ACCESSORS(holder, field, name, offset) \ |
96 bool holder::name() const { return BooleanBit::get(field(), offset); } \ | 100 bool holder::name() const { return BooleanBit::get(field(), offset); } \ |
97 void holder::set_##name(bool value) { \ | 101 void holder::set_##name(bool value) { \ |
98 set_##field(BooleanBit::set(field(), offset, value)); \ | 102 set_##field(BooleanBit::set(field(), offset, value)); \ |
99 } | 103 } |
100 | 104 |
101 #define TYPE_CHECKER(type, instancetype) \ | 105 #define TYPE_CHECKER(type, instancetype) \ |
102 bool HeapObject::Is##type() const { \ | 106 bool HeapObject::Is##type() const { \ |
103 return map()->instance_type() == instancetype; \ | 107 return map()->instance_type() == instancetype; \ |
104 } | 108 } |
105 | 109 |
106 #define FIELD_ADDR(p, offset) \ | 110 #define FIELD_ADDR(p, offset) \ |
107 (reinterpret_cast<byte*>(p) + offset - kHeapObjectTag) | 111 (reinterpret_cast<byte*>(p) + offset - kHeapObjectTag) |
108 | 112 |
109 #define FIELD_ADDR_CONST(p, offset) \ | 113 #define FIELD_ADDR_CONST(p, offset) \ |
110 (reinterpret_cast<const byte*>(p) + offset - kHeapObjectTag) | 114 (reinterpret_cast<const byte*>(p) + offset - kHeapObjectTag) |
111 | 115 |
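The `- kHeapObjectTag` term undoes V8's pointer tagging: pointers to heap objects carry a low tag bit (kHeapObjectTag is 1), so subtracting the tag recovers the raw address. A worked example with a made-up address:

  #include <cstdint>

  // Hypothetical tagged pointer: the object lives at 0x1000, tag bit set.
  uintptr_t tagged = 0x1001;
  // FIELD_ADDR(p, offset) computes tagged + offset - kHeapObjectTag:
  uintptr_t field_addr = tagged + 8 - 1;  // == 0x1008, the raw address of
                                          // the field at offset 8.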
112 #define READ_FIELD(p, offset) \ | 116 #define READ_FIELD(p, offset) \ |
113 (*reinterpret_cast<Object* const*>(FIELD_ADDR_CONST(p, offset))) | 117 (*reinterpret_cast<Object* const*>(FIELD_ADDR_CONST(p, offset))) |
114 | 118 |
115 #define ACQUIRE_READ_FIELD(p, offset) \ | 119 #define ACQUIRE_READ_FIELD(p, offset) \ |
116 reinterpret_cast<Object*>(base::Acquire_Load( \ | 120 reinterpret_cast<Object*>(base::Acquire_Load( \ |
117 reinterpret_cast<const base::AtomicWord*>(FIELD_ADDR_CONST(p, offset)))) | 121 reinterpret_cast<const base::AtomicWord*>(FIELD_ADDR_CONST(p, offset)))) |
118 | 122 |
119 #define NOBARRIER_READ_FIELD(p, offset) \ | 123 #define RELAXED_READ_FIELD(p, offset) \ |
120 reinterpret_cast<Object*>(base::NoBarrier_Load( \ | 124 reinterpret_cast<Object*>(base::Relaxed_Load( \ |
121 reinterpret_cast<const base::AtomicWord*>(FIELD_ADDR_CONST(p, offset)))) | 125 reinterpret_cast<const base::AtomicWord*>(FIELD_ADDR_CONST(p, offset)))) |
122 | 126 |
123 #ifdef V8_CONCURRENT_MARKING | 127 #ifdef V8_CONCURRENT_MARKING |
124 #define WRITE_FIELD(p, offset, value) \ | 128 #define WRITE_FIELD(p, offset, value) \ |
125 base::NoBarrier_Store( \ | 129 base::Relaxed_Store( \ |
126 reinterpret_cast<base::AtomicWord*>(FIELD_ADDR(p, offset)), \ | 130 reinterpret_cast<base::AtomicWord*>(FIELD_ADDR(p, offset)), \ |
127 reinterpret_cast<base::AtomicWord>(value)); | 131 reinterpret_cast<base::AtomicWord>(value)); |
128 #else | 132 #else |
129 #define WRITE_FIELD(p, offset, value) \ | 133 #define WRITE_FIELD(p, offset, value) \ |
130 (*reinterpret_cast<Object**>(FIELD_ADDR(p, offset)) = value) | 134 (*reinterpret_cast<Object**>(FIELD_ADDR(p, offset)) = value) |
131 #endif | 135 #endif |
132 | 136 |
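The #ifdef here is presumably because concurrent marking lets the marker thread read object fields while the mutator writes them; a plain store would then be a C++-level data race, while a relaxed atomic store makes the racing access defined behavior without the cost of a fence. The same idea in standard atomics (names invented):

  #include <atomic>
  #include <cstdint>

  // One word of object state, also read by a concurrent marker thread.
  std::atomic<uintptr_t> field{0};

  void MutatorWrite(uintptr_t bits) {
    // Relaxed: atomic (no torn values for the marker), no ordering cost.
    field.store(bits, std::memory_order_relaxed);
  }

  uintptr_t MarkerRead() {
    return field.load(std::memory_order_relaxed);
  }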
133 #define RELEASE_WRITE_FIELD(p, offset, value) \ | 137 #define RELEASE_WRITE_FIELD(p, offset, value) \ |
134 base::Release_Store( \ | 138 base::Release_Store( \ |
135 reinterpret_cast<base::AtomicWord*>(FIELD_ADDR(p, offset)), \ | 139 reinterpret_cast<base::AtomicWord*>(FIELD_ADDR(p, offset)), \ |
136 reinterpret_cast<base::AtomicWord>(value)); | 140 reinterpret_cast<base::AtomicWord>(value)); |
137 | 141 |
138 #define NOBARRIER_WRITE_FIELD(p, offset, value) \ | 142 #define RELAXED_WRITE_FIELD(p, offset, value) \ |
139 base::NoBarrier_Store( \ | 143 base::Relaxed_Store( \ |
140 reinterpret_cast<base::AtomicWord*>(FIELD_ADDR(p, offset)), \ | 144 reinterpret_cast<base::AtomicWord*>(FIELD_ADDR(p, offset)), \ |
141 reinterpret_cast<base::AtomicWord>(value)); | 145 reinterpret_cast<base::AtomicWord>(value)); |
142 | 146 |
143 #define WRITE_BARRIER(heap, object, offset, value) \ | 147 #define WRITE_BARRIER(heap, object, offset, value) \ |
144 heap->incremental_marking()->RecordWrite( \ | 148 heap->incremental_marking()->RecordWrite( \ |
145 object, HeapObject::RawField(object, offset), value); \ | 149 object, HeapObject::RawField(object, offset), value); \ |
146 heap->RecordWrite(object, offset, value); | 150 heap->RecordWrite(object, offset, value); |
147 | 151 |
148 #define CONDITIONAL_WRITE_BARRIER(heap, object, offset, value, mode) \ | 152 #define CONDITIONAL_WRITE_BARRIER(heap, object, offset, value, mode) \ |
149 if (mode != SKIP_WRITE_BARRIER) { \ | 153 if (mode != SKIP_WRITE_BARRIER) { \ |
(...skipping 72 matching lines...)
222 | 226 |
223 #define READ_INT64_FIELD(p, offset) \ | 227 #define READ_INT64_FIELD(p, offset) \ |
224 (*reinterpret_cast<const int64_t*>(FIELD_ADDR_CONST(p, offset))) | 228 (*reinterpret_cast<const int64_t*>(FIELD_ADDR_CONST(p, offset))) |
225 | 229 |
226 #define WRITE_INT64_FIELD(p, offset, value) \ | 230 #define WRITE_INT64_FIELD(p, offset, value) \ |
227 (*reinterpret_cast<int64_t*>(FIELD_ADDR(p, offset)) = value) | 231 (*reinterpret_cast<int64_t*>(FIELD_ADDR(p, offset)) = value) |
228 | 232 |
229 #define READ_BYTE_FIELD(p, offset) \ | 233 #define READ_BYTE_FIELD(p, offset) \ |
230 (*reinterpret_cast<const byte*>(FIELD_ADDR_CONST(p, offset))) | 234 (*reinterpret_cast<const byte*>(FIELD_ADDR_CONST(p, offset))) |
231 | 235 |
232 #define NOBARRIER_READ_BYTE_FIELD(p, offset) \ | 236 #define RELAXED_READ_BYTE_FIELD(p, offset) \ |
233 static_cast<byte>(base::NoBarrier_Load( \ | 237 static_cast<byte>(base::Relaxed_Load( \ |
234 reinterpret_cast<base::Atomic8*>(FIELD_ADDR(p, offset)))) | 238 reinterpret_cast<base::Atomic8*>(FIELD_ADDR(p, offset)))) |
235 | 239 |
236 #define WRITE_BYTE_FIELD(p, offset, value) \ | 240 #define WRITE_BYTE_FIELD(p, offset, value) \ |
237 (*reinterpret_cast<byte*>(FIELD_ADDR(p, offset)) = value) | 241 (*reinterpret_cast<byte*>(FIELD_ADDR(p, offset)) = value) |
238 | 242 |
239 #define NOBARRIER_WRITE_BYTE_FIELD(p, offset, value) \ | 243 #define RELAXED_WRITE_BYTE_FIELD(p, offset, value) \ |
240 base::NoBarrier_Store( \ | 244 base::Relaxed_Store(reinterpret_cast<base::Atomic8*>(FIELD_ADDR(p, offset)), \ |
241 reinterpret_cast<base::Atomic8*>(FIELD_ADDR(p, offset)), \ | 245 static_cast<base::Atomic8>(value)); |
242 static_cast<base::Atomic8>(value)); | |
243 | 246 |
244 #ifdef VERIFY_HEAP | 247 #ifdef VERIFY_HEAP |
245 #define DECLARE_VERIFIER(Name) void Name##Verify(); | 248 #define DECLARE_VERIFIER(Name) void Name##Verify(); |
246 #else | 249 #else |
247 #define DECLARE_VERIFIER(Name) | 250 #define DECLARE_VERIFIER(Name) |
248 #endif | 251 #endif |