OLD | NEW |
---|---|
1 // Copyright 2016 the V8 project authors. All rights reserved. | 1 // Copyright 2016 the V8 project authors. All rights reserved. |
2 // Use of this source code is governed by a BSD-style license that can be | 2 // Use of this source code is governed by a BSD-style license that can be |
3 // found in the LICENSE file. | 3 // found in the LICENSE file. |
4 | 4 |
5 // Note 1: Any file that includes this one should include object-macros-undef.h | 5 // Note 1: Any file that includes this one should include object-macros-undef.h |
6 // at the bottom. | 6 // at the bottom. |
7 | 7 |
8 // Note 2: This file is deliberately missing the include guards (the undeffing | 8 // Note 2: This file is deliberately missing the include guards (the undeffing |
9 // approach wouldn't work otherwise). | 9 // approach wouldn't work otherwise). |
10 | 10 |
(...skipping 62 matching lines...) | |
73 | 73 |
74 #define SYNCHRONIZED_SMI_ACCESSORS(holder, name, offset) \ | 74 #define SYNCHRONIZED_SMI_ACCESSORS(holder, name, offset) \ |
75 int holder::synchronized_##name() const { \ | 75 int holder::synchronized_##name() const { \ |
76 Object* value = ACQUIRE_READ_FIELD(this, offset); \ | 76 Object* value = ACQUIRE_READ_FIELD(this, offset); \ |
77 return Smi::cast(value)->value(); \ | 77 return Smi::cast(value)->value(); \ |
78 } \ | 78 } \ |
79 void holder::synchronized_set_##name(int value) { \ | 79 void holder::synchronized_set_##name(int value) { \ |
80 RELEASE_WRITE_FIELD(this, offset, Smi::FromInt(value)); \ | 80 RELEASE_WRITE_FIELD(this, offset, Smi::FromInt(value)); \ |
81 } | 81 } |
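For illustration, a hypothetical use such as SYNCHRONIZED_SMI_ACCESSORS(Foo, length, kLengthOffset) (the holder, field, and offset names are invented here) expands to an acquire-load getter paired with a release-store setter:

    int Foo::synchronized_length() const {
      // Acquire load: pairs with the release store in the setter below.
      Object* value = ACQUIRE_READ_FIELD(this, kLengthOffset);
      return Smi::cast(value)->value();
    }
    void Foo::synchronized_set_length(int value) {
      // Release store: publishes prior writes to acquire readers.
      RELEASE_WRITE_FIELD(this, kLengthOffset, Smi::FromInt(value));
    }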
82 | 82 |
83 #define NOBARRIER_SMI_ACCESSORS(holder, name, offset) \ | 83 #define RELAXED_SMI_ACCESSORS(holder, name, offset) \ |
Hannes Payer (out of office), 2017/05/29 13:12:39:
Let's also add a big comment on top of this file e
ulan, 2017/05/29 13:41:06:
atomicops.h has a comment explaining relaxed, acqu
84 int holder::nobarrier_##name() const { \ | 84 int holder::relaxed_read_##name() const { \ |
85 Object* value = NOBARRIER_READ_FIELD(this, offset); \ | 85 Object* value = RELAXED_READ_FIELD(this, offset); \ |
86 return Smi::cast(value)->value(); \ | 86 return Smi::cast(value)->value(); \ |
87 } \ | 87 } \ |
88 void holder::nobarrier_set_##name(int value) { \ | 88 void holder::relaxed_write_##name(int value) { \ |
89 NOBARRIER_WRITE_FIELD(this, offset, Smi::FromInt(value)); \ | 89 RELAXED_WRITE_FIELD(this, offset, Smi::FromInt(value)); \ |
90 } | 90 } |
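After the rename, a hypothetical RELAXED_SMI_ACCESSORS(Foo, hash, kHashOffset) (invented names) generates relaxed_read_/relaxed_write_ methods in place of the old nobarrier_/nobarrier_set_ ones:

    int Foo::relaxed_read_hash() const {
      // Relaxed load: atomic, but imposes no ordering on other accesses.
      Object* value = RELAXED_READ_FIELD(this, kHashOffset);
      return Smi::cast(value)->value();
    }
    void Foo::relaxed_write_hash(int value) {
      RELAXED_WRITE_FIELD(this, kHashOffset, Smi::FromInt(value));
    }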
91 | 91 |
92 #define BOOL_GETTER(holder, field, name, offset) \ | 92 #define BOOL_GETTER(holder, field, name, offset) \ |
93 bool holder::name() const { return BooleanBit::get(field(), offset); } | 93 bool holder::name() const { return BooleanBit::get(field(), offset); } |
94 | 94 |
95 #define BOOL_ACCESSORS(holder, field, name, offset) \ | 95 #define BOOL_ACCESSORS(holder, field, name, offset) \ |
96 bool holder::name() const { return BooleanBit::get(field(), offset); } \ | 96 bool holder::name() const { return BooleanBit::get(field(), offset); } \ |
97 void holder::set_##name(bool value) { \ | 97 void holder::set_##name(bool value) { \ |
98 set_##field(BooleanBit::set(field(), offset, value)); \ | 98 set_##field(BooleanBit::set(field(), offset, value)); \ |
99 } | 99 } |
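BOOL_ACCESSORS stores a flag in a single bit of an integer field via BooleanBit. A minimal standalone sketch of that bit packing (illustrative only, not V8's actual BooleanBit implementation):

    #include <cstdint>

    // Read bit `pos` of `field` as a bool.
    bool BitGet(uint32_t field, int pos) { return (field >> pos) & 1; }

    // Return `field` with bit `pos` forced to `value`.
    uint32_t BitSet(uint32_t field, int pos, bool value) {
      return value ? (field | (1u << pos)) : (field & ~(1u << pos));
    }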
100 | 100 |
101 #define TYPE_CHECKER(type, instancetype) \ | 101 #define TYPE_CHECKER(type, instancetype) \ |
102 bool HeapObject::Is##type() const { \ | 102 bool HeapObject::Is##type() const { \ |
103 return map()->instance_type() == instancetype; \ | 103 return map()->instance_type() == instancetype; \ |
104 } | 104 } |
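For example, TYPE_CHECKER(Foo, FOO_TYPE) (placeholder names) expands to a predicate that compares the instance type recorded in the object's map:

    bool HeapObject::IsFoo() const {
      return map()->instance_type() == FOO_TYPE;
    }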
105 | 105 |
106 #define FIELD_ADDR(p, offset) \ | 106 #define FIELD_ADDR(p, offset) \ |
107 (reinterpret_cast<byte*>(p) + offset - kHeapObjectTag) | 107 (reinterpret_cast<byte*>(p) + offset - kHeapObjectTag) |
108 | 108 |
109 #define FIELD_ADDR_CONST(p, offset) \ | 109 #define FIELD_ADDR_CONST(p, offset) \ |
110 (reinterpret_cast<const byte*>(p) + offset - kHeapObjectTag) | 110 (reinterpret_cast<const byte*>(p) + offset - kHeapObjectTag) |
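FIELD_ADDR removes the heap-object pointer tag before indexing: a tagged pointer points kHeapObjectTag bytes past the real object start. A self-contained sketch of the arithmetic, assuming the usual tag value of 1:

    #include <cstdint>

    constexpr intptr_t kTag = 1;  // stand-in for kHeapObjectTag

    // Raw address of the field at `offset` inside the object that the
    // tagged pointer `p` refers to.
    inline uint8_t* FieldAddr(void* p, int offset) {
      return reinterpret_cast<uint8_t*>(p) + offset - kTag;
    }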
111 | 111 |
112 #define READ_FIELD(p, offset) \ | 112 #define READ_FIELD(p, offset) \ |
113 (*reinterpret_cast<Object* const*>(FIELD_ADDR_CONST(p, offset))) | 113 (*reinterpret_cast<Object* const*>(FIELD_ADDR_CONST(p, offset))) |
114 | 114 |
115 #define ACQUIRE_READ_FIELD(p, offset) \ | 115 #define ACQUIRE_READ_FIELD(p, offset) \ |
116 reinterpret_cast<Object*>(base::Acquire_Load( \ | 116 reinterpret_cast<Object*>(base::Acquire_Load( \ |
117 reinterpret_cast<const base::AtomicWord*>(FIELD_ADDR_CONST(p, offset)))) | 117 reinterpret_cast<const base::AtomicWord*>(FIELD_ADDR_CONST(p, offset)))) |
118 | 118 |
119 #define NOBARRIER_READ_FIELD(p, offset) \ | 119 #define RELAXED_READ_FIELD(p, offset) \ |
120 reinterpret_cast<Object*>(base::NoBarrier_Load( \ | 120 reinterpret_cast<Object*>(base::Relaxed_Load( \ |
121 reinterpret_cast<const base::AtomicWord*>(FIELD_ADDR_CONST(p, offset)))) | 121 reinterpret_cast<const base::AtomicWord*>(FIELD_ADDR_CONST(p, offset)))) |
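The read flavours correspond to the usual memory orders: plain, acquire, and relaxed. A self-contained std::atomic analogue (not V8's base:: atomics) of the difference between ACQUIRE_READ_FIELD and RELAXED_READ_FIELD:

    #include <atomic>
    #include <cstdint>

    std::atomic<intptr_t> field{0};

    // ACQUIRE_READ_FIELD: later memory accesses in this thread cannot be
    // reordered before this load; pairs with a release store.
    intptr_t AcquireRead() { return field.load(std::memory_order_acquire); }

    // RELAXED_READ_FIELD: atomic (no torn reads), but no ordering guarantees.
    intptr_t RelaxedRead() { return field.load(std::memory_order_relaxed); }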
122 | 122 |
123 #ifdef V8_CONCURRENT_MARKING | 123 #ifdef V8_CONCURRENT_MARKING |
124 #define WRITE_FIELD(p, offset, value) \ | 124 #define WRITE_FIELD(p, offset, value) \ |
125 base::NoBarrier_Store( \ | 125 base::Relaxed_Store( \ |
126 reinterpret_cast<base::AtomicWord*>(FIELD_ADDR(p, offset)), \ | 126 reinterpret_cast<base::AtomicWord*>(FIELD_ADDR(p, offset)), \ |
127 reinterpret_cast<base::AtomicWord>(value)); | 127 reinterpret_cast<base::AtomicWord>(value)); |
128 #else | 128 #else |
129 #define WRITE_FIELD(p, offset, value) \ | 129 #define WRITE_FIELD(p, offset, value) \ |
130 (*reinterpret_cast<Object**>(FIELD_ADDR(p, offset)) = value) | 130 (*reinterpret_cast<Object**>(FIELD_ADDR(p, offset)) = value) |
131 #endif | 131 #endif |
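Under V8_CONCURRENT_MARKING the plain store becomes a relaxed atomic store, so a concurrently running marker thread can never observe a torn pointer. A rough C++20 sketch of the same pattern using std::atomic_ref (illustrative, not the actual implementation):

    #include <atomic>
    #include <cstdint>

    void WriteField(intptr_t* slot, intptr_t value) {
    #ifdef V8_CONCURRENT_MARKING
      // Another thread may read *slot concurrently, so the store must be
      // atomic; relaxed ordering is enough for this sketch since nothing
      // else is published through the slot.
      std::atomic_ref<intptr_t>(*slot).store(value, std::memory_order_relaxed);
    #else
      // No concurrent readers: a plain store suffices.
      *slot = value;
    #endif
    }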
132 | 132 |
133 #define RELEASE_WRITE_FIELD(p, offset, value) \ | 133 #define RELEASE_WRITE_FIELD(p, offset, value) \ |
134 base::Release_Store( \ | 134 base::Release_Store( \ |
135 reinterpret_cast<base::AtomicWord*>(FIELD_ADDR(p, offset)), \ | 135 reinterpret_cast<base::AtomicWord*>(FIELD_ADDR(p, offset)), \ |
136 reinterpret_cast<base::AtomicWord>(value)); | 136 reinterpret_cast<base::AtomicWord>(value)); |
137 | 137 |
138 #define NOBARRIER_WRITE_FIELD(p, offset, value) \ | 138 #define RELAXED_WRITE_FIELD(p, offset, value) \ |
139 base::NoBarrier_Store( \ | 139 base::Relaxed_Store( \ |
140 reinterpret_cast<base::AtomicWord*>(FIELD_ADDR(p, offset)), \ | 140 reinterpret_cast<base::AtomicWord*>(FIELD_ADDR(p, offset)), \ |
141 reinterpret_cast<base::AtomicWord>(value)); | 141 reinterpret_cast<base::AtomicWord>(value)); |
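RELEASE_WRITE_FIELD is the counterpart of ACQUIRE_READ_FIELD (the pairing used by SYNCHRONIZED_SMI_ACCESSORS above): everything written before the release store is visible to a thread whose acquire load observes the stored value. A std::atomic analogue:

    #include <atomic>
    #include <cstdint>

    std::atomic<intptr_t> field{0};
    int payload = 0;  // ordinary data published through `field`

    void Publish() {
      payload = 42;                               // written first
      field.store(1, std::memory_order_release);  // like RELEASE_WRITE_FIELD
    }

    int Consume() {
      // Like ACQUIRE_READ_FIELD: spin until the new value is visible.
      while (field.load(std::memory_order_acquire) == 0) {
      }
      return payload;  // guaranteed to read 42
    }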
142 | 142 |
143 #define WRITE_BARRIER(heap, object, offset, value) \ | 143 #define WRITE_BARRIER(heap, object, offset, value) \ |
144 heap->incremental_marking()->RecordWrite( \ | 144 heap->incremental_marking()->RecordWrite( \ |
145 object, HeapObject::RawField(object, offset), value); \ | 145 object, HeapObject::RawField(object, offset), value); \ |
146 heap->RecordWrite(object, offset, value); | 146 heap->RecordWrite(object, offset, value); |
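WRITE_BARRIER(heap, obj, kSlotOffset, value) (placeholder arguments) expands to the two record-write calls shown above, roughly one for the incremental-marking barrier and one for the heap's own write-barrier bookkeeping:

    heap->incremental_marking()->RecordWrite(
        obj, HeapObject::RawField(obj, kSlotOffset), value);
    heap->RecordWrite(obj, kSlotOffset, value);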
147 | 147 |
148 #define CONDITIONAL_WRITE_BARRIER(heap, object, offset, value, mode) \ | 148 #define CONDITIONAL_WRITE_BARRIER(heap, object, offset, value, mode) \ |
149 if (mode != SKIP_WRITE_BARRIER) { \ | 149 if (mode != SKIP_WRITE_BARRIER) { \ |
(...skipping 72 matching lines...) | |
222 | 222 |
223 #define READ_INT64_FIELD(p, offset) \ | 223 #define READ_INT64_FIELD(p, offset) \ |
224 (*reinterpret_cast<const int64_t*>(FIELD_ADDR_CONST(p, offset))) | 224 (*reinterpret_cast<const int64_t*>(FIELD_ADDR_CONST(p, offset))) |
225 | 225 |
226 #define WRITE_INT64_FIELD(p, offset, value) \ | 226 #define WRITE_INT64_FIELD(p, offset, value) \ |
227 (*reinterpret_cast<int64_t*>(FIELD_ADDR(p, offset)) = value) | 227 (*reinterpret_cast<int64_t*>(FIELD_ADDR(p, offset)) = value) |
228 | 228 |
229 #define READ_BYTE_FIELD(p, offset) \ | 229 #define READ_BYTE_FIELD(p, offset) \ |
230 (*reinterpret_cast<const byte*>(FIELD_ADDR_CONST(p, offset))) | 230 (*reinterpret_cast<const byte*>(FIELD_ADDR_CONST(p, offset))) |
231 | 231 |
232 #define NOBARRIER_READ_BYTE_FIELD(p, offset) \ | 232 #define RELAXED_READ_BYTE_FIELD(p, offset) \ |
233 static_cast<byte>(base::NoBarrier_Load( \ | 233 static_cast<byte>(base::Relaxed_Load( \ |
234 reinterpret_cast<base::Atomic8*>(FIELD_ADDR(p, offset)))) | 234 reinterpret_cast<base::Atomic8*>(FIELD_ADDR(p, offset)))) |
235 | 235 |
236 #define WRITE_BYTE_FIELD(p, offset, value) \ | 236 #define WRITE_BYTE_FIELD(p, offset, value) \ |
237 (*reinterpret_cast<byte*>(FIELD_ADDR(p, offset)) = value) | 237 (*reinterpret_cast<byte*>(FIELD_ADDR(p, offset)) = value) |
238 | 238 |
239 #define NOBARRIER_WRITE_BYTE_FIELD(p, offset, value) \ | 239 #define RELAXED_WRITE_BYTE_FIELD(p, offset, value) \ |
240 base::NoBarrier_Store( \ | 240 base::Relaxed_Store(reinterpret_cast<base::Atomic8*>(FIELD_ADDR(p, offset)), \ |
241 reinterpret_cast<base::Atomic8*>(FIELD_ADDR(p, offset)), \ | 241 static_cast<base::Atomic8>(value)); |
242 static_cast<base::Atomic8>(value)); | |
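The byte variants are the same relaxed-atomic pattern at Atomic8 width. A std::atomic analogue (illustrative, not V8's base:: types):

    #include <atomic>
    #include <cstdint>

    std::atomic<uint8_t> byte_field{0};

    uint8_t RelaxedReadByte() {             // like RELAXED_READ_BYTE_FIELD
      return byte_field.load(std::memory_order_relaxed);
    }

    void RelaxedWriteByte(uint8_t value) {  // like RELAXED_WRITE_BYTE_FIELD
      byte_field.store(value, std::memory_order_relaxed);
    }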
243 | 242 |
244 #ifdef VERIFY_HEAP | 243 #ifdef VERIFY_HEAP |
245 #define DECLARE_VERIFIER(Name) void Name##Verify(); | 244 #define DECLARE_VERIFIER(Name) void Name##Verify(); |
246 #else | 245 #else |
247 #define DECLARE_VERIFIER(Name) | 246 #define DECLARE_VERIFIER(Name) |
248 #endif | 247 #endif |
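In a VERIFY_HEAP build, DECLARE_VERIFIER(Foo) (placeholder name) declares a FooVerify() member; otherwise the macro expands to nothing:

    // With VERIFY_HEAP defined:
    void FooVerify();
    // Without VERIFY_HEAP the macro produces no declaration.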