OLD | NEW |
1 // Copyright 2012 the V8 project authors. All rights reserved. | 1 // Copyright 2012 the V8 project authors. All rights reserved. |
2 // Use of this source code is governed by a BSD-style license that can be | 2 // Use of this source code is governed by a BSD-style license that can be |
3 // found in the LICENSE file. | 3 // found in the LICENSE file. |
4 // | 4 // |
5 // Review notes: | 5 // Review notes: |
6 // | 6 // |
7 // - The use of macros in these inline functions may seem superfluous | 7 // - The use of macros in these inline functions may seem superfluous |
8 // but it is absolutely needed to make sure gcc generates optimal | 8 // but it is absolutely needed to make sure gcc generates optimal |
9 // code. gcc is not happy when attempting to inline too deep. | 9 // code. gcc is not happy when attempting to inline too deep. |
10 // | 10 // |
(...skipping 1221 matching lines...) |
1232 #define RELEASE_WRITE_FIELD(p, offset, value) \ | 1232 #define RELEASE_WRITE_FIELD(p, offset, value) \ |
1233 base::Release_Store( \ | 1233 base::Release_Store( \ |
1234 reinterpret_cast<base::AtomicWord*>(FIELD_ADDR(p, offset)), \ | 1234 reinterpret_cast<base::AtomicWord*>(FIELD_ADDR(p, offset)), \ |
1235 reinterpret_cast<base::AtomicWord>(value)); | 1235 reinterpret_cast<base::AtomicWord>(value)); |
1236 | 1236 |
1237 #define NOBARRIER_WRITE_FIELD(p, offset, value) \ | 1237 #define NOBARRIER_WRITE_FIELD(p, offset, value) \ |
1238 base::NoBarrier_Store( \ | 1238 base::NoBarrier_Store( \ |
1239 reinterpret_cast<base::AtomicWord*>(FIELD_ADDR(p, offset)), \ | 1239 reinterpret_cast<base::AtomicWord*>(FIELD_ADDR(p, offset)), \ |
1240 reinterpret_cast<base::AtomicWord>(value)); | 1240 reinterpret_cast<base::AtomicWord>(value)); |
1241 | 1241 |
1242 #define WRITE_BARRIER(heap, object, offset, value) \ | 1242 #define WRITE_BARRIER(heap, object, offset, value) \ |
1243 heap->incremental_marking()->RecordWrite( \ | 1243 heap->incremental_marking()->RecordWrite( \ |
1244 object, HeapObject::RawField(object, offset), value); \ | 1244 object, HeapObject::RawField(object, offset), value); \ |
1245 if (heap->InNewSpace(value)) { \ | 1245 heap->RecordWrite(object, offset, value); |
1246 heap->RecordWrite(object->address(), offset); \ | |
1247 } | |
1248 | 1246 |
1249 #define CONDITIONAL_WRITE_BARRIER(heap, object, offset, value, mode) \ | 1247 #define CONDITIONAL_WRITE_BARRIER(heap, object, offset, value, mode) \ |
1250 if (mode != SKIP_WRITE_BARRIER) { \ | 1248 if (mode != SKIP_WRITE_BARRIER) { \ |
1251 if (mode == UPDATE_WRITE_BARRIER) { \ | 1249 if (mode == UPDATE_WRITE_BARRIER) { \ |
1252 heap->incremental_marking()->RecordWrite( \ | 1250 heap->incremental_marking()->RecordWrite( \ |
1253 object, HeapObject::RawField(object, offset), value); \ | 1251 object, HeapObject::RawField(object, offset), value); \ |
1254 } \ | 1252 } \ |
1255 if (heap->InNewSpace(value)) { \ | 1253 heap->RecordWrite(object, offset, value); \ |
1256 heap->RecordWrite(object->address(), offset); \ | |
1257 } \ | |
1258 } | 1254 } |
1259 | 1255 |
1260 #define READ_DOUBLE_FIELD(p, offset) \ | 1256 #define READ_DOUBLE_FIELD(p, offset) \ |
1261 ReadDoubleValue(FIELD_ADDR_CONST(p, offset)) | 1257 ReadDoubleValue(FIELD_ADDR_CONST(p, offset)) |
1262 | 1258 |
1263 #define WRITE_DOUBLE_FIELD(p, offset, value) \ | 1259 #define WRITE_DOUBLE_FIELD(p, offset, value) \ |
1264 WriteDoubleValue(FIELD_ADDR(p, offset), value) | 1260 WriteDoubleValue(FIELD_ADDR(p, offset), value) |
1265 | 1261 |
1266 #define READ_INT_FIELD(p, offset) \ | 1262 #define READ_INT_FIELD(p, offset) \ |
1267 (*reinterpret_cast<const int*>(FIELD_ADDR_CONST(p, offset))) | 1263 (*reinterpret_cast<const int*>(FIELD_ADDR_CONST(p, offset))) |
(...skipping 767 matching lines...) |
2035 WRITE_FIELD(this, kValueOffset, Smi::FromInt(0)); | 2031 WRITE_FIELD(this, kValueOffset, Smi::FromInt(0)); |
2036 } | 2032 } |
2037 | 2033 |
2038 | 2034 |
2039 void WeakCell::initialize(HeapObject* val) { | 2035 void WeakCell::initialize(HeapObject* val) { |
2040 WRITE_FIELD(this, kValueOffset, val); | 2036 WRITE_FIELD(this, kValueOffset, val); |
2041 Heap* heap = GetHeap(); | 2037 Heap* heap = GetHeap(); |
2042 // We just have to execute the generational barrier here because we never | 2038 // We just have to execute the generational barrier here because we never |
2043 // mark through a weak cell and collect evacuation candidates when we process | 2039 // mark through a weak cell and collect evacuation candidates when we process |
2044 // all weak cells. | 2040 // all weak cells. |
2045 if (heap->InNewSpace(val)) { | 2041 heap->RecordWrite(this, kValueOffset, val); |
2046 heap->RecordWrite(address(), kValueOffset); | |
2047 } | |
2048 } | 2042 } |
2049 | 2043 |
2050 | 2044 |
2051 bool WeakCell::cleared() const { return value() == Smi::FromInt(0); } | 2045 bool WeakCell::cleared() const { return value() == Smi::FromInt(0); } |
2052 | 2046 |
2053 | 2047 |
2054 Object* WeakCell::next() const { return READ_FIELD(this, kNextOffset); } | 2048 Object* WeakCell::next() const { return READ_FIELD(this, kNextOffset); } |
2055 | 2049 |
2056 | 2050 |
2057 void WeakCell::set_next(Object* val, WriteBarrierMode mode) { | 2051 void WeakCell::set_next(Object* val, WriteBarrierMode mode) { |
(...skipping 5817 matching lines...) |
7875 #undef WRITE_INT64_FIELD | 7869 #undef WRITE_INT64_FIELD |
7876 #undef READ_BYTE_FIELD | 7870 #undef READ_BYTE_FIELD |
7877 #undef WRITE_BYTE_FIELD | 7871 #undef WRITE_BYTE_FIELD |
7878 #undef NOBARRIER_READ_BYTE_FIELD | 7872 #undef NOBARRIER_READ_BYTE_FIELD |
7879 #undef NOBARRIER_WRITE_BYTE_FIELD | 7873 #undef NOBARRIER_WRITE_BYTE_FIELD |
7880 | 7874 |
7881 } // namespace internal | 7875 } // namespace internal |
7882 } // namespace v8 | 7876 } // namespace v8 |
7883 | 7877 |
7884 #endif // V8_OBJECTS_INL_H_ | 7878 #endif // V8_OBJECTS_INL_H_ |
OLD | NEW |