OLD | NEW |
1 // Copyright 2012 the V8 project authors. All rights reserved. | 1 // Copyright 2012 the V8 project authors. All rights reserved. |
2 // Use of this source code is governed by a BSD-style license that can be | 2 // Use of this source code is governed by a BSD-style license that can be |
3 // found in the LICENSE file. | 3 // found in the LICENSE file. |
4 // | 4 // |
5 // Review notes: | 5 // Review notes: |
6 // | 6 // |
7 // - The use of macros in these inline functions may seem superfluous | 7 // - The use of macros in these inline functions may seem superfluous |
8 // but it is absolutely needed to make sure gcc generates optimal | 8 // but it is absolutely needed to make sure gcc generates optimal |
9 // code. gcc is not happy when attempting to inline too deep. | 9 // code. gcc is not happy when attempting to inline too deep. |
10 // | 10 // |
(...skipping 49 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... |
60 } | 60 } |
61 | 61 |
62 | 62 |
63 #define CAST_ACCESSOR(type) \ | 63 #define CAST_ACCESSOR(type) \ |
64 type* type::cast(Object* object) { \ | 64 type* type::cast(Object* object) { \ |
65 SLOW_ASSERT(object->Is##type()); \ | 65 SLOW_ASSERT(object->Is##type()); \ |
66 return reinterpret_cast<type*>(object); \ | 66 return reinterpret_cast<type*>(object); \ |
67 } | 67 } |
68 | 68 |
69 | 69 |
70 #define INT_ACCESSORS(holder, name, offset) \ | 70 #define INT_ACCESSORS(holder, name, offset) \ |
71 int holder::name() { return READ_INT_FIELD(this, offset); } \ | 71 int holder::name() const { return READ_INT_FIELD(this, offset); } \ |
72 void holder::set_##name(int value) { WRITE_INT_FIELD(this, offset, value); } | 72 void holder::set_##name(int value) { WRITE_INT_FIELD(this, offset, value); } |
73 | 73 |
74 | 74 |
75 #define ACCESSORS(holder, name, type, offset) \ | 75 #define ACCESSORS(holder, name, type, offset) \ |
76 type* holder::name() { return type::cast(READ_FIELD(this, offset)); } \ | 76 type* holder::name() const { return type::cast(READ_FIELD(this, offset)); } \ |
77 void holder::set_##name(type* value, WriteBarrierMode mode) { \ | 77 void holder::set_##name(type* value, WriteBarrierMode mode) { \ |
78 WRITE_FIELD(this, offset, value); \ | 78 WRITE_FIELD(this, offset, value); \ |
79 CONDITIONAL_WRITE_BARRIER(GetHeap(), this, offset, value, mode); \ | 79 CONDITIONAL_WRITE_BARRIER(GetHeap(), this, offset, value, mode); \ |
80 } | 80 } |
81 | 81 |
82 | 82 |
83 // Getter that returns a tagged Smi and setter that writes a tagged Smi. | 83 // Getter that returns a tagged Smi and setter that writes a tagged Smi. |
84 #define ACCESSORS_TO_SMI(holder, name, offset) \ | 84 #define ACCESSORS_TO_SMI(holder, name, offset) \ |
85 Smi* holder::name() { return Smi::cast(READ_FIELD(this, offset)); } \ | 85 Smi* holder::name() const { return Smi::cast(READ_FIELD(this, offset)); } \ |
86 void holder::set_##name(Smi* value, WriteBarrierMode mode) { \ | 86 void holder::set_##name(Smi* value, WriteBarrierMode mode) { \ |
87 WRITE_FIELD(this, offset, value); \ | 87 WRITE_FIELD(this, offset, value); \ |
88 } | 88 } |
89 | 89 |
90 | 90 |
91 // Getter that returns a Smi as an int and writes an int as a Smi. | 91 // Getter that returns a Smi as an int and writes an int as a Smi. |
92 #define SMI_ACCESSORS(holder, name, offset) \ | 92 #define SMI_ACCESSORS(holder, name, offset) \ |
93 int holder::name() { \ | 93 int holder::name() const { \ |
94 Object* value = READ_FIELD(this, offset); \ | 94 Object* value = READ_FIELD(this, offset); \ |
95 return Smi::cast(value)->value(); \ | 95 return Smi::cast(value)->value(); \ |
96 } \ | 96 } \ |
97 void holder::set_##name(int value) { \ | 97 void holder::set_##name(int value) { \ |
98 WRITE_FIELD(this, offset, Smi::FromInt(value)); \ | 98 WRITE_FIELD(this, offset, Smi::FromInt(value)); \ |
99 } | 99 } |
100 | 100 |
101 #define SYNCHRONIZED_SMI_ACCESSORS(holder, name, offset) \ | 101 #define SYNCHRONIZED_SMI_ACCESSORS(holder, name, offset) \ |
102 int holder::synchronized_##name() { \ | 102 int holder::synchronized_##name() const { \ |
103 Object* value = ACQUIRE_READ_FIELD(this, offset); \ | 103 Object* value = ACQUIRE_READ_FIELD(this, offset); \ |
104 return Smi::cast(value)->value(); \ | 104 return Smi::cast(value)->value(); \ |
105 } \ | 105 } \ |
106 void holder::synchronized_set_##name(int value) { \ | 106 void holder::synchronized_set_##name(int value) { \ |
107 RELEASE_WRITE_FIELD(this, offset, Smi::FromInt(value)); \ | 107 RELEASE_WRITE_FIELD(this, offset, Smi::FromInt(value)); \ |
108 } | 108 } |
109 | 109 |
110 #define NOBARRIER_SMI_ACCESSORS(holder, name, offset) \ | 110 #define NOBARRIER_SMI_ACCESSORS(holder, name, offset) \ |
111 int holder::nobarrier_##name() { \ | 111 int holder::nobarrier_##name() const { \ |
112 Object* value = NOBARRIER_READ_FIELD(this, offset); \ | 112 Object* value = NOBARRIER_READ_FIELD(this, offset); \ |
113 return Smi::cast(value)->value(); \ | 113 return Smi::cast(value)->value(); \ |
114 } \ | 114 } \ |
115 void holder::nobarrier_set_##name(int value) { \ | 115 void holder::nobarrier_set_##name(int value) { \ |
116 NOBARRIER_WRITE_FIELD(this, offset, Smi::FromInt(value)); \ | 116 NOBARRIER_WRITE_FIELD(this, offset, Smi::FromInt(value)); \ |
117 } | 117 } |
118 | 118 |
119 #define BOOL_GETTER(holder, field, name, offset) \ | 119 #define BOOL_GETTER(holder, field, name, offset) \ |
120 bool holder::name() { \ | 120 bool holder::name() const { \ |
121 return BooleanBit::get(field(), offset); \ | 121 return BooleanBit::get(field(), offset); \ |
122 } \ | 122 } \ |
123 | 123 |
124 | 124 |
125 #define BOOL_ACCESSORS(holder, field, name, offset) \ | 125 #define BOOL_ACCESSORS(holder, field, name, offset) \ |
126 bool holder::name() { \ | 126 bool holder::name() const { \ |
127 return BooleanBit::get(field(), offset); \ | 127 return BooleanBit::get(field(), offset); \ |
128 } \ | 128 } \ |
129 void holder::set_##name(bool value) { \ | 129 void holder::set_##name(bool value) { \ |
130 set_##field(BooleanBit::set(field(), offset, value)); \ | 130 set_##field(BooleanBit::set(field(), offset, value)); \ |
131 } | 131 } |
132 | 132 |
133 | 133 |
134 bool Object::IsFixedArrayBase() { | 134 bool Object::IsFixedArrayBase() { |
135 return IsFixedArray() || IsFixedDoubleArray() || IsConstantPoolArray() || | 135 return IsFixedArray() || IsFixedDoubleArray() || IsConstantPoolArray() || |
136 IsFixedTypedArrayBase() || IsExternalArray(); | 136 IsFixedTypedArrayBase() || IsExternalArray(); |
(...skipping 977 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... |
1114 bool JSProxy::HasElementWithHandler(Handle<JSProxy> proxy, uint32_t index) { | 1114 bool JSProxy::HasElementWithHandler(Handle<JSProxy> proxy, uint32_t index) { |
1115 Isolate* isolate = proxy->GetIsolate(); | 1115 Isolate* isolate = proxy->GetIsolate(); |
1116 Handle<String> name = isolate->factory()->Uint32ToString(index); | 1116 Handle<String> name = isolate->factory()->Uint32ToString(index); |
1117 return HasPropertyWithHandler(proxy, name); | 1117 return HasPropertyWithHandler(proxy, name); |
1118 } | 1118 } |
1119 | 1119 |
1120 | 1120 |
1121 #define FIELD_ADDR(p, offset) \ | 1121 #define FIELD_ADDR(p, offset) \ |
1122 (reinterpret_cast<byte*>(p) + offset - kHeapObjectTag) | 1122 (reinterpret_cast<byte*>(p) + offset - kHeapObjectTag) |
1123 | 1123 |
| 1124 #define FIELD_ADDR_CONST(p, offset) \ |
| 1125 (reinterpret_cast<const byte*>(p) + offset - kHeapObjectTag) |
| 1126 |
1124 #define READ_FIELD(p, offset) \ | 1127 #define READ_FIELD(p, offset) \ |
1125 (*reinterpret_cast<Object**>(FIELD_ADDR(p, offset))) | 1128 (*reinterpret_cast<Object* const*>(FIELD_ADDR_CONST(p, offset))) |
1126 | 1129 |
1127 #define ACQUIRE_READ_FIELD(p, offset) \ | 1130 #define ACQUIRE_READ_FIELD(p, offset) \ |
1128 reinterpret_cast<Object*>(base::Acquire_Load( \ | 1131 reinterpret_cast<Object*>(base::Acquire_Load( \ |
1129 reinterpret_cast<base::AtomicWord*>(FIELD_ADDR(p, offset)))) | 1132 reinterpret_cast<const base::AtomicWord*>(FIELD_ADDR_CONST(p, offset)))) |
1130 | 1133 |
1131 #define NOBARRIER_READ_FIELD(p, offset) \ | 1134 #define NOBARRIER_READ_FIELD(p, offset) \ |
1132 reinterpret_cast<Object*>(base::NoBarrier_Load( \ | 1135 reinterpret_cast<Object*>(base::NoBarrier_Load( \ |
1133 reinterpret_cast<base::AtomicWord*>(FIELD_ADDR(p, offset)))) | 1136 reinterpret_cast<const base::AtomicWord*>(FIELD_ADDR_CONST(p, offset)))) |
1134 | 1137 |
1135 #define WRITE_FIELD(p, offset, value) \ | 1138 #define WRITE_FIELD(p, offset, value) \ |
1136 (*reinterpret_cast<Object**>(FIELD_ADDR(p, offset)) = value) | 1139 (*reinterpret_cast<Object**>(FIELD_ADDR(p, offset)) = value) |
1137 | 1140 |
1138 #define RELEASE_WRITE_FIELD(p, offset, value) \ | 1141 #define RELEASE_WRITE_FIELD(p, offset, value) \ |
1139 base::Release_Store( \ | 1142 base::Release_Store( \ |
1140 reinterpret_cast<base::AtomicWord*>(FIELD_ADDR(p, offset)), \ | 1143 reinterpret_cast<base::AtomicWord*>(FIELD_ADDR(p, offset)), \ |
1141 reinterpret_cast<base::AtomicWord>(value)); | 1144 reinterpret_cast<base::AtomicWord>(value)); |
1142 | 1145 |
1143 #define NOBARRIER_WRITE_FIELD(p, offset, value) \ | 1146 #define NOBARRIER_WRITE_FIELD(p, offset, value) \ |
(...skipping 12 matching lines...) Expand all Loading... |
1156 if (mode == UPDATE_WRITE_BARRIER) { \ | 1159 if (mode == UPDATE_WRITE_BARRIER) { \ |
1157 heap->incremental_marking()->RecordWrite( \ | 1160 heap->incremental_marking()->RecordWrite( \ |
1158 object, HeapObject::RawField(object, offset), value); \ | 1161 object, HeapObject::RawField(object, offset), value); \ |
1159 if (heap->InNewSpace(value)) { \ | 1162 if (heap->InNewSpace(value)) { \ |
1160 heap->RecordWrite(object->address(), offset); \ | 1163 heap->RecordWrite(object->address(), offset); \ |
1161 } \ | 1164 } \ |
1162 } | 1165 } |
1163 | 1166 |
1164 #ifndef V8_TARGET_ARCH_MIPS | 1167 #ifndef V8_TARGET_ARCH_MIPS |
1165 #define READ_DOUBLE_FIELD(p, offset) \ | 1168 #define READ_DOUBLE_FIELD(p, offset) \ |
1166 (*reinterpret_cast<double*>(FIELD_ADDR(p, offset))) | 1169 (*reinterpret_cast<const double*>(FIELD_ADDR_CONST(p, offset))) |
1167 #else // V8_TARGET_ARCH_MIPS | 1170 #else // V8_TARGET_ARCH_MIPS |
1168 // Prevent gcc from using load-double (mips ldc1) on (possibly) | 1171 // Prevent gcc from using load-double (mips ldc1) on (possibly) |
1169 // non-64-bit aligned HeapNumber::value. | 1172 // non-64-bit aligned HeapNumber::value. |
1170 static inline double read_double_field(void* p, int offset) { | 1173 static inline double read_double_field(const void* p, int offset) { |
1171 union conversion { | 1174 union conversion { |
1172 double d; | 1175 double d; |
1173 uint32_t u[2]; | 1176 uint32_t u[2]; |
1174 } c; | 1177 } c; |
1175 c.u[0] = (*reinterpret_cast<uint32_t*>(FIELD_ADDR(p, offset))); | 1178 c.u[0] = (*reinterpret_cast<const uint32_t*>(FIELD_ADDR_CONST(p, offset))); |
1176 c.u[1] = (*reinterpret_cast<uint32_t*>(FIELD_ADDR(p, offset + 4))); | 1179 c.u[1] = (*reinterpret_cast<const uint32_t*>(FIELD_ADDR_CONST(p, offset + 4))); |
1177 return c.d; | 1180 return c.d; |
1178 } | 1181 } |
1179 #define READ_DOUBLE_FIELD(p, offset) read_double_field(p, offset) | 1182 #define READ_DOUBLE_FIELD(p, offset) read_double_field(p, offset) |
1180 #endif // V8_TARGET_ARCH_MIPS | 1183 #endif // V8_TARGET_ARCH_MIPS |
1181 | 1184 |
1182 #ifndef V8_TARGET_ARCH_MIPS | 1185 #ifndef V8_TARGET_ARCH_MIPS |
1183 #define WRITE_DOUBLE_FIELD(p, offset, value) \ | 1186 #define WRITE_DOUBLE_FIELD(p, offset, value) \ |
1184 (*reinterpret_cast<double*>(FIELD_ADDR(p, offset)) = value) | 1187 (*reinterpret_cast<double*>(FIELD_ADDR(p, offset)) = value) |
1185 #else // V8_TARGET_ARCH_MIPS | 1188 #else // V8_TARGET_ARCH_MIPS |
1186 // Prevent gcc from using store-double (mips sdc1) on (possibly) | 1189 // Prevent gcc from using store-double (mips sdc1) on (possibly) |
1187 // non-64-bit aligned HeapNumber::value. | 1190 // non-64-bit aligned HeapNumber::value. |
1188 static inline void write_double_field(void* p, int offset, | 1191 static inline void write_double_field(void* p, int offset, |
1189 double value) { | 1192 double value) { |
1190 union conversion { | 1193 union conversion { |
1191 double d; | 1194 double d; |
1192 uint32_t u[2]; | 1195 uint32_t u[2]; |
1193 } c; | 1196 } c; |
1194 c.d = value; | 1197 c.d = value; |
1195 (*reinterpret_cast<uint32_t*>(FIELD_ADDR(p, offset))) = c.u[0]; | 1198 (*reinterpret_cast<uint32_t*>(FIELD_ADDR(p, offset))) = c.u[0]; |
1196 (*reinterpret_cast<uint32_t*>(FIELD_ADDR(p, offset + 4))) = c.u[1]; | 1199 (*reinterpret_cast<uint32_t*>(FIELD_ADDR(p, offset + 4))) = c.u[1]; |
1197 } | 1200 } |
1198 #define WRITE_DOUBLE_FIELD(p, offset, value) \ | 1201 #define WRITE_DOUBLE_FIELD(p, offset, value) \ |
1199 write_double_field(p, offset, value) | 1202 write_double_field(p, offset, value) |
1200 #endif // V8_TARGET_ARCH_MIPS | 1203 #endif // V8_TARGET_ARCH_MIPS |
1201 | 1204 |
1202 | 1205 |
1203 #define READ_INT_FIELD(p, offset) \ | 1206 #define READ_INT_FIELD(p, offset) \ |
1204 (*reinterpret_cast<int*>(FIELD_ADDR(p, offset))) | 1207 (*reinterpret_cast<const int*>(FIELD_ADDR_CONST(p, offset))) |
1205 | 1208 |
1206 #define WRITE_INT_FIELD(p, offset, value) \ | 1209 #define WRITE_INT_FIELD(p, offset, value) \ |
1207 (*reinterpret_cast<int*>(FIELD_ADDR(p, offset)) = value) | 1210 (*reinterpret_cast<int*>(FIELD_ADDR(p, offset)) = value) |
1208 | 1211 |
1209 #define READ_INTPTR_FIELD(p, offset) \ | 1212 #define READ_INTPTR_FIELD(p, offset) \ |
1210 (*reinterpret_cast<intptr_t*>(FIELD_ADDR(p, offset))) | 1213 (*reinterpret_cast<const intptr_t*>(FIELD_ADDR_CONST(p, offset))) |
1211 | 1214 |
1212 #define WRITE_INTPTR_FIELD(p, offset, value) \ | 1215 #define WRITE_INTPTR_FIELD(p, offset, value) \ |
1213 (*reinterpret_cast<intptr_t*>(FIELD_ADDR(p, offset)) = value) | 1216 (*reinterpret_cast<intptr_t*>(FIELD_ADDR(p, offset)) = value) |
1214 | 1217 |
1215 #define READ_UINT32_FIELD(p, offset) \ | 1218 #define READ_UINT32_FIELD(p, offset) \ |
1216 (*reinterpret_cast<uint32_t*>(FIELD_ADDR(p, offset))) | 1219 (*reinterpret_cast<const uint32_t*>(FIELD_ADDR_CONST(p, offset))) |
1217 | 1220 |
1218 #define WRITE_UINT32_FIELD(p, offset, value) \ | 1221 #define WRITE_UINT32_FIELD(p, offset, value) \ |
1219 (*reinterpret_cast<uint32_t*>(FIELD_ADDR(p, offset)) = value) | 1222 (*reinterpret_cast<uint32_t*>(FIELD_ADDR(p, offset)) = value) |
1220 | 1223 |
1221 #define READ_INT32_FIELD(p, offset) \ | 1224 #define READ_INT32_FIELD(p, offset) \ |
1222 (*reinterpret_cast<int32_t*>(FIELD_ADDR(p, offset))) | 1225 (*reinterpret_cast<const int32_t*>(FIELD_ADDR_CONST(p, offset))) |
1223 | 1226 |
1224 #define WRITE_INT32_FIELD(p, offset, value) \ | 1227 #define WRITE_INT32_FIELD(p, offset, value) \ |
1225 (*reinterpret_cast<int32_t*>(FIELD_ADDR(p, offset)) = value) | 1228 (*reinterpret_cast<int32_t*>(FIELD_ADDR(p, offset)) = value) |
1226 | 1229 |
1227 #define READ_INT64_FIELD(p, offset) \ | 1230 #define READ_INT64_FIELD(p, offset) \ |
1228 (*reinterpret_cast<int64_t*>(FIELD_ADDR(p, offset))) | 1231 (*reinterpret_cast<const int64_t*>(FIELD_ADDR_CONST(p, offset))) |
1229 | 1232 |
1230 #define WRITE_INT64_FIELD(p, offset, value) \ | 1233 #define WRITE_INT64_FIELD(p, offset, value) \ |
1231 (*reinterpret_cast<int64_t*>(FIELD_ADDR(p, offset)) = value) | 1234 (*reinterpret_cast<int64_t*>(FIELD_ADDR(p, offset)) = value) |
1232 | 1235 |
1233 #define READ_SHORT_FIELD(p, offset) \ | 1236 #define READ_SHORT_FIELD(p, offset) \ |
1234 (*reinterpret_cast<uint16_t*>(FIELD_ADDR(p, offset))) | 1237 (*reinterpret_cast<const uint16_t*>(FIELD_ADDR_CONST(p, offset))) |
1235 | 1238 |
1236 #define WRITE_SHORT_FIELD(p, offset, value) \ | 1239 #define WRITE_SHORT_FIELD(p, offset, value) \ |
1237 (*reinterpret_cast<uint16_t*>(FIELD_ADDR(p, offset)) = value) | 1240 (*reinterpret_cast<uint16_t*>(FIELD_ADDR(p, offset)) = value) |
1238 | 1241 |
1239 #define READ_BYTE_FIELD(p, offset) \ | 1242 #define READ_BYTE_FIELD(p, offset) \ |
1240 (*reinterpret_cast<byte*>(FIELD_ADDR(p, offset))) | 1243 (*reinterpret_cast<const byte*>(FIELD_ADDR_CONST(p, offset))) |
1241 | 1244 |
1242 #define NOBARRIER_READ_BYTE_FIELD(p, offset) \ | 1245 #define NOBARRIER_READ_BYTE_FIELD(p, offset) \ |
1243 static_cast<byte>(base::NoBarrier_Load( \ | 1246 static_cast<byte>(base::NoBarrier_Load( \ |
1244 reinterpret_cast<base::Atomic8*>(FIELD_ADDR(p, offset)))) | 1247 reinterpret_cast<base::Atomic8*>(FIELD_ADDR(p, offset)))) |
1245 | 1248 |
1246 #define WRITE_BYTE_FIELD(p, offset, value) \ | 1249 #define WRITE_BYTE_FIELD(p, offset, value) \ |
1247 (*reinterpret_cast<byte*>(FIELD_ADDR(p, offset)) = value) | 1250 (*reinterpret_cast<byte*>(FIELD_ADDR(p, offset)) = value) |
1248 | 1251 |
1249 #define NOBARRIER_WRITE_BYTE_FIELD(p, offset, value) \ | 1252 #define NOBARRIER_WRITE_BYTE_FIELD(p, offset, value) \ |
1250 base::NoBarrier_Store( \ | 1253 base::NoBarrier_Store( \ |
1251 reinterpret_cast<base::Atomic8*>(FIELD_ADDR(p, offset)), \ | 1254 reinterpret_cast<base::Atomic8*>(FIELD_ADDR(p, offset)), \ |
1252 static_cast<base::Atomic8>(value)); | 1255 static_cast<base::Atomic8>(value)); |
1253 | 1256 |
1254 Object** HeapObject::RawField(HeapObject* obj, int byte_offset) { | 1257 Object** HeapObject::RawField(HeapObject* obj, int byte_offset) { |
1255 return &READ_FIELD(obj, byte_offset); | 1258 return reinterpret_cast<Object**>(FIELD_ADDR(obj, byte_offset)); |
1256 } | 1259 } |
1257 | 1260 |
1258 | 1261 |
1259 int Smi::value() { | 1262 int Smi::value() const { |
1260 return Internals::SmiValue(this); | 1263 return Internals::SmiValue(this); |
1261 } | 1264 } |
1262 | 1265 |
1263 | 1266 |
1264 Smi* Smi::FromInt(int value) { | 1267 Smi* Smi::FromInt(int value) { |
1265 ASSERT(Smi::IsValid(value)); | 1268 ASSERT(Smi::IsValid(value)); |
1266 return reinterpret_cast<Smi*>(Internals::IntToSmi(value)); | 1269 return reinterpret_cast<Smi*>(Internals::IntToSmi(value)); |
1267 } | 1270 } |
1268 | 1271 |
1269 | 1272 |
1270 Smi* Smi::FromIntptr(intptr_t value) { | 1273 Smi* Smi::FromIntptr(intptr_t value) { |
1271 ASSERT(Smi::IsValid(value)); | 1274 ASSERT(Smi::IsValid(value)); |
1272 int smi_shift_bits = kSmiTagSize + kSmiShiftSize; | 1275 int smi_shift_bits = kSmiTagSize + kSmiShiftSize; |
1273 return reinterpret_cast<Smi*>((value << smi_shift_bits) | kSmiTag); | 1276 return reinterpret_cast<Smi*>((value << smi_shift_bits) | kSmiTag); |
1274 } | 1277 } |
1275 | 1278 |
1276 | 1279 |
1277 bool Smi::IsValid(intptr_t value) { | 1280 bool Smi::IsValid(intptr_t value) { |
1278 bool result = Internals::IsValidSmi(value); | 1281 bool result = Internals::IsValidSmi(value); |
1279 ASSERT_EQ(result, value >= kMinValue && value <= kMaxValue); | 1282 ASSERT_EQ(result, value >= kMinValue && value <= kMaxValue); |
1280 return result; | 1283 return result; |
1281 } | 1284 } |
1282 | 1285 |
1283 | 1286 |
1284 MapWord MapWord::FromMap(Map* map) { | 1287 MapWord MapWord::FromMap(const Map* map) { |
1285 return MapWord(reinterpret_cast<uintptr_t>(map)); | 1288 return MapWord(reinterpret_cast<uintptr_t>(map)); |
1286 } | 1289 } |
1287 | 1290 |
1288 | 1291 |
1289 Map* MapWord::ToMap() { | 1292 Map* MapWord::ToMap() { |
1290 return reinterpret_cast<Map*>(value_); | 1293 return reinterpret_cast<Map*>(value_); |
1291 } | 1294 } |
1292 | 1295 |
1293 | 1296 |
1294 bool MapWord::IsForwardingAddress() { | 1297 bool MapWord::IsForwardingAddress() { |
(...skipping 17 matching lines...) Expand all Loading... |
1312 void HeapObject::VerifyObjectField(int offset) { | 1315 void HeapObject::VerifyObjectField(int offset) { |
1313 VerifyPointer(READ_FIELD(this, offset)); | 1316 VerifyPointer(READ_FIELD(this, offset)); |
1314 } | 1317 } |
1315 | 1318 |
1316 void HeapObject::VerifySmiField(int offset) { | 1319 void HeapObject::VerifySmiField(int offset) { |
1317 CHECK(READ_FIELD(this, offset)->IsSmi()); | 1320 CHECK(READ_FIELD(this, offset)->IsSmi()); |
1318 } | 1321 } |
1319 #endif | 1322 #endif |
1320 | 1323 |
1321 | 1324 |
1322 Heap* HeapObject::GetHeap() { | 1325 Heap* HeapObject::GetHeap() const { |
1323 Heap* heap = | 1326 Heap* heap = |
1324 MemoryChunk::FromAddress(reinterpret_cast<Address>(this))->heap(); | 1327 MemoryChunk::FromAddress(reinterpret_cast<const byte*>(this))->heap(); |
1325 SLOW_ASSERT(heap != NULL); | 1328 SLOW_ASSERT(heap != NULL); |
1326 return heap; | 1329 return heap; |
1327 } | 1330 } |
1328 | 1331 |
1329 | 1332 |
1330 Isolate* HeapObject::GetIsolate() { | 1333 Isolate* HeapObject::GetIsolate() { |
1331 return GetHeap()->isolate(); | 1334 return GetHeap()->isolate(); |
1332 } | 1335 } |
1333 | 1336 |
1334 | 1337 |
1335 Map* HeapObject::map() { | 1338 Map* HeapObject::map() const { |
1336 #ifdef DEBUG | 1339 #ifdef DEBUG |
1337 // Clear mark potentially added by PathTracer. | 1340 // Clear mark potentially added by PathTracer. |
1338 uintptr_t raw_value = | 1341 uintptr_t raw_value = |
1339 map_word().ToRawValue() & ~static_cast<uintptr_t>(PathTracer::kMarkTag); | 1342 map_word().ToRawValue() & ~static_cast<uintptr_t>(PathTracer::kMarkTag); |
1340 return MapWord::FromRawValue(raw_value).ToMap(); | 1343 return MapWord::FromRawValue(raw_value).ToMap(); |
1341 #else | 1344 #else |
1342 return map_word().ToMap(); | 1345 return map_word().ToMap(); |
1343 #endif | 1346 #endif |
1344 } | 1347 } |
1345 | 1348 |
(...skipping 27 matching lines...) Expand all Loading... |
1373 synchronized_set_map_word(MapWord::FromMap(value)); | 1376 synchronized_set_map_word(MapWord::FromMap(value)); |
1374 } | 1377 } |
1375 | 1378 |
1376 | 1379 |
1377 // Unsafe accessor omitting write barrier. | 1380 // Unsafe accessor omitting write barrier. |
1378 void HeapObject::set_map_no_write_barrier(Map* value) { | 1381 void HeapObject::set_map_no_write_barrier(Map* value) { |
1379 set_map_word(MapWord::FromMap(value)); | 1382 set_map_word(MapWord::FromMap(value)); |
1380 } | 1383 } |
1381 | 1384 |
1382 | 1385 |
1383 MapWord HeapObject::map_word() { | 1386 MapWord HeapObject::map_word() const { |
1384 return MapWord( | 1387 return MapWord( |
1385 reinterpret_cast<uintptr_t>(NOBARRIER_READ_FIELD(this, kMapOffset))); | 1388 reinterpret_cast<uintptr_t>(NOBARRIER_READ_FIELD(this, kMapOffset))); |
1386 } | 1389 } |
1387 | 1390 |
1388 | 1391 |
1389 void HeapObject::set_map_word(MapWord map_word) { | 1392 void HeapObject::set_map_word(MapWord map_word) { |
1390 NOBARRIER_WRITE_FIELD( | 1393 NOBARRIER_WRITE_FIELD( |
1391 this, kMapOffset, reinterpret_cast<Object*>(map_word.value_)); | 1394 this, kMapOffset, reinterpret_cast<Object*>(map_word.value_)); |
1392 } | 1395 } |
1393 | 1396 |
1394 | 1397 |
1395 MapWord HeapObject::synchronized_map_word() { | 1398 MapWord HeapObject::synchronized_map_word() const { |
1396 return MapWord( | 1399 return MapWord( |
1397 reinterpret_cast<uintptr_t>(ACQUIRE_READ_FIELD(this, kMapOffset))); | 1400 reinterpret_cast<uintptr_t>(ACQUIRE_READ_FIELD(this, kMapOffset))); |
1398 } | 1401 } |
1399 | 1402 |
1400 | 1403 |
1401 void HeapObject::synchronized_set_map_word(MapWord map_word) { | 1404 void HeapObject::synchronized_set_map_word(MapWord map_word) { |
1402 RELEASE_WRITE_FIELD( | 1405 RELEASE_WRITE_FIELD( |
1403 this, kMapOffset, reinterpret_cast<Object*>(map_word.value_)); | 1406 this, kMapOffset, reinterpret_cast<Object*>(map_word.value_)); |
1404 } | 1407 } |
1405 | 1408 |
(...skipping 63 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... |
1469 Object* the_hole = GetHeap()->the_hole_value(); | 1472 Object* the_hole = GetHeap()->the_hole_value(); |
1470 Object** current = GetFirstElementAddress(); | 1473 Object** current = GetFirstElementAddress(); |
1471 for (int i = 0; i < length(); ++i) { | 1474 for (int i = 0; i < length(); ++i) { |
1472 Object* candidate = *current++; | 1475 Object* candidate = *current++; |
1473 if (!candidate->IsSmi() && candidate != the_hole) return false; | 1476 if (!candidate->IsSmi() && candidate != the_hole) return false; |
1474 } | 1477 } |
1475 return true; | 1478 return true; |
1476 } | 1479 } |
1477 | 1480 |
1478 | 1481 |
1479 FixedArrayBase* JSObject::elements() { | 1482 FixedArrayBase* JSObject::elements() const { |
1480 Object* array = READ_FIELD(this, kElementsOffset); | 1483 Object* array = READ_FIELD(this, kElementsOffset); |
1481 return static_cast<FixedArrayBase*>(array); | 1484 return static_cast<FixedArrayBase*>(array); |
1482 } | 1485 } |
1483 | 1486 |
1484 | 1487 |
1485 void JSObject::ValidateElements(Handle<JSObject> object) { | 1488 void JSObject::ValidateElements(Handle<JSObject> object) { |
1486 #ifdef ENABLE_SLOW_ASSERTS | 1489 #ifdef ENABLE_SLOW_ASSERTS |
1487 if (FLAG_enable_slow_asserts) { | 1490 if (FLAG_enable_slow_asserts) { |
1488 ElementsAccessor* accessor = object->GetElementsAccessor(); | 1491 ElementsAccessor* accessor = object->GetElementsAccessor(); |
1489 accessor->Validate(object); | 1492 accessor->Validate(object); |
(...skipping 320 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... |
1810 byte Oddball::kind() { | 1813 byte Oddball::kind() { |
1811 return Smi::cast(READ_FIELD(this, kKindOffset))->value(); | 1814 return Smi::cast(READ_FIELD(this, kKindOffset))->value(); |
1812 } | 1815 } |
1813 | 1816 |
1814 | 1817 |
1815 void Oddball::set_kind(byte value) { | 1818 void Oddball::set_kind(byte value) { |
1816 WRITE_FIELD(this, kKindOffset, Smi::FromInt(value)); | 1819 WRITE_FIELD(this, kKindOffset, Smi::FromInt(value)); |
1817 } | 1820 } |
1818 | 1821 |
1819 | 1822 |
1820 Object* Cell::value() { | 1823 Object* Cell::value() const { |
1821 return READ_FIELD(this, kValueOffset); | 1824 return READ_FIELD(this, kValueOffset); |
1822 } | 1825 } |
1823 | 1826 |
1824 | 1827 |
1825 void Cell::set_value(Object* val, WriteBarrierMode ignored) { | 1828 void Cell::set_value(Object* val, WriteBarrierMode ignored) { |
1826 // The write barrier is not used for global property cells. | 1829 // The write barrier is not used for global property cells. |
1827 ASSERT(!val->IsPropertyCell() && !val->IsCell()); | 1830 ASSERT(!val->IsPropertyCell() && !val->IsCell()); |
1828 WRITE_FIELD(this, kValueOffset, val); | 1831 WRITE_FIELD(this, kValueOffset, val); |
1829 } | 1832 } |
1830 | 1833 |
1831 ACCESSORS(PropertyCell, dependent_code, DependentCode, kDependentCodeOffset) | 1834 ACCESSORS(PropertyCell, dependent_code, DependentCode, kDependentCodeOffset) |
1832 | 1835 |
1833 Object* PropertyCell::type_raw() { | 1836 Object* PropertyCell::type_raw() const { |
1834 return READ_FIELD(this, kTypeOffset); | 1837 return READ_FIELD(this, kTypeOffset); |
1835 } | 1838 } |
1836 | 1839 |
1837 | 1840 |
1838 void PropertyCell::set_type_raw(Object* val, WriteBarrierMode ignored) { | 1841 void PropertyCell::set_type_raw(Object* val, WriteBarrierMode ignored) { |
1839 WRITE_FIELD(this, kTypeOffset, val); | 1842 WRITE_FIELD(this, kTypeOffset, val); |
1840 } | 1843 } |
1841 | 1844 |
1842 | 1845 |
1843 int JSObject::GetHeaderSize() { | 1846 int JSObject::GetHeaderSize() { |
(...skipping 1794 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... |
3638 } | 3641 } |
3639 | 3642 |
3640 | 3643 |
3641 void ExternalUint8ClampedArray::set(int index, uint8_t value) { | 3644 void ExternalUint8ClampedArray::set(int index, uint8_t value) { |
3642 ASSERT((index >= 0) && (index < this->length())); | 3645 ASSERT((index >= 0) && (index < this->length())); |
3643 uint8_t* ptr = external_uint8_clamped_pointer(); | 3646 uint8_t* ptr = external_uint8_clamped_pointer(); |
3644 ptr[index] = value; | 3647 ptr[index] = value; |
3645 } | 3648 } |
3646 | 3649 |
3647 | 3650 |
3648 void* ExternalArray::external_pointer() { | 3651 void* ExternalArray::external_pointer() const { |
3649 intptr_t ptr = READ_INTPTR_FIELD(this, kExternalPointerOffset); | 3652 intptr_t ptr = READ_INTPTR_FIELD(this, kExternalPointerOffset); |
3650 return reinterpret_cast<void*>(ptr); | 3653 return reinterpret_cast<void*>(ptr); |
3651 } | 3654 } |
3652 | 3655 |
3653 | 3656 |
3654 void ExternalArray::set_external_pointer(void* value, WriteBarrierMode mode) { | 3657 void ExternalArray::set_external_pointer(void* value, WriteBarrierMode mode) { |
3655 intptr_t ptr = reinterpret_cast<intptr_t>(value); | 3658 intptr_t ptr = reinterpret_cast<intptr_t>(value); |
3656 WRITE_INTPTR_FIELD(this, kExternalPointerOffset, ptr); | 3659 WRITE_INTPTR_FIELD(this, kExternalPointerOffset, ptr); |
3657 } | 3660 } |
3658 | 3661 |
(...skipping 1219 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... |
4878 }; | 4881 }; |
4879 | 4882 |
4880 | 4883 |
4881 bool Code::IsWeakObjectInIC(Object* object) { | 4884 bool Code::IsWeakObjectInIC(Object* object) { |
4882 return object->IsMap() && Map::cast(object)->CanTransition() && | 4885 return object->IsMap() && Map::cast(object)->CanTransition() && |
4883 FLAG_collect_maps && | 4886 FLAG_collect_maps && |
4884 FLAG_weak_embedded_maps_in_ic; | 4887 FLAG_weak_embedded_maps_in_ic; |
4885 } | 4888 } |
4886 | 4889 |
4887 | 4890 |
4888 Object* Map::prototype() { | 4891 Object* Map::prototype() const { |
4889 return READ_FIELD(this, kPrototypeOffset); | 4892 return READ_FIELD(this, kPrototypeOffset); |
4890 } | 4893 } |
4891 | 4894 |
4892 | 4895 |
4893 void Map::set_prototype(Object* value, WriteBarrierMode mode) { | 4896 void Map::set_prototype(Object* value, WriteBarrierMode mode) { |
4894 ASSERT(value->IsNull() || value->IsJSReceiver()); | 4897 ASSERT(value->IsNull() || value->IsJSReceiver()); |
4895 WRITE_FIELD(this, kPrototypeOffset, value); | 4898 WRITE_FIELD(this, kPrototypeOffset, value); |
4896 CONDITIONAL_WRITE_BARRIER(GetHeap(), this, kPrototypeOffset, value, mode); | 4899 CONDITIONAL_WRITE_BARRIER(GetHeap(), this, kPrototypeOffset, value, mode); |
4897 } | 4900 } |
4898 | 4901 |
(...skipping 55 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... |
4954 return object; | 4957 return object; |
4955 } | 4958 } |
4956 } | 4959 } |
4957 | 4960 |
4958 | 4961 |
4959 bool Map::HasElementsTransition() { | 4962 bool Map::HasElementsTransition() { |
4960 return HasTransitionArray() && transitions()->HasElementsTransition(); | 4963 return HasTransitionArray() && transitions()->HasElementsTransition(); |
4961 } | 4964 } |
4962 | 4965 |
4963 | 4966 |
4964 bool Map::HasTransitionArray() { | 4967 bool Map::HasTransitionArray() const { |
4965 Object* object = READ_FIELD(this, kTransitionsOrBackPointerOffset); | 4968 Object* object = READ_FIELD(this, kTransitionsOrBackPointerOffset); |
4966 return object->IsTransitionArray(); | 4969 return object->IsTransitionArray(); |
4967 } | 4970 } |
4968 | 4971 |
4969 | 4972 |
4970 Map* Map::elements_transition_map() { | 4973 Map* Map::elements_transition_map() { |
4971 int index = transitions()->Search(GetHeap()->elements_transition_symbol()); | 4974 int index = transitions()->Search(GetHeap()->elements_transition_symbol()); |
4972 return transitions()->GetTarget(index); | 4975 return transitions()->GetTarget(index); |
4973 } | 4976 } |
4974 | 4977 |
(...skipping 39 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... |
5014 map->transitions()->SetPrototypeTransitions(*proto_transitions); | 5017 map->transitions()->SetPrototypeTransitions(*proto_transitions); |
5015 map->SetNumberOfProtoTransitions(old_number_of_transitions); | 5018 map->SetNumberOfProtoTransitions(old_number_of_transitions); |
5016 } | 5019 } |
5017 | 5020 |
5018 | 5021 |
5019 bool Map::HasPrototypeTransitions() { | 5022 bool Map::HasPrototypeTransitions() { |
5020 return HasTransitionArray() && transitions()->HasPrototypeTransitions(); | 5023 return HasTransitionArray() && transitions()->HasPrototypeTransitions(); |
5021 } | 5024 } |
5022 | 5025 |
5023 | 5026 |
5024 TransitionArray* Map::transitions() { | 5027 TransitionArray* Map::transitions() const { |
5025 ASSERT(HasTransitionArray()); | 5028 ASSERT(HasTransitionArray()); |
5026 Object* object = READ_FIELD(this, kTransitionsOrBackPointerOffset); | 5029 Object* object = READ_FIELD(this, kTransitionsOrBackPointerOffset); |
5027 return TransitionArray::cast(object); | 5030 return TransitionArray::cast(object); |
5028 } | 5031 } |
5029 | 5032 |
5030 | 5033 |
5031 void Map::set_transitions(TransitionArray* transition_array, | 5034 void Map::set_transitions(TransitionArray* transition_array, |
5032 WriteBarrierMode mode) { | 5035 WriteBarrierMode mode) { |
5033 // Transition arrays are not shared. When one is replaced, it should not | 5036 // Transition arrays are not shared. When one is replaced, it should not |
5034 // keep referenced objects alive, so we zap it. | 5037 // keep referenced objects alive, so we zap it. |
(...skipping 244 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... |
5279 SMI_ACCESSORS(SharedFunctionInfo, opt_count_and_bailout_reason, | 5282 SMI_ACCESSORS(SharedFunctionInfo, opt_count_and_bailout_reason, |
5280 kOptCountAndBailoutReasonOffset) | 5283 kOptCountAndBailoutReasonOffset) |
5281 SMI_ACCESSORS(SharedFunctionInfo, counters, kCountersOffset) | 5284 SMI_ACCESSORS(SharedFunctionInfo, counters, kCountersOffset) |
5282 SMI_ACCESSORS(SharedFunctionInfo, ast_node_count, kAstNodeCountOffset) | 5285 SMI_ACCESSORS(SharedFunctionInfo, ast_node_count, kAstNodeCountOffset) |
5283 SMI_ACCESSORS(SharedFunctionInfo, profiler_ticks, kProfilerTicksOffset) | 5286 SMI_ACCESSORS(SharedFunctionInfo, profiler_ticks, kProfilerTicksOffset) |
5284 | 5287 |
5285 #else | 5288 #else |
5286 | 5289 |
5287 #define PSEUDO_SMI_ACCESSORS_LO(holder, name, offset) \ | 5290 #define PSEUDO_SMI_ACCESSORS_LO(holder, name, offset) \ |
5288 STATIC_ASSERT(holder::offset % kPointerSize == 0); \ | 5291 STATIC_ASSERT(holder::offset % kPointerSize == 0); \ |
5289 int holder::name() { \ | 5292 int holder::name() const { \ |
5290 int value = READ_INT_FIELD(this, offset); \ | 5293 int value = READ_INT_FIELD(this, offset); \ |
5291 ASSERT(kHeapObjectTag == 1); \ | 5294 ASSERT(kHeapObjectTag == 1); \ |
5292 ASSERT((value & kHeapObjectTag) == 0); \ | 5295 ASSERT((value & kHeapObjectTag) == 0); \ |
5293 return value >> 1; \ | 5296 return value >> 1; \ |
5294 } \ | 5297 } \ |
5295 void holder::set_##name(int value) { \ | 5298 void holder::set_##name(int value) { \ |
5296 ASSERT(kHeapObjectTag == 1); \ | 5299 ASSERT(kHeapObjectTag == 1); \ |
5297 ASSERT((value & 0xC0000000) == 0xC0000000 || \ | 5300 ASSERT((value & 0xC0000000) == 0xC0000000 || \ |
5298 (value & 0xC0000000) == 0x0); \ | 5301 (value & 0xC0000000) == 0x0); \ |
5299 WRITE_INT_FIELD(this, \ | 5302 WRITE_INT_FIELD(this, \ |
(...skipping 121 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... |
5421 return start_position_and_type() >> kStartPositionShift; | 5424 return start_position_and_type() >> kStartPositionShift; |
5422 } | 5425 } |
5423 | 5426 |
5424 | 5427 |
5425 void SharedFunctionInfo::set_start_position(int start_position) { | 5428 void SharedFunctionInfo::set_start_position(int start_position) { |
5426 set_start_position_and_type((start_position << kStartPositionShift) | 5429 set_start_position_and_type((start_position << kStartPositionShift) |
5427 | (start_position_and_type() & ~kStartPositionMask)); | 5430 | (start_position_and_type() & ~kStartPositionMask)); |
5428 } | 5431 } |
5429 | 5432 |
5430 | 5433 |
5431 Code* SharedFunctionInfo::code() { | 5434 Code* SharedFunctionInfo::code() const { |
5432 return Code::cast(READ_FIELD(this, kCodeOffset)); | 5435 return Code::cast(READ_FIELD(this, kCodeOffset)); |
5433 } | 5436 } |
5434 | 5437 |
5435 | 5438 |
5436 void SharedFunctionInfo::set_code(Code* value, WriteBarrierMode mode) { | 5439 void SharedFunctionInfo::set_code(Code* value, WriteBarrierMode mode) { |
5437 ASSERT(value->kind() != Code::OPTIMIZED_FUNCTION); | 5440 ASSERT(value->kind() != Code::OPTIMIZED_FUNCTION); |
5438 WRITE_FIELD(this, kCodeOffset, value); | 5441 WRITE_FIELD(this, kCodeOffset, value); |
5439 CONDITIONAL_WRITE_BARRIER(value->GetHeap(), this, kCodeOffset, value, mode); | 5442 CONDITIONAL_WRITE_BARRIER(value->GetHeap(), this, kCodeOffset, value, mode); |
5440 } | 5443 } |
5441 | 5444 |
5442 | 5445 |
5443 void SharedFunctionInfo::ReplaceCode(Code* value) { | 5446 void SharedFunctionInfo::ReplaceCode(Code* value) { |
5444 // If the GC metadata field is already used then the function was | 5447 // If the GC metadata field is already used then the function was |
5445 // enqueued as a code flushing candidate and we remove it now. | 5448 // enqueued as a code flushing candidate and we remove it now. |
5446 if (code()->gc_metadata() != NULL) { | 5449 if (code()->gc_metadata() != NULL) { |
5447 CodeFlusher* flusher = GetHeap()->mark_compact_collector()->code_flusher(); | 5450 CodeFlusher* flusher = GetHeap()->mark_compact_collector()->code_flusher(); |
5448 flusher->EvictCandidate(this); | 5451 flusher->EvictCandidate(this); |
5449 } | 5452 } |
5450 | 5453 |
5451 ASSERT(code()->gc_metadata() == NULL && value->gc_metadata() == NULL); | 5454 ASSERT(code()->gc_metadata() == NULL && value->gc_metadata() == NULL); |
5452 | 5455 |
5453 set_code(value); | 5456 set_code(value); |
5454 } | 5457 } |
5455 | 5458 |
5456 | 5459 |
5457 ScopeInfo* SharedFunctionInfo::scope_info() { | 5460 ScopeInfo* SharedFunctionInfo::scope_info() const { |
5458 return reinterpret_cast<ScopeInfo*>(READ_FIELD(this, kScopeInfoOffset)); | 5461 return reinterpret_cast<ScopeInfo*>(READ_FIELD(this, kScopeInfoOffset)); |
5459 } | 5462 } |
5460 | 5463 |
5461 | 5464 |
5462 void SharedFunctionInfo::set_scope_info(ScopeInfo* value, | 5465 void SharedFunctionInfo::set_scope_info(ScopeInfo* value, |
5463 WriteBarrierMode mode) { | 5466 WriteBarrierMode mode) { |
5464 WRITE_FIELD(this, kScopeInfoOffset, reinterpret_cast<Object*>(value)); | 5467 WRITE_FIELD(this, kScopeInfoOffset, reinterpret_cast<Object*>(value)); |
5465 CONDITIONAL_WRITE_BARRIER(GetHeap(), | 5468 CONDITIONAL_WRITE_BARRIER(GetHeap(), |
5466 this, | 5469 this, |
5467 kScopeInfoOffset, | 5470 kScopeInfoOffset, |
(...skipping 351 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... |
5819 } | 5822 } |
5820 } | 5823 } |
5821 | 5824 |
5822 | 5825 |
5823 ACCESSORS(JSSet, table, Object, kTableOffset) | 5826 ACCESSORS(JSSet, table, Object, kTableOffset) |
5824 ACCESSORS(JSMap, table, Object, kTableOffset) | 5827 ACCESSORS(JSMap, table, Object, kTableOffset) |
5825 | 5828 |
5826 | 5829 |
5827 #define ORDERED_HASH_TABLE_ITERATOR_ACCESSORS(name, type, offset) \ | 5830 #define ORDERED_HASH_TABLE_ITERATOR_ACCESSORS(name, type, offset) \ |
5828 template<class Derived, class TableType> \ | 5831 template<class Derived, class TableType> \ |
5829 type* OrderedHashTableIterator<Derived, TableType>::name() { \ | 5832 type* OrderedHashTableIterator<Derived, TableType>::name() const { \ |
5830 return type::cast(READ_FIELD(this, offset)); \ | 5833 return type::cast(READ_FIELD(this, offset)); \ |
5831 } \ | 5834 } \ |
5832 template<class Derived, class TableType> \ | 5835 template<class Derived, class TableType> \ |
5833 void OrderedHashTableIterator<Derived, TableType>::set_##name( \ | 5836 void OrderedHashTableIterator<Derived, TableType>::set_##name( \ |
5834 type* value, WriteBarrierMode mode) { \ | 5837 type* value, WriteBarrierMode mode) { \ |
5835 WRITE_FIELD(this, offset, value); \ | 5838 WRITE_FIELD(this, offset, value); \ |
5836 CONDITIONAL_WRITE_BARRIER(GetHeap(), this, offset, value, mode); \ | 5839 CONDITIONAL_WRITE_BARRIER(GetHeap(), this, offset, value, mode); \ |
5837 } | 5840 } |
5838 | 5841 |
5839 ORDERED_HASH_TABLE_ITERATOR_ACCESSORS(table, Object, kTableOffset) | 5842 ORDERED_HASH_TABLE_ITERATOR_ACCESSORS(table, Object, kTableOffset) |
(...skipping 195 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... |
6035 | 6038 |
6036 | 6039 |
6037 bool Code::contains(byte* inner_pointer) { | 6040 bool Code::contains(byte* inner_pointer) { |
6038 return (address() <= inner_pointer) && (inner_pointer <= address() + Size()); | 6041 return (address() <= inner_pointer) && (inner_pointer <= address() + Size()); |
6039 } | 6042 } |
6040 | 6043 |
6041 | 6044 |
6042 ACCESSORS(JSArray, length, Object, kLengthOffset) | 6045 ACCESSORS(JSArray, length, Object, kLengthOffset) |
6043 | 6046 |
6044 | 6047 |
6045 void* JSArrayBuffer::backing_store() { | 6048 void* JSArrayBuffer::backing_store() const { |
6046 intptr_t ptr = READ_INTPTR_FIELD(this, kBackingStoreOffset); | 6049 intptr_t ptr = READ_INTPTR_FIELD(this, kBackingStoreOffset); |
6047 return reinterpret_cast<void*>(ptr); | 6050 return reinterpret_cast<void*>(ptr); |
6048 } | 6051 } |
6049 | 6052 |
6050 | 6053 |
6051 void JSArrayBuffer::set_backing_store(void* value, WriteBarrierMode mode) { | 6054 void JSArrayBuffer::set_backing_store(void* value, WriteBarrierMode mode) { |
6052 intptr_t ptr = reinterpret_cast<intptr_t>(value); | 6055 intptr_t ptr = reinterpret_cast<intptr_t>(value); |
6053 WRITE_INTPTR_FIELD(this, kBackingStoreOffset, ptr); | 6056 WRITE_INTPTR_FIELD(this, kBackingStoreOffset, ptr); |
6054 } | 6057 } |
6055 | 6058 |
(...skipping 909 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... |
6965 #undef CAST_ACCESSOR | 6968 #undef CAST_ACCESSOR |
6966 #undef INT_ACCESSORS | 6969 #undef INT_ACCESSORS |
6967 #undef ACCESSORS | 6970 #undef ACCESSORS |
6968 #undef ACCESSORS_TO_SMI | 6971 #undef ACCESSORS_TO_SMI |
6969 #undef SMI_ACCESSORS | 6972 #undef SMI_ACCESSORS |
6970 #undef SYNCHRONIZED_SMI_ACCESSORS | 6973 #undef SYNCHRONIZED_SMI_ACCESSORS |
6971 #undef NOBARRIER_SMI_ACCESSORS | 6974 #undef NOBARRIER_SMI_ACCESSORS |
6972 #undef BOOL_GETTER | 6975 #undef BOOL_GETTER |
6973 #undef BOOL_ACCESSORS | 6976 #undef BOOL_ACCESSORS |
6974 #undef FIELD_ADDR | 6977 #undef FIELD_ADDR |
| 6978 #undef FIELD_ADDR_CONST |
6975 #undef READ_FIELD | 6979 #undef READ_FIELD |
6976 #undef NOBARRIER_READ_FIELD | 6980 #undef NOBARRIER_READ_FIELD |
6977 #undef WRITE_FIELD | 6981 #undef WRITE_FIELD |
6978 #undef NOBARRIER_WRITE_FIELD | 6982 #undef NOBARRIER_WRITE_FIELD |
6979 #undef WRITE_BARRIER | 6983 #undef WRITE_BARRIER |
6980 #undef CONDITIONAL_WRITE_BARRIER | 6984 #undef CONDITIONAL_WRITE_BARRIER |
6981 #undef READ_DOUBLE_FIELD | 6985 #undef READ_DOUBLE_FIELD |
6982 #undef WRITE_DOUBLE_FIELD | 6986 #undef WRITE_DOUBLE_FIELD |
6983 #undef READ_INT_FIELD | 6987 #undef READ_INT_FIELD |
6984 #undef WRITE_INT_FIELD | 6988 #undef WRITE_INT_FIELD |
6985 #undef READ_INTPTR_FIELD | 6989 #undef READ_INTPTR_FIELD |
6986 #undef WRITE_INTPTR_FIELD | 6990 #undef WRITE_INTPTR_FIELD |
6987 #undef READ_UINT32_FIELD | 6991 #undef READ_UINT32_FIELD |
6988 #undef WRITE_UINT32_FIELD | 6992 #undef WRITE_UINT32_FIELD |
6989 #undef READ_SHORT_FIELD | 6993 #undef READ_SHORT_FIELD |
6990 #undef WRITE_SHORT_FIELD | 6994 #undef WRITE_SHORT_FIELD |
6991 #undef READ_BYTE_FIELD | 6995 #undef READ_BYTE_FIELD |
6992 #undef WRITE_BYTE_FIELD | 6996 #undef WRITE_BYTE_FIELD |
6993 #undef NOBARRIER_READ_BYTE_FIELD | 6997 #undef NOBARRIER_READ_BYTE_FIELD |
6994 #undef NOBARRIER_WRITE_BYTE_FIELD | 6998 #undef NOBARRIER_WRITE_BYTE_FIELD |
6995 | 6999 |
6996 } } // namespace v8::internal | 7000 } } // namespace v8::internal |
6997 | 7001 |
6998 #endif // V8_OBJECTS_INL_H_ | 7002 #endif // V8_OBJECTS_INL_H_ |
OLD | NEW |