OLD | NEW |
1 // Copyright 2012 the V8 project authors. All rights reserved. | 1 // Copyright 2012 the V8 project authors. All rights reserved. |
2 // Redistribution and use in source and binary forms, with or without | 2 // Redistribution and use in source and binary forms, with or without |
3 // modification, are permitted provided that the following conditions are | 3 // modification, are permitted provided that the following conditions are |
4 // met: | 4 // met: |
5 // | 5 // |
6 // * Redistributions of source code must retain the above copyright | 6 // * Redistributions of source code must retain the above copyright |
7 // notice, this list of conditions and the following disclaimer. | 7 // notice, this list of conditions and the following disclaimer. |
8 // * Redistributions in binary form must reproduce the above | 8 // * Redistributions in binary form must reproduce the above |
9 // copyright notice, this list of conditions and the following | 9 // copyright notice, this list of conditions and the following |
10 // disclaimer in the documentation and/or other materials provided | 10 // disclaimer in the documentation and/or other materials provided |
(...skipping 107 matching lines...)
118 // Getter that returns a Smi as an int and writes an int as a Smi. | 118 // Getter that returns a Smi as an int and writes an int as a Smi. |
119 #define SMI_ACCESSORS(holder, name, offset) \ | 119 #define SMI_ACCESSORS(holder, name, offset) \ |
120 int holder::name() { \ | 120 int holder::name() { \ |
121 Object* value = READ_FIELD(this, offset); \ | 121 Object* value = READ_FIELD(this, offset); \ |
122 return Smi::cast(value)->value(); \ | 122 return Smi::cast(value)->value(); \ |
123 } \ | 123 } \ |
124 void holder::set_##name(int value) { \ | 124 void holder::set_##name(int value) { \ |
125 WRITE_FIELD(this, offset, Smi::FromInt(value)); \ | 125 WRITE_FIELD(this, offset, Smi::FromInt(value)); \ |
126 } | 126 } |
127 | 127 |
| 128 #define SYNCHRONIZED_SMI_ACCESSORS(holder, name, offset) \ |
| 129 int holder::synchronized_##name() { \ |
| 130 Object* value = ACQUIRE_READ_FIELD(this, offset); \ |
| 131 return Smi::cast(value)->value(); \ |
| 132 } \ |
| 133 void holder::synchronized_set_##name(int value) { \ |
| 134 RELEASE_WRITE_FIELD(this, offset, Smi::FromInt(value)); \ |
| 135 } |
| 136 |
128 | 137 |
129 #define BOOL_GETTER(holder, field, name, offset) \ | 138 #define BOOL_GETTER(holder, field, name, offset) \ |
130 bool holder::name() { \ | 139 bool holder::name() { \ |
131 return BooleanBit::get(field(), offset); \ | 140 return BooleanBit::get(field(), offset); \ |
132 } \ | 141 } \ |
133 | 142 |
134 | 143 |
135 #define BOOL_ACCESSORS(holder, field, name, offset) \ | 144 #define BOOL_ACCESSORS(holder, field, name, offset) \ |
136 bool holder::name() { \ | 145 bool holder::name() { \ |
137 return BooleanBit::get(field(), offset); \ | 146 return BooleanBit::get(field(), offset); \ |
(...skipping 950 matching lines...)
1088 return GetPropertyWithReceiver(this, key, attributes); | 1097 return GetPropertyWithReceiver(this, key, attributes); |
1089 } | 1098 } |
1090 | 1099 |
1091 | 1100 |
1092 #define FIELD_ADDR(p, offset) \ | 1101 #define FIELD_ADDR(p, offset) \ |
1093 (reinterpret_cast<byte*>(p) + offset - kHeapObjectTag) | 1102 (reinterpret_cast<byte*>(p) + offset - kHeapObjectTag) |
1094 | 1103 |
1095 #define READ_FIELD(p, offset) \ | 1104 #define READ_FIELD(p, offset) \ |
1096 (*reinterpret_cast<Object**>(FIELD_ADDR(p, offset))) | 1105 (*reinterpret_cast<Object**>(FIELD_ADDR(p, offset))) |
1097 | 1106 |
| 1107 #define ACQUIRE_READ_FIELD(p, offset) \ |
| 1108 reinterpret_cast<Object*>( \ |
| 1109 Acquire_Load(reinterpret_cast<AtomicWord*>(FIELD_ADDR(p, offset)))) |
| 1110 |
| 1111 #define NO_BARRIER_READ_FIELD(p, offset) \ |
| 1112 reinterpret_cast<Object*>( \ |
| 1113 NoBarrier_Load(reinterpret_cast<AtomicWord*>(FIELD_ADDR(p, offset)))) |
| 1114 |
1098 #define WRITE_FIELD(p, offset, value) \ | 1115 #define WRITE_FIELD(p, offset, value) \ |
1099 (*reinterpret_cast<Object**>(FIELD_ADDR(p, offset)) = value) | 1116 (*reinterpret_cast<Object**>(FIELD_ADDR(p, offset)) = value) |
1100 | 1117 |
| 1118 #define RELEASE_WRITE_FIELD(p, offset, value) \ |
| 1119 Release_Store(reinterpret_cast<AtomicWord*>(FIELD_ADDR(p, offset)), \ |
| 1120 reinterpret_cast<AtomicWord>(value)); |
| 1121 |
| 1122 #define NO_BARRIER_WRITE_FIELD(p, offset, value) \ |
| 1123 NoBarrier_Store(reinterpret_cast<AtomicWord*>(FIELD_ADDR(p, offset)), \ |
| 1124 reinterpret_cast<AtomicWord>(value)); |
| 1125 |
1101 #define WRITE_BARRIER(heap, object, offset, value) \ | 1126 #define WRITE_BARRIER(heap, object, offset, value) \ |
1102 heap->incremental_marking()->RecordWrite( \ | 1127 heap->incremental_marking()->RecordWrite( \ |
1103 object, HeapObject::RawField(object, offset), value); \ | 1128 object, HeapObject::RawField(object, offset), value); \ |
1104 if (heap->InNewSpace(value)) { \ | 1129 if (heap->InNewSpace(value)) { \ |
1105 heap->RecordWrite(object->address(), offset); \ | 1130 heap->RecordWrite(object->address(), offset); \ |
1106 } | 1131 } |
1107 | 1132 |
1108 #define CONDITIONAL_WRITE_BARRIER(heap, object, offset, value, mode) \ | 1133 #define CONDITIONAL_WRITE_BARRIER(heap, object, offset, value, mode) \ |
1109 if (mode == UPDATE_WRITE_BARRIER) { \ | 1134 if (mode == UPDATE_WRITE_BARRIER) { \ |
1110 heap->incremental_marking()->RecordWrite( \ | 1135 heap->incremental_marking()->RecordWrite( \ |
(...skipping 230 matching lines...)
1341 void HeapObject::set_map(Map* value) { | 1366 void HeapObject::set_map(Map* value) { |
1342 set_map_word(MapWord::FromMap(value)); | 1367 set_map_word(MapWord::FromMap(value)); |
1343 if (value != NULL) { | 1368 if (value != NULL) { |
1344 // TODO(1600) We are passing NULL as a slot because maps can never be on | 1369 // TODO(1600) We are passing NULL as a slot because maps can never be on |
1345 // evacuation candidate. | 1370 // evacuation candidate. |
1346 value->GetHeap()->incremental_marking()->RecordWrite(this, NULL, value); | 1371 value->GetHeap()->incremental_marking()->RecordWrite(this, NULL, value); |
1347 } | 1372 } |
1348 } | 1373 } |
1349 | 1374 |
1350 | 1375 |
| 1376 Map* HeapObject::synchronized_map() { |
| 1377 return synchronized_map_word().ToMap(); |
| 1378 } |
| 1379 |
| 1380 |
| 1381 void HeapObject::synchronized_set_map(Map* value) { |
| 1382 synchronized_set_map_word(MapWord::FromMap(value)); |
| 1383 if (value != NULL) { |
| 1384 // TODO(1600) We are passing NULL as a slot because maps can never be on |
| 1385 // evacuation candidate. |
| 1386 value->GetHeap()->incremental_marking()->RecordWrite(this, NULL, value); |
| 1387 } |
| 1388 } |
| 1389 |
| 1390 |
| 1391 Map* HeapObject::no_barrier_map() { |
| 1392 return no_barrier_map_word().ToMap(); |
| 1393 } |
| 1394 |
| 1395 |
| 1396 void HeapObject::no_barrier_set_map(Map* value) { |
| 1397 no_barrier_set_map_word(MapWord::FromMap(value)); |
| 1398 if (value != NULL) { |
| 1399 // TODO(1600) We are passing NULL as a slot because maps can never be on |
| 1400 // evacuation candidate. |
| 1401 value->GetHeap()->incremental_marking()->RecordWrite(this, NULL, value); |
| 1402 } |
| 1403 } |
| 1404 |
| 1405 |
1351 // Unsafe accessor omitting write barrier. | 1406 // Unsafe accessor omitting write barrier. |
1352 void HeapObject::set_map_no_write_barrier(Map* value) { | 1407 void HeapObject::set_map_no_write_barrier(Map* value) { |
1353 set_map_word(MapWord::FromMap(value)); | 1408 set_map_word(MapWord::FromMap(value)); |
1354 } | 1409 } |
1355 | 1410 |
1356 | 1411 |
1357 MapWord HeapObject::map_word() { | 1412 MapWord HeapObject::map_word() { |
1358 return MapWord(reinterpret_cast<uintptr_t>(READ_FIELD(this, kMapOffset))); | 1413 return MapWord(reinterpret_cast<uintptr_t>(READ_FIELD(this, kMapOffset))); |
1359 } | 1414 } |
1360 | 1415 |
1361 | 1416 |
1362 void HeapObject::set_map_word(MapWord map_word) { | 1417 void HeapObject::set_map_word(MapWord map_word) { |
1363 // WRITE_FIELD does not invoke write barrier, but there is no need | 1418 // WRITE_FIELD does not invoke write barrier, but there is no need |
1364 // here. | 1419 // here. |
1365 WRITE_FIELD(this, kMapOffset, reinterpret_cast<Object*>(map_word.value_)); | 1420 WRITE_FIELD(this, kMapOffset, reinterpret_cast<Object*>(map_word.value_)); |
1366 } | 1421 } |
1367 | 1422 |
1368 | 1423 |
| 1424 MapWord HeapObject::synchronized_map_word() { |
| 1425 return MapWord( |
| 1426 reinterpret_cast<uintptr_t>(ACQUIRE_READ_FIELD(this, kMapOffset))); |
| 1427 } |
| 1428 |
| 1429 |
| 1430 void HeapObject::synchronized_set_map_word(MapWord map_word) { |
| 1431 // RELEASE_WRITE_FIELD does not invoke write barrier, but there is no need |
| 1432 // here. |
| 1433 RELEASE_WRITE_FIELD( |
| 1434 this, kMapOffset, reinterpret_cast<Object*>(map_word.value_)); |
| 1435 } |
| 1436 |
| 1437 |
| 1438 MapWord HeapObject::no_barrier_map_word() { |
| 1439 return MapWord( |
| 1440 reinterpret_cast<uintptr_t>(NO_BARRIER_READ_FIELD(this, kMapOffset))); |
| 1441 } |
| 1442 |
| 1443 |
| 1444 void HeapObject::no_barrier_set_map_word(MapWord map_word) { |
| 1445 // NO_BARRIER_WRITE_FIELD does not invoke write barrier, but there is no |
| 1446 // need here. |
| 1447 NO_BARRIER_WRITE_FIELD( |
| 1448 this, kMapOffset, reinterpret_cast<Object*>(map_word.value_)); |
| 1449 } |
| 1450 |
| 1451 |
1369 HeapObject* HeapObject::FromAddress(Address address) { | 1452 HeapObject* HeapObject::FromAddress(Address address) { |
1370 ASSERT_TAG_ALIGNED(address); | 1453 ASSERT_TAG_ALIGNED(address); |
1371 return reinterpret_cast<HeapObject*>(address + kHeapObjectTag); | 1454 return reinterpret_cast<HeapObject*>(address + kHeapObjectTag); |
1372 } | 1455 } |
1373 | 1456 |
1374 | 1457 |
1375 Address HeapObject::address() { | 1458 Address HeapObject::address() { |
1376 return reinterpret_cast<Address>(this) - kHeapObjectTag; | 1459 return reinterpret_cast<Address>(this) - kHeapObjectTag; |
1377 } | 1460 } |
1378 | 1461 |
(...skipping 1528 matching lines...)
2907 | 2990 |
2908 | 2991 |
2909 template <typename Shape, typename Key> | 2992 template <typename Shape, typename Key> |
2910 HashTable<Shape, Key>* HashTable<Shape, Key>::cast(Object* obj) { | 2993 HashTable<Shape, Key>* HashTable<Shape, Key>::cast(Object* obj) { |
2911 ASSERT(obj->IsHashTable()); | 2994 ASSERT(obj->IsHashTable()); |
2912 return reinterpret_cast<HashTable*>(obj); | 2995 return reinterpret_cast<HashTable*>(obj); |
2913 } | 2996 } |
2914 | 2997 |
2915 | 2998 |
2916 SMI_ACCESSORS(FixedArrayBase, length, kLengthOffset) | 2999 SMI_ACCESSORS(FixedArrayBase, length, kLengthOffset) |
| 3000 SYNCHRONIZED_SMI_ACCESSORS(FixedArrayBase, length, kLengthOffset) |
| 3001 |
2917 SMI_ACCESSORS(FreeSpace, size, kSizeOffset) | 3002 SMI_ACCESSORS(FreeSpace, size, kSizeOffset) |
2918 | 3003 |
2919 SMI_ACCESSORS(String, length, kLengthOffset) | 3004 SMI_ACCESSORS(String, length, kLengthOffset) |
| 3005 SYNCHRONIZED_SMI_ACCESSORS(String, length, kLengthOffset) |
2920 | 3006 |
2921 | 3007 |
2922 uint32_t Name::hash_field() { | 3008 uint32_t Name::hash_field() { |
2923 return READ_UINT32_FIELD(this, kHashFieldOffset); | 3009 return READ_UINT32_FIELD(this, kHashFieldOffset); |
2924 } | 3010 } |
2925 | 3011 |
2926 | 3012 |
2927 void Name::set_hash_field(uint32_t value) { | 3013 void Name::set_hash_field(uint32_t value) { |
2928 WRITE_UINT32_FIELD(this, kHashFieldOffset, value); | 3014 WRITE_UINT32_FIELD(this, kHashFieldOffset, value); |
2929 #if V8_HOST_ARCH_64_BIT | 3015 #if V8_HOST_ARCH_64_BIT |
(...skipping 3859 matching lines...)
6789 #undef READ_UINT32_FIELD | 6875 #undef READ_UINT32_FIELD |
6790 #undef WRITE_UINT32_FIELD | 6876 #undef WRITE_UINT32_FIELD |
6791 #undef READ_SHORT_FIELD | 6877 #undef READ_SHORT_FIELD |
6792 #undef WRITE_SHORT_FIELD | 6878 #undef WRITE_SHORT_FIELD |
6793 #undef READ_BYTE_FIELD | 6879 #undef READ_BYTE_FIELD |
6794 #undef WRITE_BYTE_FIELD | 6880 #undef WRITE_BYTE_FIELD |
6795 | 6881 |
6796 } } // namespace v8::internal | 6882 } } // namespace v8::internal |
6797 | 6883 |
6798 #endif // V8_OBJECTS_INL_H_ | 6884 #endif // V8_OBJECTS_INL_H_ |
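
The ACQUIRE_READ_FIELD / RELEASE_WRITE_FIELD pair added above wraps Acquire_Load / Release_Store, i.e. the standard release-acquire publication idiom: a reader that observes the releasing store of a field (the synchronized length or map word) is guaranteed to also observe every plain write performed before that store. Below is a minimal, self-contained sketch of the same idiom using C++11 std::atomic; the names are hypothetical and it only illustrates the ordering guarantee the new accessors rely on, not actual V8 code.

#include <atomic>
#include <cassert>
#include <thread>

// Hypothetical stand-ins: `payload` plays the role of an object's body,
// `length_field` the role of a length slot written with a release store.
static int payload = 0;
static std::atomic<intptr_t> length_field{0};

void Writer() {
  payload = 42;                                      // plain write (WRITE_FIELD-style)
  length_field.store(1, std::memory_order_release);  // RELEASE_WRITE_FIELD-style
}

void Reader() {
  // ACQUIRE_READ_FIELD-style: once the release store is visible,
  // so is every write the writer thread performed before it.
  while (length_field.load(std::memory_order_acquire) == 0) {
  }
  assert(payload == 42);  // guaranteed by release-acquire ordering
}

int main() {
  std::thread writer(Writer), reader(Reader);
  writer.join();
  reader.join();
  return 0;
}

The NO_BARRIER variants (NoBarrier_Load / NoBarrier_Store) roughly correspond to relaxed atomics: the word itself is read or written atomically, but no ordering with respect to surrounding accesses is implied.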