Chromium Code Reviews

Side by Side Diff: src/heap.cc

Issue 2895008: Virtually dispatched specialized scavengers. (Closed)
Patch Set: Created 10 years, 5 months ago
1 // Copyright 2009 the V8 project authors. All rights reserved. 1 // Copyright 2009 the V8 project authors. All rights reserved.
2 // Redistribution and use in source and binary forms, with or without 2 // Redistribution and use in source and binary forms, with or without
3 // modification, are permitted provided that the following conditions are 3 // modification, are permitted provided that the following conditions are
4 // met: 4 // met:
5 // 5 //
6 // * Redistributions of source code must retain the above copyright 6 // * Redistributions of source code must retain the above copyright
7 // notice, this list of conditions and the following disclaimer. 7 // notice, this list of conditions and the following disclaimer.
8 // * Redistributions in binary form must reproduce the above 8 // * Redistributions in binary form must reproduce the above
9 // copyright notice, this list of conditions and the following 9 // copyright notice, this list of conditions and the following
10 // disclaimer in the documentation and/or other materials provided 10 // disclaimer in the documentation and/or other materials provided
(...skipping 781 matching lines...)
792 private: 792 private:
793 void ScavengePointer(Object** p) { 793 void ScavengePointer(Object** p) {
794 Object* object = *p; 794 Object* object = *p;
795 if (!Heap::InNewSpace(object)) return; 795 if (!Heap::InNewSpace(object)) return;
796 Heap::ScavengeObject(reinterpret_cast<HeapObject**>(p), 796 Heap::ScavengeObject(reinterpret_cast<HeapObject**>(p),
797 reinterpret_cast<HeapObject*>(object)); 797 reinterpret_cast<HeapObject*>(object));
798 } 798 }
799 }; 799 };
800 800
801 801
802 // A queue of pointers and maps of to-be-promoted objects during a 802 // A queue of pointers and maps of to-be-promoted objects during a
Mads Ager (chromium) 2010/07/12 11:32:13 This comment needs updating. The maps have been re
803 // scavenge collection. 803 // scavenge collection.
804 class PromotionQueue { 804 class PromotionQueue {
805 public: 805 public:
806 void Initialize(Address start_address) { 806 void Initialize(Address start_address) {
807 front_ = rear_ = reinterpret_cast<HeapObject**>(start_address); 807 front_ = rear_ = reinterpret_cast<intptr_t*>(start_address);
808 } 808 }
809 809
810 bool is_empty() { return front_ <= rear_; } 810 bool is_empty() { return front_ <= rear_; }
811 811
812 void insert(HeapObject* object, Map* map) { 812 void insert(HeapObject* target, int size) {
813 *(--rear_) = object; 813 *(--rear_) = reinterpret_cast<intptr_t>(target);
814 *(--rear_) = map; 814 *(--rear_) = size;
815 // Assert no overflow into live objects. 815 // Assert no overflow into live objects.
816 ASSERT(reinterpret_cast<Address>(rear_) >= Heap::new_space()->top()); 816 ASSERT(reinterpret_cast<Address>(rear_) >= Heap::new_space()->top());
817 } 817 }
818 818
819 void remove(HeapObject** object, Map** map) { 819 void remove(HeapObject** target, int* size) {
820 *object = *(--front_); 820 *target = reinterpret_cast<HeapObject*>(*(--front_));
821 *map = Map::cast(*(--front_)); 821 *size = *(--front_);
822 // Assert no underflow. 822 // Assert no underflow.
823 ASSERT(front_ >= rear_); 823 ASSERT(front_ >= rear_);
824 } 824 }
825 825
826 private: 826 private:
827 // The front of the queue is higher in memory than the rear. 827 // The front of the queue is higher in memory than the rear.
828 HeapObject** front_; 828 intptr_t* front_;
829 HeapObject** rear_; 829 intptr_t* rear_;
830 }; 830 };
831 831
832 832
833 // Shared state read by the scavenge collector and set by ScavengeObject. 833 // Shared state read by the scavenge collector and set by ScavengeObject.
834 static PromotionQueue promotion_queue; 834 static PromotionQueue promotion_queue;
835 835
836 836
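
The rewritten PromotionQueue stores a raw (target, size) pair per entry instead of an (object, map) pair, and grows downward from the top of new space so the unused tail of to-space doubles as queue storage. A minimal stand-alone model of the same layout, with hypothetical names and a plain buffer standing in for new space:

  #include <cassert>
  #include <cstdint>

  // The queue grows toward lower addresses: rear_ moves down on insert,
  // front_ consumes entries from the high end in FIFO order.
  class ToyPromotionQueue {
   public:
    void Initialize(intptr_t* high_end) { front_ = rear_ = high_end; }

    bool is_empty() const { return front_ <= rear_; }

    void insert(intptr_t target, intptr_t size) {
      *(--rear_) = target;
      *(--rear_) = size;
    }

    void remove(intptr_t* target, intptr_t* size) {
      *target = *(--front_);
      *size = *(--front_);
      assert(front_ >= rear_);  // no underflow past unconsumed entries
    }

   private:
    intptr_t* front_;  // always higher in memory than rear_
    intptr_t* rear_;
  };

  // Usage: place the queue at the high end of a scratch buffer.
  //   intptr_t buffer[64];
  //   ToyPromotionQueue q;
  //   q.Initialize(buffer + 64);
  //   q.insert(0x1234, 16);
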
837 #ifdef DEBUG 837 #ifdef DEBUG
838 // Visitor class to verify pointers in code or data space do not point into 838 // Visitor class to verify pointers in code or data space do not point into
839 // new space. 839 // new space.
(...skipping 194 matching lines...)
1034 Address Heap::DoScavenge(ObjectVisitor* scavenge_visitor, 1034 Address Heap::DoScavenge(ObjectVisitor* scavenge_visitor,
1035 Address new_space_front) { 1035 Address new_space_front) {
1036 do { 1036 do {
1037 ASSERT(new_space_front <= new_space_.top()); 1037 ASSERT(new_space_front <= new_space_.top());
1038 1038
1039 // The addresses new_space_front and new_space_.top() define a 1039 // The addresses new_space_front and new_space_.top() define a
1040 // queue of unprocessed copied objects. Process them until the 1040 // queue of unprocessed copied objects. Process them until the
1041 // queue is empty. 1041 // queue is empty.
1042 while (new_space_front < new_space_.top()) { 1042 while (new_space_front < new_space_.top()) {
1043 HeapObject* object = HeapObject::FromAddress(new_space_front); 1043 HeapObject* object = HeapObject::FromAddress(new_space_front);
1044 object->Iterate(scavenge_visitor); 1044 Map* map = object->map();
1045 new_space_front += object->Size(); 1045 int size = object->SizeFromMap(map);
1046 object->IterateBody(map->instance_type(), size, scavenge_visitor);
1047 new_space_front += size;
1046 } 1048 }
1047 1049
1048 // Promote and process all the to-be-promoted objects. 1050 // Promote and process all the to-be-promoted objects.
1049 while (!promotion_queue.is_empty()) { 1051 while (!promotion_queue.is_empty()) {
1050 HeapObject* source; 1052 HeapObject* target;
1051 Map* map; 1053 int size;
1052 promotion_queue.remove(&source, &map); 1054 promotion_queue.remove(&target, &size);
1053 // Copy the from-space object to its new location (given by the
1054 // forwarding address) and fix its map.
1055 HeapObject* target = source->map_word().ToForwardingAddress();
1056 int size = source->SizeFromMap(map);
1057 CopyBlock(target->address(), source->address(), size);
1058 target->set_map(map);
1059 1055
1060 #if defined(DEBUG) || defined(ENABLE_LOGGING_AND_PROFILING) 1056 // A promoted object might already have been partially visited
1061 // Update NewSpace stats if necessary. 1057 // during dirty-region iteration. Thus we search specifically
1062 RecordCopiedObject(target); 1058 // for pointers into the from semispace instead of looking for
1063 #endif 1059 // pointers into new space.
1064 // Visit the newly copied object for pointers to new space.
1065 ASSERT(!target->IsMap()); 1060 ASSERT(!target->IsMap());
1066 IterateAndMarkPointersToNewSpace(target->address(), 1061 IterateAndMarkPointersToFromSpace(target->address(),
1067 target->address() + size, 1062 target->address() + size,
1068 &ScavengePointer); 1063 &ScavengePointer);
1069 } 1064 }
1070 1065
1071 // Take another spin if there are now unswept objects in new space 1066 // Take another spin if there are now unswept objects in new space
1072 // (there are currently no more unswept promoted objects). 1067 // (there are currently no more unswept promoted objects).
1073 } while (new_space_front < new_space_.top()); 1068 } while (new_space_front < new_space_.top());
1074 1069
1075 return new_space_front; 1070 return new_space_front;
1076 } 1071 }
1077 1072
1078 1073
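
DoScavenge interleaves two sources of work: the region between new_space_front and new_space_.top() is the queue of copied-but-unscanned objects (the grey region of Cheney's algorithm), and the promotion queue holds promoted objects whose bodies still need visiting. Scanning either source can push work onto both, which is why the outer do/while loops until neither produces anything new. A toy model of just that control flow, all names hypothetical:

  #include <deque>
  #include <vector>

  struct ToyScavenger {
    std::vector<int> new_space;   // copied objects, scanned in order
    std::deque<int> promoted;     // promotion queue
    size_t scan = 0;              // plays the role of new_space_front

    // Scanning one object may "copy" or "promote" children, i.e. add
    // work to either queue; values shrink, so the process terminates.
    void Scan(int item) {
      if (item > 0) new_space.push_back(item - 1);
      if (item > 1) promoted.push_back(item - 2);
    }

    void DoScavenge() {
      do {
        while (scan < new_space.size()) Scan(new_space[scan++]);
        while (!promoted.empty()) {
          int item = promoted.front();
          promoted.pop_front();
          Scan(item);  // may extend new_space, forcing another pass
        }
      } while (scan < new_space.size());
    }
  };
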
1079 #if defined(DEBUG) || defined(ENABLE_LOGGING_AND_PROFILING) 1074 #if defined(DEBUG) || defined(ENABLE_LOGGING_AND_PROFILING)
1080 void Heap::RecordCopiedObject(HeapObject* obj) { 1075 static void RecordCopiedObject(HeapObject* obj) {
1081 bool should_record = false; 1076 bool should_record = false;
1082 #ifdef DEBUG 1077 #ifdef DEBUG
1083 should_record = FLAG_heap_stats; 1078 should_record = FLAG_heap_stats;
1084 #endif 1079 #endif
1085 #ifdef ENABLE_LOGGING_AND_PROFILING 1080 #ifdef ENABLE_LOGGING_AND_PROFILING
1086 should_record = should_record || FLAG_log_gc; 1081 should_record = should_record || FLAG_log_gc;
1087 #endif 1082 #endif
1088 if (should_record) { 1083 if (should_record) {
1089 if (new_space_.Contains(obj)) { 1084 if (Heap::new_space()->Contains(obj)) {
1090 new_space_.RecordAllocation(obj); 1085 Heap::new_space()->RecordAllocation(obj);
1091 } else { 1086 } else {
1092 new_space_.RecordPromotion(obj); 1087 Heap::new_space()->RecordPromotion(obj);
1093 } 1088 }
1094 } 1089 }
1095 } 1090 }
1096 #endif // defined(DEBUG) || defined(ENABLE_LOGGING_AND_PROFILING) 1091 #endif // defined(DEBUG) || defined(ENABLE_LOGGING_AND_PROFILING)
1097 1092
1098 1093
1099 1094 // Helper function used by CopyObject to copy a source object to an
1100 HeapObject* Heap::MigrateObject(HeapObject* source, 1095 // allocated target object and update the forwarding pointer in the source
1101 HeapObject* target, 1096 // object. Returns the target object.
1102 int size) { 1097 inline static HeapObject* MigrateObject(HeapObject* source,
1098 HeapObject* target,
1099 int size) {
1103 // Copy the content of source to target. 1100 // Copy the content of source to target.
1104 CopyBlock(target->address(), source->address(), size); 1101 Heap::CopyBlock(target->address(), source->address(), size);
1105 1102
1106 // Set the forwarding address. 1103 // Set the forwarding address.
1107 source->set_map_word(MapWord::FromForwardingAddress(target)); 1104 source->set_map_word(MapWord::FromForwardingAddress(target));
1108 1105
1109 #if defined(DEBUG) || defined(ENABLE_LOGGING_AND_PROFILING) 1106 #if defined(DEBUG) || defined(ENABLE_LOGGING_AND_PROFILING)
1110 // Update NewSpace stats if necessary. 1107 // Update NewSpace stats if necessary.
1111 RecordCopiedObject(target); 1108 RecordCopiedObject(target);
1112 #endif 1109 #endif
1113 1110
1114 return target; 1111 return target;
1115 } 1112 }
1116 1113
1117 1114
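
MigrateObject uses the classic forwarding trick: once the contents are copied, the from-space original's map word is overwritten with the address of the new copy, so any pointer that still reaches the stale object can be redirected without a side table. A toy illustration of the idea (not V8's actual MapWord encoding):

  #include <cassert>
  #include <cstring>

  // The first word of the stale copy is reused as a forwarding pointer.
  struct Obj { Obj* first_word; int payload; };

  static Obj* Migrate(Obj* source, Obj* target) {
    std::memcpy(target, source, sizeof(Obj));  // copy the contents
    source->first_word = target;               // install forwarding address
    return target;
  }

  int main() {
    Obj from_space = { nullptr, 42 };
    Obj to_space;
    Obj* target = Migrate(&from_space, &to_space);
    assert(from_space.first_word == target);   // stale copy now forwards
    assert(target->payload == 42);
  }
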
1118 static inline bool IsShortcutCandidate(HeapObject* object, Map* map) { 1115 enum ObjectContents { DATA_OBJECT, POINTER_OBJECT };
1119 STATIC_ASSERT(kNotStringTag != 0 && kSymbolTag != 0); 1116 enum SizeRestriction { SMALL, UNKNOWN_SIZE };
1120 ASSERT(object->map() == map); 1117
1121 InstanceType type = map->instance_type(); 1118
1122 if ((type & kShortcutTypeMask) != kShortcutTypeTag) return false; 1119 template<ObjectContents object_contents, SizeRestriction size_restriction>
1123 ASSERT(object->IsString() && !object->IsSymbol()); 1120 static inline void EvacuateObject(Map* map,
1124 return ConsString::cast(object)->unchecked_second() == Heap::empty_string(); 1121 HeapObject** slot,
1122 HeapObject* object,
1123 int object_size) {
1124 ASSERT((size_restriction != SMALL) ||
1125 (object_size <= Page::kMaxHeapObjectSize));
1126 ASSERT(object->Size() == object_size);
1127
1128 if (Heap::ShouldBePromoted(object->address(), object_size)) {
1129 Object* result;
1130
1131 if ((size_restriction != SMALL) &&
1132 (object_size > Page::kMaxHeapObjectSize)) {
1133 result = Heap::lo_space()->AllocateRawFixedArray(object_size);
1134 } else {
1135 if (object_contents == DATA_OBJECT) {
1136 result = Heap::old_data_space()->AllocateRaw(object_size);
1137 } else {
1138 result = Heap::old_pointer_space()->AllocateRaw(object_size);
1139 }
1140 }
1141
1142 if (!result->IsFailure()) {
1143 HeapObject* target = HeapObject::cast(result);
1144 *slot = MigrateObject(object, target, object_size);
1145
1146 if (object_contents == POINTER_OBJECT) {
1147 promotion_queue.insert(target, object_size);
1148 }
1149
1150 Heap::tracer()->increment_promoted_objects_size(object_size);
1151 return;
1152 }
1153 }
1154 Object* result = Heap::new_space()->AllocateRaw(object_size);
1155 ASSERT(!result->IsFailure());
1156 *slot = MigrateObject(object, HeapObject::cast(result), object_size);
1157 return;
1158 }
1159
1160
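
Since object_contents and size_restriction are template parameters, every test against them inside EvacuateObject is a compile-time constant: each instantiation keeps exactly one allocation path and the dead branches disappear. A reduced sketch of the pattern (the enum names mirror the patch, the bodies are placeholders):

  enum ObjectContents { DATA_OBJECT, POINTER_OBJECT };
  enum SizeRestriction { SMALL, UNKNOWN_SIZE };

  template<ObjectContents object_contents, SizeRestriction size_restriction>
  static void Evacuate(int object_size) {
    if ((size_restriction != SMALL) && (object_size > 8 * 1024)) {
      // large-object path: only reachable in UNKNOWN_SIZE instantiations
    } else if (object_contents == DATA_OBJECT) {
      // old-data-space path: chosen or discarded at compile time
    } else {
      // old-pointer-space path, which also enqueues for later scanning
    }
  }

  // Each combination names a distinct specialized function:
  //   Evacuate<DATA_OBJECT, UNKNOWN_SIZE>(size);   // byte arrays, strings
  //   Evacuate<POINTER_OBJECT, SMALL>(size);       // small pointer objects
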
1161 template<int object_size_in_words, ObjectContents object_contents>
1162 static inline void EvacuateObjectOfFixedSize(Map* map,
1163 HeapObject** slot,
1164 HeapObject* object) {
1165 const int object_size = object_size_in_words << kPointerSizeLog2;
1166 EvacuateObject<object_contents, SMALL>(map, slot, object, object_size);
1167 }
1168
1169
1170 template<ObjectContents object_contents>
1171 static inline void EvacuateObjectOfFixedSize(Map* map,
1172 HeapObject** slot,
1173 HeapObject* object) {
1174 int object_size = map->instance_size();
1175 EvacuateObject<object_contents, SMALL>(map, slot, object, object_size);
1176 }
1177
1178
1179 static inline void EvacuateFixedArray(Map* map,
1180 HeapObject** slot,
1181 HeapObject* object) {
1182 int object_size = FixedArray::cast(object)->FixedArraySize();
1183 EvacuateObject<POINTER_OBJECT, UNKNOWN_SIZE>(map, slot, object, object_size);
1184 }
1185
1186
1187 static inline void EvacuateByteArray(Map* map,
1188 HeapObject** slot,
1189 HeapObject* object) {
1190 int object_size = ByteArray::cast(object)->ByteArraySize();
1191 EvacuateObject<DATA_OBJECT, UNKNOWN_SIZE>(map, slot, object, object_size);
1192 }
1193
1194
1195 static Scavenger GetScavengerForSize(int object_size,
1196 ObjectContents object_contents) {
1197 ASSERT(IsAligned(object_size, kPointerSize));
1198 ASSERT(object_size < Page::kMaxHeapObjectSize);
1199
1200 switch (object_size >> kPointerSizeLog2) {
1201 #define CASE(n) \
1202 case n: \
1203 if (object_contents == DATA_OBJECT) { \
1204 return static_cast<Scavenger>( \
1205 &EvacuateObjectOfFixedSize<n, DATA_OBJECT>); \
1206 } else { \
1207 return static_cast<Scavenger>( \
1208 &EvacuateObjectOfFixedSize<n, POINTER_OBJECT>); \
1209 }
1210
1211 CASE(1);
1212 CASE(2);
1213 CASE(3);
1214 CASE(4);
1215 CASE(5);
1216 CASE(6);
1217 CASE(7);
1218 CASE(8);
1219 CASE(9);
1220 CASE(10);
1221 CASE(11);
1222 CASE(12);
1223 CASE(13);
1224 CASE(14);
1225 CASE(15);
1226 CASE(16);
1227 default:
1228 if (object_contents == DATA_OBJECT) {
1229 return static_cast<Scavenger>(&EvacuateObjectOfFixedSize<DATA_OBJECT>);
1230 } else {
1231 return static_cast<Scavenger>(
1232 &EvacuateObjectOfFixedSize<POINTER_OBJECT>);
1233 }
1234
1235 #undef CASE
1236 }
1237 }
1238
1239
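
GetScavengerForSize maps a word count to a pointer at a fully specialized evacuation routine; the CASE macro merely stamps out one switch arm per size class. After preprocessing, CASE(2) comes out roughly as:

  case 2:
    if (object_contents == DATA_OBJECT) {
      return static_cast<Scavenger>(
          &EvacuateObjectOfFixedSize<2, DATA_OBJECT>);
    } else {
      return static_cast<Scavenger>(
          &EvacuateObjectOfFixedSize<2, POINTER_OBJECT>);
    }
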
1240 static inline void EvacuateSeqAsciiString(Map* map,
1241 HeapObject** slot,
1242 HeapObject* object) {
1243 int object_size = SeqAsciiString::cast(object)->
1244 SeqAsciiStringSize(map->instance_type());
1245 EvacuateObject<DATA_OBJECT, UNKNOWN_SIZE>(map, slot, object, object_size);
1246 }
1247
1248
1249 static inline void EvacuateSeqTwoByteString(Map* map,
1250 HeapObject** slot,
1251 HeapObject* object) {
1252 int object_size = SeqTwoByteString::cast(object)->
1253 SeqTwoByteStringSize(map->instance_type());
1254 EvacuateObject<DATA_OBJECT, UNKNOWN_SIZE>(map, slot, object, object_size);
1255 }
1256
1257
1258 static inline bool IsShortcutCandidate(int type) {
1259 return ((type & kShortcutTypeMask) == kShortcutTypeTag);
1260 }
1261
1262
1263 static inline void EvacuateShortcutCandidate(Map* map,
1264 HeapObject** slot,
1265 HeapObject* object) {
1266 ASSERT(IsShortcutCandidate(map->instance_type()));
1267
1268 if (ConsString::cast(object)->unchecked_second() == Heap::empty_string()) {
1269 HeapObject* first =
1270 HeapObject::cast(ConsString::cast(object)->unchecked_first());
1271
1272 *slot = first;
1273
1274 if (!Heap::InNewSpace(first)) {
1275 object->set_map_word(MapWord::FromForwardingAddress(first));
1276 return;
1277 }
1278
1279 MapWord first_word = first->map_word();
1280 if (first_word.IsForwardingAddress()) {
1281 HeapObject* target = first_word.ToForwardingAddress();
1282
1283 *slot = target;
1284 object->set_map_word(MapWord::FromForwardingAddress(target));
1285 return;
1286 }
1287
1288 first->map()->Scavenge(slot, first);
1289 object->set_map_word(MapWord::FromForwardingAddress(*slot));
1290 return;
1291 }
1292
1293 int object_size = ConsString::kSize;
1294 EvacuateObject<POINTER_OBJECT, SMALL>(map, slot, object, object_size);
1295 }
1296
1297
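
EvacuateShortcutCandidate handles flattened cons strings: when the second component is the empty string, the wrapper stands for its first component, so the slot is patched directly to first and the wrapper's map word becomes a forwarding pointer; the two-word ConsString is never copied at all. A toy model of the shortcut (illustrative names, not V8 types):

  #include <cassert>
  #include <string>

  struct ToyCons {
    const std::string* first;
    const std::string* second;
    const void* forwarded = nullptr;  // plays the forwarding map word
  };

  static const std::string kEmptyString;

  static void EvacuateShortcut(const void** slot, ToyCons* cons) {
    if (cons->second == &kEmptyString) {  // flattened: shortcut to first
      *slot = cons->first;
      cons->forwarded = cons->first;      // later visitors see the shortcut
      return;
    }
    // ...otherwise the cons cell itself would be evacuated as usual...
  }

  int main() {
    std::string payload = "payload";
    ToyCons cons = { &payload, &kEmptyString };
    const void* slot = &cons;
    EvacuateShortcut(&slot, &cons);
    assert(slot == &payload);  // reference now bypasses the wrapper
  }
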
1298 Scavenger Heap::GetScavenger(int instance_type, int instance_size) {
1299 if (instance_type < FIRST_NONSTRING_TYPE) {
1300 switch (instance_type & kStringRepresentationMask) {
1301 case kSeqStringTag:
1302 if ((instance_type & kStringEncodingMask) == kAsciiStringTag) {
1303 return &EvacuateSeqAsciiString;
1304 } else {
1305 return &EvacuateSeqTwoByteString;
1306 }
1307
1308 case kConsStringTag:
1309 if (IsShortcutCandidate(instance_type)) {
1310 return &EvacuateShortcutCandidate;
1311 } else {
1312 ASSERT(instance_size == ConsString::kSize);
1313 return GetScavengerForSize(ConsString::kSize, POINTER_OBJECT);
1314 }
1315
1316 case kExternalStringTag:
1317 ASSERT(instance_size == ExternalString::kSize);
1318 return GetScavengerForSize(ExternalString::kSize, DATA_OBJECT);
1319 }
1320 UNREACHABLE();
1321 }
1322
1323 switch (instance_type) {
1324 case BYTE_ARRAY_TYPE:
1325 return reinterpret_cast<Scavenger>(&EvacuateByteArray);
1326
1327 case FIXED_ARRAY_TYPE:
1328 return reinterpret_cast<Scavenger>(&EvacuateFixedArray);
1329
1330 case JS_OBJECT_TYPE:
1331 case JS_CONTEXT_EXTENSION_OBJECT_TYPE:
1332 case JS_VALUE_TYPE:
1333 case JS_ARRAY_TYPE:
1334 case JS_REGEXP_TYPE:
1335 case JS_FUNCTION_TYPE:
1336 case JS_GLOBAL_PROXY_TYPE:
1337 case JS_GLOBAL_OBJECT_TYPE:
1338 case JS_BUILTINS_OBJECT_TYPE:
1339 return GetScavengerForSize(instance_size, POINTER_OBJECT);
1340
1341 case ODDBALL_TYPE:
1342 return NULL;
1343
1344 case PROXY_TYPE:
1345 return GetScavengerForSize(Proxy::kSize, DATA_OBJECT);
1346
1347 case MAP_TYPE:
1348 return NULL;
1349
1350 case CODE_TYPE:
1351 return NULL;
1352
1353 case JS_GLOBAL_PROPERTY_CELL_TYPE:
1354 return NULL;
1355
1356 case HEAP_NUMBER_TYPE:
1357 case FILLER_TYPE:
1358 case PIXEL_ARRAY_TYPE:
1359 case EXTERNAL_BYTE_ARRAY_TYPE:
1360 case EXTERNAL_UNSIGNED_BYTE_ARRAY_TYPE:
1361 case EXTERNAL_SHORT_ARRAY_TYPE:
1362 case EXTERNAL_UNSIGNED_SHORT_ARRAY_TYPE:
1363 case EXTERNAL_INT_ARRAY_TYPE:
1364 case EXTERNAL_UNSIGNED_INT_ARRAY_TYPE:
1365 case EXTERNAL_FLOAT_ARRAY_TYPE:
1366 return GetScavengerForSize(instance_size, DATA_OBJECT);
1367
1368 case SHARED_FUNCTION_INFO_TYPE:
1369 return GetScavengerForSize(SharedFunctionInfo::kSize, POINTER_OBJECT);
1370
1371 #define MAKE_STRUCT_CASE(NAME, Name, name) \
1372 case NAME##_TYPE:
1373 STRUCT_LIST(MAKE_STRUCT_CASE)
1374 #undef MAKE_STRUCT_CASE
1375 return GetScavengerForSize(instance_size, POINTER_OBJECT);
1376 default:
1377 UNREACHABLE();
1378 return NULL;
1379 }
1125 } 1380 }
1126 1381
1127 1382
1128 void Heap::ScavengeObjectSlow(HeapObject** p, HeapObject* object) { 1383 void Heap::ScavengeObjectSlow(HeapObject** p, HeapObject* object) {
1129 ASSERT(InFromSpace(object)); 1384 ASSERT(InFromSpace(object));
1130 MapWord first_word = object->map_word(); 1385 MapWord first_word = object->map_word();
1131 ASSERT(!first_word.IsForwardingAddress()); 1386 ASSERT(!first_word.IsForwardingAddress());
1132 1387 Map* map = first_word.ToMap();
1133 // Optimization: Bypass flattened ConsString objects. 1388 map->Scavenge(p, object);
1134 if (IsShortcutCandidate(object, first_word.ToMap())) { 1389 }
1135 object = HeapObject::cast(ConsString::cast(object)->unchecked_first()); 1390
1136 *p = object; 1391
1137 // After patching *p we have to repeat the checks that object is in the
1138 // active semispace of the young generation and not already copied.
1139 if (!InNewSpace(object)) return;
1140 first_word = object->map_word();
1141 if (first_word.IsForwardingAddress()) {
1142 *p = first_word.ToForwardingAddress();
1143 return;
1144 }
1145 }
1146
1147 int object_size = object->SizeFromMap(first_word.ToMap());
1148 // We rely on live objects in new space to be at least two pointers,
1149 // so we can store the from-space address and map pointer of promoted
1150 // objects in the to space.
1151 ASSERT(object_size >= 2 * kPointerSize);
1152
1153 // If the object should be promoted, we try to copy it to old space.
1154 if (ShouldBePromoted(object->address(), object_size)) {
1155 Object* result;
1156 if (object_size > MaxObjectSizeInPagedSpace()) {
1157 result = lo_space_->AllocateRawFixedArray(object_size);
1158 if (!result->IsFailure()) {
1159 HeapObject* target = HeapObject::cast(result);
1160
1161 if (object->IsFixedArray()) {
1162 // Save the from-space object pointer and its map pointer at the
1163 // top of the to space to be swept and copied later. Write the
1164 // forwarding address over the map word of the from-space
1165 // object.
1166 promotion_queue.insert(object, first_word.ToMap());
1167 object->set_map_word(MapWord::FromForwardingAddress(target));
1168
1169 // Give the space allocated for the result a proper map by
1170 // treating it as a free list node (not linked into the free
1171 // list).
1172 FreeListNode* node = FreeListNode::FromAddress(target->address());
1173 node->set_size(object_size);
1174
1175 *p = target;
1176 } else {
1177 // In large object space only fixed arrays might possibly contain
1178 // intergenerational references.
1179 // All other objects can be copied immediately and not revisited.
1180 *p = MigrateObject(object, target, object_size);
1181 }
1182
1183 tracer()->increment_promoted_objects_size(object_size);
1184 return;
1185 }
1186 } else {
1187 OldSpace* target_space = Heap::TargetSpace(object);
1188 ASSERT(target_space == Heap::old_pointer_space_ ||
1189 target_space == Heap::old_data_space_);
1190 result = target_space->AllocateRaw(object_size);
1191 if (!result->IsFailure()) {
1192 HeapObject* target = HeapObject::cast(result);
1193 if (target_space == Heap::old_pointer_space_) {
1194 // Save the from-space object pointer and its map pointer at the
1195 // top of the to space to be swept and copied later. Write the
1196 // forwarding address over the map word of the from-space
1197 // object.
1198 promotion_queue.insert(object, first_word.ToMap());
1199 object->set_map_word(MapWord::FromForwardingAddress(target));
1200
1201 // Give the space allocated for the result a proper map by
1202 // treating it as a free list node (not linked into the free
1203 // list).
1204 FreeListNode* node = FreeListNode::FromAddress(target->address());
1205 node->set_size(object_size);
1206
1207 *p = target;
1208 } else {
1209 // Objects promoted to the data space can be copied immediately
1210 // and not revisited---we will never sweep that space for
1211 // pointers and the copied objects do not contain pointers to
1212 // new space objects.
1213 *p = MigrateObject(object, target, object_size);
1214 #ifdef DEBUG
1215 VerifyNonPointerSpacePointersVisitor v;
1216 (*p)->Iterate(&v);
1217 #endif
1218 }
1219 tracer()->increment_promoted_objects_size(object_size);
1220 return;
1221 }
1222 }
1223 }
1224 // The object should remain in new space or the old space allocation failed.
1225 Object* result = new_space_.AllocateRaw(object_size);
1226 // Failed allocation at this point is utterly unexpected.
1227 ASSERT(!result->IsFailure());
1228 *p = MigrateObject(object, HeapObject::cast(result), object_size);
1229 }
1230
1231
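
The payoff of the patch is visible in the new four-line ScavengeObjectSlow: the long cascade of type tests and copy logic on the left is replaced by one indirect call through a scavenger function pointer that GetScavenger picks once per map. A minimal sketch of the dispatch shape, with hypothetical member names standing in for the Map changes made in src/objects.h:

  class Map;
  class HeapObject;

  // One specialized routine per object shape, chosen at map-creation time.
  typedef void (*Scavenger)(Map* map, HeapObject** slot, HeapObject* object);

  class Map {
   public:
    void set_scavenger(Scavenger s) { scavenger_ = s; }

    // Per-object fast path: a single indirect call, no type tests.
    void Scavenge(HeapObject** slot, HeapObject* object) {
      scavenger_(this, slot, object);
    }

   private:
    Scavenger scavenger_;
  };
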
1232 void Heap::ScavengePointer(HeapObject** p) { 1392 void Heap::ScavengePointer(HeapObject** p) {
1233 ScavengeObject(p, *p); 1393 ScavengeObject(p, *p);
1234 } 1394 }
1235 1395
1236 1396
1237 Object* Heap::AllocatePartialMap(InstanceType instance_type, 1397 Object* Heap::AllocatePartialMap(InstanceType instance_type,
1238 int instance_size) { 1398 int instance_size) {
1239 Object* result = AllocateRawMap(); 1399 Object* result = AllocateRawMap();
1240 if (result->IsFailure()) return result; 1400 if (result->IsFailure()) return result;
1241 1401
1242 // Map::cast cannot be used due to uninitialized map field. 1402 // Map::cast cannot be used due to uninitialized map field.
1243 reinterpret_cast<Map*>(result)->set_map(raw_unchecked_meta_map()); 1403 reinterpret_cast<Map*>(result)->set_map(raw_unchecked_meta_map());
1244 reinterpret_cast<Map*>(result)->set_instance_type(instance_type); 1404 reinterpret_cast<Map*>(result)->set_instance_type(instance_type);
1245 reinterpret_cast<Map*>(result)->set_instance_size(instance_size); 1405 reinterpret_cast<Map*>(result)->set_instance_size(instance_size);
1406 reinterpret_cast<Map*>(result)->
1407 set_scavenger(GetScavenger(instance_type, instance_size));
1246 reinterpret_cast<Map*>(result)->set_inobject_properties(0); 1408 reinterpret_cast<Map*>(result)->set_inobject_properties(0);
1247 reinterpret_cast<Map*>(result)->set_pre_allocated_property_fields(0); 1409 reinterpret_cast<Map*>(result)->set_pre_allocated_property_fields(0);
1248 reinterpret_cast<Map*>(result)->set_unused_property_fields(0); 1410 reinterpret_cast<Map*>(result)->set_unused_property_fields(0);
1249 reinterpret_cast<Map*>(result)->set_bit_field(0); 1411 reinterpret_cast<Map*>(result)->set_bit_field(0);
1250 reinterpret_cast<Map*>(result)->set_bit_field2(0); 1412 reinterpret_cast<Map*>(result)->set_bit_field2(0);
1251 return result; 1413 return result;
1252 } 1414 }
1253 1415
1254 1416
1255 Object* Heap::AllocateMap(InstanceType instance_type, int instance_size) { 1417 Object* Heap::AllocateMap(InstanceType instance_type, int instance_size) {
1256 Object* result = AllocateRawMap(); 1418 Object* result = AllocateRawMap();
1257 if (result->IsFailure()) return result; 1419 if (result->IsFailure()) return result;
1258 1420
1259 Map* map = reinterpret_cast<Map*>(result); 1421 Map* map = reinterpret_cast<Map*>(result);
1260 map->set_map(meta_map()); 1422 map->set_map(meta_map());
1261 map->set_instance_type(instance_type); 1423 map->set_instance_type(instance_type);
1424 map->set_scavenger(GetScavenger(instance_type, instance_size));
1262 map->set_prototype(null_value()); 1425 map->set_prototype(null_value());
1263 map->set_constructor(null_value()); 1426 map->set_constructor(null_value());
1264 map->set_instance_size(instance_size); 1427 map->set_instance_size(instance_size);
1265 map->set_inobject_properties(0); 1428 map->set_inobject_properties(0);
1266 map->set_pre_allocated_property_fields(0); 1429 map->set_pre_allocated_property_fields(0);
1267 map->set_instance_descriptors(empty_descriptor_array()); 1430 map->set_instance_descriptors(empty_descriptor_array());
1268 map->set_code_cache(empty_fixed_array()); 1431 map->set_code_cache(empty_fixed_array());
1269 map->set_unused_property_fields(0); 1432 map->set_unused_property_fields(0);
1270 map->set_bit_field(0); 1433 map->set_bit_field(0);
1271 map->set_bit_field2((1 << Map::kIsExtensible) | (1 << Map::kHasFastElements)); 1434 map->set_bit_field2((1 << Map::kIsExtensible) | (1 << Map::kHasFastElements));
(...skipping 2412 matching lines...)
3684 IteratePointersInDirtyRegion(pointer_fields_start, 3847 IteratePointersInDirtyRegion(pointer_fields_start,
3685 pointer_fields_end, 3848 pointer_fields_end,
3686 copy_object_func) 3849 copy_object_func)
3687 || contains_pointers_to_new_space; 3850 || contains_pointers_to_new_space;
3688 } 3851 }
3689 3852
3690 return contains_pointers_to_new_space; 3853 return contains_pointers_to_new_space;
3691 } 3854 }
3692 3855
3693 3856
3694 void Heap::IterateAndMarkPointersToNewSpace(Address start, 3857 void Heap::IterateAndMarkPointersToFromSpace(Address start,
3695 Address end, 3858 Address end,
3696 ObjectSlotCallback callback) { 3859 ObjectSlotCallback callback) {
3697 Address slot_address = start; 3860 Address slot_address = start;
3698 Page* page = Page::FromAddress(start); 3861 Page* page = Page::FromAddress(start);
3699 3862
3700 uint32_t marks = page->GetRegionMarks(); 3863 uint32_t marks = page->GetRegionMarks();
3701 3864
3702 while (slot_address < end) { 3865 while (slot_address < end) {
3703 Object** slot = reinterpret_cast<Object**>(slot_address); 3866 Object** slot = reinterpret_cast<Object**>(slot_address);
3704 if (Heap::InNewSpace(*slot)) { 3867 if (Heap::InFromSpace(*slot)) {
3705 ASSERT((*slot)->IsHeapObject()); 3868 ASSERT((*slot)->IsHeapObject());
3706 callback(reinterpret_cast<HeapObject**>(slot)); 3869 callback(reinterpret_cast<HeapObject**>(slot));
3707 if (Heap::InNewSpace(*slot)) { 3870 if (Heap::InNewSpace(*slot)) {
3708 ASSERT((*slot)->IsHeapObject()); 3871 ASSERT((*slot)->IsHeapObject());
3709 marks |= page->GetRegionMaskForAddress(slot_address); 3872 marks |= page->GetRegionMaskForAddress(slot_address);
3710 } 3873 }
3711 } 3874 }
3712 slot_address += kPointerSize; 3875 slot_address += kPointerSize;
3713 } 3876 }
3714 3877
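
The switch from IterateAndMarkPointersToNewSpace to IterateAndMarkPointersToFromSpace is deliberate: a promoted object may have had some of its slots updated already during dirty-region iteration, and those slots must keep their region mark without being scavenged a second time. Testing InFromSpace (only the evacuating semispace) instead of InNewSpace (both semispaces) distinguishes the two cases. A toy model over a hypothetical semispace layout:

  #include <cassert>
  #include <cstdint>

  // Hypothetical semispace layout: from-space directly below to-space.
  const uintptr_t kFromStart = 0x10000, kFromEnd = 0x20000;
  const uintptr_t kToStart   = 0x20000, kToEnd   = 0x30000;

  static bool InFromSpace(uintptr_t a) { return kFromStart <= a && a < kFromEnd; }
  static bool InNewSpace(uintptr_t a)  { return kFromStart <= a && a < kToEnd; }

  int main() {
    uintptr_t stale = 0x10040;    // not yet evacuated: scavenge it
    uintptr_t updated = 0x20040;  // already points into to-space
    assert(InFromSpace(stale) && InNewSpace(stale));
    assert(!InFromSpace(updated) && InNewSpace(updated));  // mark, don't rescan
  }
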
(...skipping 1116 matching lines...)
4831 void ExternalStringTable::TearDown() { 4994 void ExternalStringTable::TearDown() {
4832 new_space_strings_.Free(); 4995 new_space_strings_.Free();
4833 old_space_strings_.Free(); 4996 old_space_strings_.Free();
4834 } 4997 }
4835 4998
4836 4999
4837 List<Object*> ExternalStringTable::new_space_strings_; 5000 List<Object*> ExternalStringTable::new_space_strings_;
4838 List<Object*> ExternalStringTable::old_space_strings_; 5001 List<Object*> ExternalStringTable::old_space_strings_;
4839 5002
4840 } } // namespace v8::internal 5003 } } // namespace v8::internal