Index: src/heap/mark-compact.cc
diff --git a/src/heap/mark-compact.cc b/src/heap/mark-compact.cc
index db7b427458ef841d29dd374fae2b8f98a9c1b27a..1cf0cbb10c21af52854566e4033bb4cea0f0cc62 100644
--- a/src/heap/mark-compact.cc
+++ b/src/heap/mark-compact.cc
@@ -3251,6 +3251,28 @@ void MarkCompactCollector::RemoveObjectSlots(HeapObject* invalid_object) {
 }
 
 
+void MarkCompactCollector::RecordDeoptimizedCodeSlots(Code* code) {
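+  // Record the slots of the code object's tagged pointer fields so that the
+  // compactor updates them if the objects they reference are moved.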
+  Object** reloc_info_slot = Code::RawField(code, Code::kRelocationInfoOffset);
+  RecordSlot(reloc_info_slot, reloc_info_slot, *reloc_info_slot);
+  Object** handler_table_slot = Code::RawField(code, Code::kHandlerTableOffset);
+  RecordSlot(handler_table_slot, handler_table_slot, *handler_table_slot);
+  Object** deopt_data_slot =
+      Code::RawField(code, Code::kDeoptimizationDataOffset);
+  RecordSlot(deopt_data_slot, deopt_data_slot, *deopt_data_slot);
+  Object** type_feedback_slot =
+      Code::RawField(code, Code::kTypeFeedbackInfoOffset);
+  RecordSlot(type_feedback_slot, type_feedback_slot, *type_feedback_slot);
+  Object** next_code_slot = Code::RawField(code, Code::kNextCodeLinkOffset);
+  RecordSlot(next_code_slot, next_code_slot, *next_code_slot);
+  Object** meta_data_slot = Code::RawField(code, Code::kGCMetadataOffset);
+  RecordSlot(meta_data_slot, meta_data_slot, *meta_data_slot);
+  Object** constant_pool_slot = Code::RawField(code, Code::kConstantPoolOffset);
+  RecordSlot(constant_pool_slot, constant_pool_slot, *constant_pool_slot);
+}
+
+
 void MarkCompactCollector::EvacuateNewSpace() {
   // There are soft limits in the allocation code, designed to trigger a mark
   // sweep collection by failing allocations. But since we are already in