| OLD | NEW |
| 1 // Copyright 2012 the V8 project authors. All rights reserved. | 1 // Copyright 2012 the V8 project authors. All rights reserved. |
| 2 // Redistribution and use in source and binary forms, with or without | 2 // Redistribution and use in source and binary forms, with or without |
| 3 // modification, are permitted provided that the following conditions are | 3 // modification, are permitted provided that the following conditions are |
| 4 // met: | 4 // met: |
| 5 // | 5 // |
| 6 // * Redistributions of source code must retain the above copyright | 6 // * Redistributions of source code must retain the above copyright |
| 7 // notice, this list of conditions and the following disclaimer. | 7 // notice, this list of conditions and the following disclaimer. |
| 8 // * Redistributions in binary form must reproduce the above | 8 // * Redistributions in binary form must reproduce the above |
| 9 // copyright notice, this list of conditions and the following | 9 // copyright notice, this list of conditions and the following |
| 10 // disclaimer in the documentation and/or other materials provided | 10 // disclaimer in the documentation and/or other materials provided |
| (...skipping 2704 matching lines...) |
| 2715 // to encounter pointers to dead new space objects during traversal of pointers | 2715 // to encounter pointers to dead new space objects during traversal of pointers |
| 2716 // to new space. We should clear them to avoid encountering them during the next | 2716 // to new space. We should clear them to avoid encountering them during the next |
| 2717 // pointer iteration. This is an issue if the store buffer overflows and we | 2717 // pointer iteration. This is an issue if the store buffer overflows and we |
| 2718 // have to scan the entire old space, including dead objects, looking for | 2718 // have to scan the entire old space, including dead objects, looking for |
| 2719 // pointers to new space. | 2719 // pointers to new space. |
| 2720 void MarkCompactCollector::MigrateObject(Address dst, | 2720 void MarkCompactCollector::MigrateObject(Address dst, |
| 2721 Address src, | 2721 Address src, |
| 2722 int size, | 2722 int size, |
| 2723 AllocationSpace dest) { | 2723 AllocationSpace dest) { |
| 2724 HEAP_PROFILE(heap(), ObjectMoveEvent(src, dst)); | 2724 HEAP_PROFILE(heap(), ObjectMoveEvent(src, dst)); |
| 2725 if (dest == OLD_POINTER_SPACE || dest == LO_SPACE) { | 2725 // TODO(hpayer): Replace that check with an assert. |
| 2726 CHECK(dest != LO_SPACE && size <= Page::kMaxNonCodeHeapObjectSize); |
| 2727 if (dest == OLD_POINTER_SPACE) { |
| 2726 Address src_slot = src; | 2728 Address src_slot = src; |
| 2727 Address dst_slot = dst; | 2729 Address dst_slot = dst; |
| 2728 ASSERT(IsAligned(size, kPointerSize)); | 2730 ASSERT(IsAligned(size, kPointerSize)); |
| 2729 | 2731 |
| 2730 for (int remaining = size / kPointerSize; remaining > 0; remaining--) { | 2732 for (int remaining = size / kPointerSize; remaining > 0; remaining--) { |
| 2731 Object* value = Memory::Object_at(src_slot); | 2733 Object* value = Memory::Object_at(src_slot); |
| 2732 | 2734 |
| 2733 Memory::Object_at(dst_slot) = value; | 2735 Memory::Object_at(dst_slot) = value; |
| 2734 | 2736 |
| 2735 if (heap_->InNewSpace(value)) { | 2737 if (heap_->InNewSpace(value)) { |
| (...skipping 151 matching lines...) |
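
The visible part of MigrateObject above copies the object one pointer-sized word at a time and, for the OLD_POINTER_SPACE case, inspects each copied value so that slots holding pointers into new space can be re-recorded at the destination. A minimal standalone sketch of that copy-and-record pattern follows; the in_new_space predicate and record_slot callback are stand-ins for illustration, not V8's API:

    #include <cstddef>
    #include <cstdint>
    #include <functional>

    using Word = uintptr_t;

    // Copy `size` bytes from src to dst one pointer-sized word at a
    // time, invoking record_slot for every destination word whose value
    // the in_new_space predicate classifies as a new-space pointer.
    void MigrateWords(Word* dst, const Word* src, size_t size,
                      const std::function<bool(Word)>& in_new_space,
                      const std::function<void(Word*)>& record_slot) {
      size_t count = size / sizeof(Word);  // size is assumed word-aligned.
      for (size_t i = 0; i < count; i++) {
        dst[i] = src[i];
        if (in_new_space(dst[i])) {
          record_slot(&dst[i]);  // Remember this slot for the store buffer.
        }
      }
    }
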
| 2887 if (map_word.IsForwardingAddress()) { | 2889 if (map_word.IsForwardingAddress()) { |
| 2888 return String::cast(map_word.ToForwardingAddress()); | 2890 return String::cast(map_word.ToForwardingAddress()); |
| 2889 } | 2891 } |
| 2890 | 2892 |
| 2891 return String::cast(*p); | 2893 return String::cast(*p); |
| 2892 } | 2894 } |
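
The map-word test above is the standard forwarding-pointer idiom of a moving collector: once an object has been evacuated, the first word of its old copy is overwritten with the address of the new copy, so later visitors can redirect stale references. A toy illustration of the idiom, using a simplified low-bit tag rather than V8's actual MapWord encoding:

    #include <cstdint>

    // Toy object header: either a type tag or, after evacuation, a
    // forwarding address distinguished by its low bit (an assumption of
    // this sketch; object alignment leaves the bit free).
    struct ToyObject {
      uintptr_t header;
      bool IsForwarded() const { return (header & 1) != 0; }
      ToyObject* ForwardingAddress() const {
        return reinterpret_cast<ToyObject*>(header & ~uintptr_t{1});
      }
      void SetForwardingAddress(ToyObject* target) {
        header = reinterpret_cast<uintptr_t>(target) | 1;
      }
    };

    // Follow a possibly stale reference to the object's current location.
    ToyObject* Resolve(ToyObject* obj) {
      return obj->IsForwarded() ? obj->ForwardingAddress() : obj;
    }
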
| 2893 | 2895 |
| 2894 | 2896 |
| 2895 bool MarkCompactCollector::TryPromoteObject(HeapObject* object, | 2897 bool MarkCompactCollector::TryPromoteObject(HeapObject* object, |
| 2896 int object_size) { | 2898 int object_size) { |
| 2899 // TODO(hpayer): Replace that check with an assert. |
| 2900 CHECK(object_size <= Page::kMaxNonCodeHeapObjectSize); |
| 2901 |
| 2902 OldSpace* target_space = heap()->TargetSpace(object); |
| 2903 |
| 2904 ASSERT(target_space == heap()->old_pointer_space() || |
| 2905 target_space == heap()->old_data_space()); |
| 2897 Object* result; | 2906 Object* result; |
| 2898 | 2907 MaybeObject* maybe_result = target_space->AllocateRaw(object_size); |
| 2899 if (object_size > Page::kMaxNonCodeHeapObjectSize) { | 2908 if (maybe_result->ToObject(&result)) { |
| 2900 MaybeObject* maybe_result = | 2909 HeapObject* target = HeapObject::cast(result); |
| 2901 heap()->lo_space()->AllocateRaw(object_size, NOT_EXECUTABLE); | 2910 MigrateObject(target->address(), |
| 2902 if (maybe_result->ToObject(&result)) { | 2911 object->address(), |
| 2903 HeapObject* target = HeapObject::cast(result); | 2912 object_size, |
| 2904 MigrateObject(target->address(), | 2913 target_space->identity()); |
| 2905 object->address(), | 2914 heap()->mark_compact_collector()->tracer()-> |
| 2906 object_size, | 2915 increment_promoted_objects_size(object_size); |
| 2907 LO_SPACE); | 2916 return true; |
| 2908 heap()->mark_compact_collector()->tracer()-> | |
| 2909 increment_promoted_objects_size(object_size); | |
| 2910 return true; | |
| 2911 } | |
| 2912 } else { | |
| 2913 OldSpace* target_space = heap()->TargetSpace(object); | |
| 2914 | |
| 2915 ASSERT(target_space == heap()->old_pointer_space() || | |
| 2916 target_space == heap()->old_data_space()); | |
| 2917 MaybeObject* maybe_result = target_space->AllocateRaw(object_size); | |
| 2918 if (maybe_result->ToObject(&result)) { | |
| 2919 HeapObject* target = HeapObject::cast(result); | |
| 2920 MigrateObject(target->address(), | |
| 2921 object->address(), | |
| 2922 object_size, | |
| 2923 target_space->identity()); | |
| 2924 heap()->mark_compact_collector()->tracer()-> | |
| 2925 increment_promoted_objects_size(object_size); | |
| 2926 return true; | |
| 2927 } | |
| 2928 } | 2917 } |
| 2929 | 2918 |
| 2930 return false; | 2919 return false; |
| 2931 } | 2920 } |
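
After this change TryPromoteObject has a single path: the large-object fallback is gone, the new CHECK rules out oversized objects up front, and promotion simply attempts a raw allocation in the target old space, migrating on success and reporting failure otherwise (the caller then evacuates within new space instead). A schematic version of that shape, with placeholder types standing in for V8's spaces and MaybeObject handling:

    #include <cstddef>
    #include <cstring>

    // Placeholder bump allocator standing in for an OldSpace; returns
    // nullptr when the space cannot satisfy the request.
    struct ToySpace {
      char* top;
      char* limit;
      void* AllocateRaw(size_t size) {
        if (top + size > limit) return nullptr;  // Allocation failure.
        void* result = top;
        top += size;
        return result;
      }
    };

    // Promote: try the target space once; on success copy the object
    // over and report true, otherwise leave the object where it is.
    bool TryPromote(ToySpace* target, const void* object, size_t size) {
      void* dst = target->AllocateRaw(size);
      if (dst == nullptr) return false;  // Caller evacuates in new space.
      std::memcpy(dst, object, size);    // Stands in for MigrateObject().
      return true;
    }
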
| 2932 | 2921 |
| 2933 | 2922 |
| 2934 void MarkCompactCollector::EvacuateNewSpace() { | 2923 void MarkCompactCollector::EvacuateNewSpace() { |
| 2935 // There are soft limits in the allocation code, designed to trigger a mark | 2924 // There are soft limits in the allocation code, designed to trigger a mark |
| 2936 // sweep collection by failing allocations. But since we are already in | 2925 // sweep collection by failing allocations. But since we are already in |
| 2937 // a mark-sweep allocation, there is no sense in trying to trigger one. | 2926 // a mark-sweep allocation, there is no sense in trying to trigger one. |
| (...skipping 1373 matching lines...) |
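
The comment at the top of EvacuateNewSpace refers to allocation paths that deliberately fail once a soft limit is crossed, so that the runtime schedules a mark-sweep collection; during the collection itself those limits must be bypassed. A rough sketch of such a guard (V8 achieves this with an AlwaysAllocateScope; the flag and names below are illustrative assumptions):

    #include <cstddef>

    // Illustrative soft-limit guard: ordinary allocations start failing
    // past the soft limit so the runtime triggers a mark-sweep, while
    // code running inside the collector sets always_allocate to bypass it.
    struct ToyHeap {
      size_t allocated = 0;
      size_t soft_limit = 1 << 20;
      bool always_allocate = false;

      bool ReserveBytes(size_t size) {
        if (!always_allocate && allocated + size > soft_limit) {
          return false;  // Soft failure: caller should trigger a GC.
        }
        allocated += size;
        return true;
      }
    };
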
| 4311 while (buffer != NULL) { | 4300 while (buffer != NULL) { |
| 4312 SlotsBuffer* next_buffer = buffer->next(); | 4301 SlotsBuffer* next_buffer = buffer->next(); |
| 4313 DeallocateBuffer(buffer); | 4302 DeallocateBuffer(buffer); |
| 4314 buffer = next_buffer; | 4303 buffer = next_buffer; |
| 4315 } | 4304 } |
| 4316 *buffer_address = NULL; | 4305 *buffer_address = NULL; |
| 4317 } | 4306 } |
| 4318 | 4307 |
| 4319 | 4308 |
| 4320 } } // namespace v8::internal | 4309 } } // namespace v8::internal |