Chromium Code Reviews

Unified diff: src/mark-compact.cc

Issue 35103002: Align double array backing store during compaction and mark-sweep promotion. (Closed)
Base URL: https://v8.googlecode.com/svn/branches/bleeding_edge
Patch Set: Rebase (created 7 years, 1 month ago)
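All three hunks below apply the same pattern: when an object's payload must be 8-byte aligned (unboxed doubles on a 32-bit heap, where object alignment is only pointer-sized), the copy is allocated with one extra pointer-sized word, and Heap::EnsureDoubleAligned, added by this CL's companion change in src/heap-inl.h, shifts or pads the result so the payload lands on a double boundary. The standalone sketch below is an illustration with assumed constants, not V8 code; it shows why a single spare word is always enough:

    // Standalone illustration (assumed 32-bit constants, not V8 code): a raw
    // address handed out on a 4-byte boundary is at most one word away from an
    // 8-byte boundary, so allocating size + kPointerSize always leaves room to
    // align. The spare word ends up either before or after the object.
    #include <cassert>
    #include <cstdint>
    #include <cstdio>

    const uintptr_t kPointerSize = 4;        // assumption: 32-bit-style heap
    const uintptr_t kDoubleAlignment = 8;
    const uintptr_t kDoubleAlignmentMask = kDoubleAlignment - 1;

    // Rough analogue of Heap::EnsureDoubleAligned: misaligned start -> skip one
    // word (pad in front); aligned start -> the spare word pads the tail.
    uintptr_t EnsureDoubleAligned(uintptr_t raw) {
      return (raw & kDoubleAlignmentMask) != 0 ? raw + kPointerSize : raw;
    }

    int main() {
      for (uintptr_t raw = 0x1000; raw < 0x1040; raw += kPointerSize) {
        uintptr_t aligned = EnsureDoubleAligned(raw);
        assert((aligned & kDoubleAlignmentMask) == 0);  // payload is 8-byte aligned
        assert(aligned - raw <= kPointerSize);          // never needs more than one word
      }
      std::printf("one spare word always suffices\n");
      return 0;
    }

In the real heap the unused word has to become a valid filler object so heap iteration still works; that bookkeeping is what EnsureDoubleAligned handles, while the MigrateObject calls below still copy only the original size.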
@@ -1,10 +1,10 @@
 // Copyright 2012 the V8 project authors. All rights reserved.
 // Redistribution and use in source and binary forms, with or without
 // modification, are permitted provided that the following conditions are
 // met:
 //
 //     * Redistributions of source code must retain the above copyright
 //       notice, this list of conditions and the following disclaimer.
 //     * Redistributions in binary form must reproduce the above
 //       copyright notice, this list of conditions and the following
 //       disclaimer in the documentation and/or other materials provided
(...skipping 1997 matching lines...)
@@ -2008,32 +2008,40 @@
       }
 
       offset++;
       current_cell >>= 1;
       // Aggressively promote young survivors to the old space.
       if (TryPromoteObject(object, size)) {
         continue;
       }
 
       // Promotion failed. Just migrate object to another semispace.
-      MaybeObject* allocation = new_space->AllocateRaw(size);
+      int allocation_size = size;
+      if (Heap::MustBeDoubleAligned(object)) {
+        ASSERT(kObjectAlignment != kDoubleAlignment);
+        allocation_size += kPointerSize;

Michael Starzinger 2013/11/14 13:37:46  Hmm, could this end up using more space in the to-…
dusmil 2013/12/13 16:06:15  Yes, this is very unlikely, but it might happen. C…

+      }
+      MaybeObject* allocation = new_space->AllocateRaw(allocation_size);
       if (allocation->IsFailure()) {
         if (!new_space->AddFreshPage()) {
           // Shouldn't happen. We are sweeping linearly, and to-space
           // has the same number of pages as from-space, so there is
           // always room.
           UNREACHABLE();
         }
-        allocation = new_space->AllocateRaw(size);
+        allocation = new_space->AllocateRaw(allocation_size);
         ASSERT(!allocation->IsFailure());
       }
-      Object* target = allocation->ToObjectUnchecked();
-
-      MigrateObject(HeapObject::cast(target)->address(),
+      Object* result = allocation->ToObjectUnchecked();
+      HeapObject* target = HeapObject::cast(result);
+      if (Heap::MustBeDoubleAligned(object)) {
+        target = heap()->EnsureDoubleAligned(target, allocation_size);
+      }
+      MigrateObject(target->address(),
                     object->address(),
                     size,
                     NEW_SPACE);
     }
     *cells = 0;
   }
   return survivors_size;
 }
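The exchange above is cut off in this view, but the question is whether padding each non-promoted survivor by a word could make the copies need more to-space than the originals occupied, which is the invariant the UNREACHABLE() branch leans on. A rough worst-case bound, using assumed page and object sizes purely for illustration:

    // Back-of-the-envelope bound on the extra to-space the padding can cost per
    // page. All constants here are assumptions for illustration, not V8's values.
    #include <cstdio>

    int main() {
      const long kPageSize = 1 << 20;       // assumed 1 MB semispace page
      const long kPointerSize = 4;          // 32-bit target
      const long kMinAlignedObject = 8;     // assumed smallest double-aligned candidate

      // Worst case: every live object on the page needs one word of padding.
      long max_objects = kPageSize / kMinAlignedObject;
      long worst_case_padding = max_objects * kPointerSize;

      std::printf("worst-case padding: %ld bytes (%.0f%% of the page)\n",
                  worst_case_padding, 100.0 * worst_case_padding / kPageSize);
      return 0;
    }

Only double-aligned candidates pay the extra word and they are rarely anywhere near the minimum size, so the realistic overhead is a small fraction of that bound, in line with the "very unlikely, but it might happen" assessment in the reply.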
(...skipping 885 matching lines...)
@@ -2928,29 +2936,37 @@
   return String::cast(*p);
 }
 
 
 bool MarkCompactCollector::TryPromoteObject(HeapObject* object,
                                             int object_size) {
   // TODO(hpayer): Replace that check with an assert.
   CHECK(object_size <= Page::kMaxNonCodeHeapObjectSize);
 
+  int allocation_size = object_size;
+  if (Heap::MustBeDoubleAligned(object)) {
+    ASSERT(kObjectAlignment != kDoubleAlignment);
+    allocation_size += kPointerSize;

Michael Starzinger 2013/11/14 13:37:46  This might exceed Page::kMaxNonCodeHeapObjectSize.
dusmil 2013/12/13 16:06:15  According to the definition of Page::kMaxNonCodeHe…

+  }
   OldSpace* target_space = heap()->TargetSpace(object);
 
   ASSERT(target_space == heap()->old_pointer_space() ||
          target_space == heap()->old_data_space());
   Object* result;
   MaybeObject* maybe_result = target_space->AllocateRaw(
-      object_size,
+      allocation_size,
       PagedSpace::MOVE_OBJECT);
   if (maybe_result->ToObject(&result)) {
     HeapObject* target = HeapObject::cast(result);
+    if (Heap::MustBeDoubleAligned(object)) {
+      target = heap()->EnsureDoubleAligned(target, allocation_size);
+    }
     MigrateObject(target->address(),
                   object->address(),
                   object_size,
                   target_space->identity());
     heap()->mark_compact_collector()->tracer()->
         increment_promoted_objects_size(object_size);
     return true;
   }
 
   return false;
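The point raised in the exchange above is that the CHECK at the top of TryPromoteObject bounds object_size, not allocation_size, so an object sitting exactly at Page::kMaxNonCodeHeapObjectSize would ask the old space for one word more than the checked limit once padding is added; the reply, truncated above, argues from how that constant is defined. One defensive way to encode the same assumption, sketched standalone with a made-up limit rather than V8's, is to test the padded size before requesting the memory:

    // Standalone sketch (made-up limit, not V8's): validate the padded size, not
    // the raw object size, against an allocator's hard per-object maximum.
    #include <cassert>
    #include <cstdio>

    const int kPointerSize = 4;
    const int kMaxObjectSize = 64 * 1024;   // assumed stand-in for the page limit

    bool TryPromote(int object_size, bool needs_double_alignment) {
      int allocation_size = object_size;
      if (needs_double_alignment) allocation_size += kPointerSize;
      // Check what is actually requested, so an object right at the limit cannot
      // slip past once the padding word is added.
      if (allocation_size > kMaxObjectSize) return false;
      // ... allocate allocation_size bytes, align, migrate ...
      return true;
    }

    int main() {
      assert(TryPromote(kMaxObjectSize - kPointerSize, true));   // padded size just fits
      assert(!TryPromote(kMaxObjectSize, true));                 // padded size exceeds the limit
      std::printf("padded-size check ok\n");
      return 0;
    }

Whether such a check is needed in the real code depends on how Page::kMaxNonCodeHeapObjectSize relates to a page's usable area, which is what the reply above appeals to.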
(...skipping 50 matching lines...)
@@ -3007,30 +3023,40 @@
     if (*cell == 0) continue;
 
     int live_objects = MarkWordToObjectStarts(*cell, offsets);
     for (int i = 0; i < live_objects; i++) {
       Address object_addr = cell_base + offsets[i] * kPointerSize;
       HeapObject* object = HeapObject::FromAddress(object_addr);
       ASSERT(Marking::IsBlack(Marking::MarkBitFrom(object)));
 
       int size = object->Size();
 
-      MaybeObject* target = space->AllocateRaw(size, PagedSpace::MOVE_OBJECT);
+      int allocation_size = size;
+      if (Heap::MustBeDoubleAligned(object)) {
+        ASSERT(kObjectAlignment != kDoubleAlignment);
+        allocation_size += kPointerSize;
+      }
+      MaybeObject* target =
+          space->AllocateRaw(allocation_size, PagedSpace::MOVE_OBJECT);
       if (target->IsFailure()) {
         // OS refused to give us memory.
         V8::FatalProcessOutOfMemory("Evacuation");
         return;
       }
 
-      Object* target_object = target->ToObjectUnchecked();
-
-      MigrateObject(HeapObject::cast(target_object)->address(),
+      Object* result = target->ToObjectUnchecked();
+      HeapObject* target_object = HeapObject::cast(result);
+      if (Heap::MustBeDoubleAligned(object)) {
+        target_object =
+            heap()->EnsureDoubleAligned(target_object, allocation_size);
+      }
+      MigrateObject(target_object->address(),
                     object_addr,
                     size,
                     space->identity());
       ASSERT(object->map_word().IsForwardingAddress());
     }
 
     // Clear marking bits for current cell.
     *cell = 0;
   }
   p->ResetLiveBytes();
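Heap::MustBeDoubleAligned itself comes from the companion src/heap-inl.h change and is not visible in this file. Judging from the CL description, it presumably singles out double array backing stores (FixedDoubleArray payloads) and only matters on 32-bit targets, where object alignment (one word, 4 bytes) is narrower than double alignment (8 bytes), the same condition the ASSERTs above restate. A standalone analogue of such a predicate, with the heap's instance check reduced to an enum for illustration:

    // Standalone analogue (an assumption, not the CL's actual heap-inl.h code):
    // only objects with raw double payloads need padding, and only when pointer
    // alignment is narrower than double alignment.
    #include <cstdio>

    enum ObjectKind { kFixedArray, kFixedDoubleArray, kByteArray };

    const int kObjectAlignment = 4;   // assumed 32-bit target
    const int kDoubleAlignment = 8;

    bool MustBeDoubleAligned(ObjectKind kind) {
      if (kObjectAlignment == kDoubleAlignment) return false;  // 64-bit: nothing to do
      return kind == kFixedDoubleArray;                        // raw double payload
    }

    int main() {
      std::printf("FixedDoubleArray: %d, FixedArray: %d\n",
                  MustBeDoubleAligned(kFixedDoubleArray),
                  MustBeDoubleAligned(kFixedArray));
      return 0;
    }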
(...skipping 1316 matching lines...)
@@ -4353,10 +4379,10 @@
   while (buffer != NULL) {
     SlotsBuffer* next_buffer = buffer->next();
     DeallocateBuffer(buffer);
     buffer = next_buffer;
   }
   *buffer_address = NULL;
 }
 
 
 } }  // namespace v8::internal
