| OLD | NEW |
| 1 // Copyright 2012 the V8 project authors. All rights reserved. | 1 // Copyright 2012 the V8 project authors. All rights reserved. |
| 2 // Use of this source code is governed by a BSD-style license that can be | 2 // Use of this source code is governed by a BSD-style license that can be |
| 3 // found in the LICENSE file. | 3 // found in the LICENSE file. |
| 4 | 4 |
| 5 #include "src/v8.h" | 5 #include "src/v8.h" |
| 6 | 6 |
| 7 #include "src/api.h" | 7 #include "src/api.h" |
| 8 #include "src/arguments.h" | 8 #include "src/arguments.h" |
| 9 #include "src/base/once.h" | 9 #include "src/base/once.h" |
| 10 #include "src/bootstrapper.h" | 10 #include "src/bootstrapper.h" |
| (...skipping 164 matching lines...) |
| 175 | 175 |
| 176 | 176 |
| 177 static void MoveDoubleElements(FixedDoubleArray* dst, int dst_index, | 177 static void MoveDoubleElements(FixedDoubleArray* dst, int dst_index, |
| 178 FixedDoubleArray* src, int src_index, int len) { | 178 FixedDoubleArray* src, int src_index, int len) { |
| 179 if (len == 0) return; | 179 if (len == 0) return; |
| 180 MemMove(dst->data_start() + dst_index, src->data_start() + src_index, | 180 MemMove(dst->data_start() + dst_index, src->data_start() + src_index, |
| 181 len * kDoubleSize); | 181 len * kDoubleSize); |
| 182 } | 182 } |
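`MoveDoubleElements` forwards to `MemMove` with a byte count of `len * kDoubleSize`. The move semantics matter: callers such as shift and splice pass overlapping source and destination ranges within the same backing store, which a plain `memcpy` would not handle. A minimal standalone sketch of the overlapping case, using `std::memmove` as a stand-in for V8's `MemMove`:

```cpp
// memmove_overlap.cc -- hypothetical sketch, not V8 code.
#include <cstdio>
#include <cstring>

int main() {
  double data[4] = {1.0, 2.0, 3.0, 4.0};
  // Destination [0..2] and source [1..3] overlap; memmove (unlike memcpy)
  // is defined for overlapping ranges, which is why MemMove is used.
  std::memmove(data, data + 1, 3 * sizeof(double));
  printf("%g %g %g %g\n", data[0], data[1], data[2], data[3]);  // 2 3 4 4
  return 0;
}
```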
| 183 | 183 |
| 184 | 184 |
| 185 static FixedArrayBase* LeftTrimFixedArray(Heap* heap, | |
| 186 FixedArrayBase* elms, | |
| 187 int to_trim) { | |
| 188 DCHECK(heap->CanMoveObjectStart(elms)); | |
| 189 | |
| 190 Map* map = elms->map(); | |
| 191 int entry_size; | |
| 192 if (elms->IsFixedArray()) { | |
| 193 entry_size = kPointerSize; | |
| 194 } else { | |
| 195 entry_size = kDoubleSize; | |
| 196 } | |
| 197 DCHECK(elms->map() != heap->fixed_cow_array_map()); | |
| 198 // For now this trick is only applied to fixed arrays in new and paged space. | |
| 199 // In large object space the object's start must coincide with the chunk | |
| 200 // start, and thus the trick is not applicable. | |
| 201 DCHECK(!heap->lo_space()->Contains(elms)); | |
| 202 | |
| 203 STATIC_ASSERT(FixedArrayBase::kMapOffset == 0); | |
| 204 STATIC_ASSERT(FixedArrayBase::kLengthOffset == kPointerSize); | |
| 205 STATIC_ASSERT(FixedArrayBase::kHeaderSize == 2 * kPointerSize); | |
| 206 | |
| 207 Object** former_start = HeapObject::RawField(elms, 0); | |
| 208 | |
| 209 const int len = elms->length(); | |
| 210 | |
| 211 if (to_trim * entry_size > FixedArrayBase::kHeaderSize && | |
| 212 elms->IsFixedArray() && | |
| 213 !heap->new_space()->Contains(elms)) { | |
| 214 // If we are doing a big trim in old space then we zap the space that was | |
| 215 // formerly part of the array so that the GC (aided by the card-based | |
| 216 // remembered set) won't find pointers to new-space there. | |
| 217 Object** zap = reinterpret_cast<Object**>(elms->address()); | |
| 218 zap++; // The filler's header is at least one word, so skip it. | |
| 219 for (int i = 1; i < to_trim; i++) { | |
| 220 *zap++ = Smi::FromInt(0); | |
| 221 } | |
| 222 } | |
| 223 // Technically in new space this write might be omitted (except in debug | |
| 224 // mode, which iterates through the heap), but to play it safe we still | |
| 225 // do it. | |
| 226 // Since left trimming is only performed on pages that are not concurrently | |
| 227 // swept, creating a filler object does not require synchronization. | |
| 228 heap->CreateFillerObjectAt(elms->address(), to_trim * entry_size); | |
| 229 | |
| 230 int new_start_index = to_trim * (entry_size / kPointerSize); | |
| 231 former_start[new_start_index] = map; | |
| 232 former_start[new_start_index + 1] = Smi::FromInt(len - to_trim); | |
| 233 | |
| 234 // Maintain marking consistency for HeapObjectIterator and | |
| 235 // IncrementalMarking. | |
| 236 int size_delta = to_trim * entry_size; | |
| 237 Address new_start = elms->address() + size_delta; | |
| 238 heap->marking()->TransferMark(elms->address(), new_start); | |
| 239 heap->AdjustLiveBytes(new_start, -size_delta, Heap::FROM_MUTATOR); | |
| 240 | |
| 241 FixedArrayBase* new_elms = | |
| 242 FixedArrayBase::cast(HeapObject::FromAddress(new_start)); | |
| 243 | |
| 244 heap->OnMoveEvent(new_elms, elms, new_elms->Size()); | |
| 245 return new_elms; | |
| 246 } | |
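The helper deleted above is the heart of this CL: left trimming moves the start of the array object forward in memory instead of copying the surviving elements down, zapping the vacated words and installing a filler so linear heap iteration never sees stale element values. The sketch below is a hypothetical toy model of that layout trick with a one-word header (a real `FixedArrayBase` has a two-word map + length header, and the production code additionally transfers marks, adjusts live bytes, and emits a move event, as the diff shows); names like `page` and `trimmed` are illustrative only.

```cpp
// left_trim_sketch.cc -- hypothetical toy model, not V8 code.
#include <cstdint>
#include <cstdio>

int main() {
  // Simulated heap words: slot 0 is the header (length), slots 1..8 payload.
  intptr_t page[1 + 8];
  page[0] = 8;
  for (int i = 0; i < 8; ++i) page[1 + i] = 100 + i;

  const int to_trim = 3;
  const intptr_t old_len = page[0];

  // Zap the vacated words so a heap walker sees only dead filler space
  // (stands in for CreateFillerObjectAt plus the Smi zapping loop).
  for (int i = 0; i < to_trim; ++i) page[i] = 0;

  // Re-establish the header immediately before the first kept element; no
  // element bytes move, the object simply starts to_trim words later.
  intptr_t* trimmed = page + to_trim;
  trimmed[0] = old_len - to_trim;

  printf("len=%ld:", static_cast<long>(trimmed[0]));
  for (intptr_t i = 0; i < trimmed[0]; ++i)
    printf(" %ld", static_cast<long>(trimmed[1 + i]));
  printf("\n");  // len=5: 103 104 105 106 107
  return 0;
}
```

The payoff is that trimming does constant work per trimmed element regardless of how many elements survive, which is why the shift and splice fast paths below guard on `heap->CanMoveObjectStart`.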
| 247 | |
| 248 | |
| 249 static bool ArrayPrototypeHasNoElements(Heap* heap, | 185 static bool ArrayPrototypeHasNoElements(Heap* heap, |
| 250 Context* native_context, | 186 Context* native_context, |
| 251 JSObject* array_proto) { | 187 JSObject* array_proto) { |
| 252 DisallowHeapAllocation no_gc; | 188 DisallowHeapAllocation no_gc; |
| 253 // This method depends on the non-writability of Object and Array prototype | 189 // This method depends on the non-writability of Object and Array prototype |
| 254 // fields. | 190 // fields. |
| 255 if (array_proto->elements() != heap->empty_fixed_array()) return false; | 191 if (array_proto->elements() != heap->empty_fixed_array()) return false; |
| 256 // Object.prototype | 192 // Object.prototype |
| 257 PrototypeIterator iter(heap->isolate(), array_proto); | 193 PrototypeIterator iter(heap->isolate(), array_proto); |
| 258 if (iter.IsAtEnd()) { | 194 if (iter.IsAtEnd()) { |
| (...skipping 270 matching lines...) |
| 529 | 465 |
| 530 // Get first element | 466 // Get first element |
| 531 ElementsAccessor* accessor = array->GetElementsAccessor(); | 467 ElementsAccessor* accessor = array->GetElementsAccessor(); |
| 532 Handle<Object> first = | 468 Handle<Object> first = |
| 533 accessor->Get(array, array, 0, elms_obj).ToHandleChecked(); | 469 accessor->Get(array, array, 0, elms_obj).ToHandleChecked(); |
| 534 if (first->IsTheHole()) { | 470 if (first->IsTheHole()) { |
| 535 return CallJsBuiltin(isolate, "ArrayShift", args); | 471 return CallJsBuiltin(isolate, "ArrayShift", args); |
| 536 } | 472 } |
| 537 | 473 |
| 538 if (heap->CanMoveObjectStart(*elms_obj)) { | 474 if (heap->CanMoveObjectStart(*elms_obj)) { |
| 539 array->set_elements(LeftTrimFixedArray(heap, *elms_obj, 1)); | 475 array->set_elements(heap->LeftTrimFixedArray(*elms_obj, 1)); |
| 540 } else { | 476 } else { |
| 541 // Shift the elements. | 477 // Shift the elements. |
| 542 if (elms_obj->IsFixedArray()) { | 478 if (elms_obj->IsFixedArray()) { |
| 543 Handle<FixedArray> elms = Handle<FixedArray>::cast(elms_obj); | 479 Handle<FixedArray> elms = Handle<FixedArray>::cast(elms_obj); |
| 544 DisallowHeapAllocation no_gc; | 480 DisallowHeapAllocation no_gc; |
| 545 heap->MoveElements(*elms, 0, 1, len - 1); | 481 heap->MoveElements(*elms, 0, 1, len - 1); |
| 546 elms->set(len - 1, heap->the_hole_value()); | 482 elms->set(len - 1, heap->the_hole_value()); |
| 547 } else { | 483 } else { |
| 548 Handle<FixedDoubleArray> elms = Handle<FixedDoubleArray>::cast(elms_obj); | 484 Handle<FixedDoubleArray> elms = Handle<FixedDoubleArray>::cast(elms_obj); |
| 549 MoveDoubleElements(*elms, 0, *elms, 1, len - 1); | 485 MoveDoubleElements(*elms, 0, *elms, 1, len - 1); |
| (...skipping 324 matching lines...) |
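In the `ArrayShift` hunk above, both branches produce the same observable result: the fast path left-trims the backing store by one, while the fallback copies `len - 1` elements down and stores the hole into the freed tail slot. A hypothetical sketch of the copying fallback for the double-array case, with plain `NAN` standing in for V8's hole sentinel (a reserved NaN bit pattern in `FixedDoubleArray`):

```cpp
// shift_copy_path.cc -- hypothetical sketch of the non-trimming shift.
#include <cmath>
#include <cstdio>
#include <cstring>

int main() {
  double elms[4] = {10.0, 20.0, 30.0, 40.0};
  const int len = 4;
  // Equivalent of MoveDoubleElements(elms, 0, elms, 1, len - 1): an
  // overlapping move of (len - 1) doubles toward the array start.
  std::memmove(elms, elms + 1, (len - 1) * sizeof(double));
  // The last slot no longer holds a live element; store the hole marker.
  elms[len - 1] = NAN;
  for (int i = 0; i < len; ++i) printf("%g ", elms[i]);
  printf("\n");  // 20 30 40 nan
  return 0;
}
```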
| 874 Handle<FixedDoubleArray>::cast(elms_obj); | 810 Handle<FixedDoubleArray>::cast(elms_obj); |
| 875 MoveDoubleElements(*elms, delta, *elms, 0, actual_start); | 811 MoveDoubleElements(*elms, delta, *elms, 0, actual_start); |
| 876 } else { | 812 } else { |
| 877 Handle<FixedArray> elms = Handle<FixedArray>::cast(elms_obj); | 813 Handle<FixedArray> elms = Handle<FixedArray>::cast(elms_obj); |
| 878 DisallowHeapAllocation no_gc; | 814 DisallowHeapAllocation no_gc; |
| 879 heap->MoveElements(*elms, delta, 0, actual_start); | 815 heap->MoveElements(*elms, delta, 0, actual_start); |
| 880 } | 816 } |
| 881 | 817 |
| 882 if (heap->CanMoveObjectStart(*elms_obj)) { | 818 if (heap->CanMoveObjectStart(*elms_obj)) { |
| 883 // On the fast path we move the start of the object in memory. | 819 // On the fast path we move the start of the object in memory. |
| 884 elms_obj = handle(LeftTrimFixedArray(heap, *elms_obj, delta), isolate); | 820 elms_obj = handle(heap->LeftTrimFixedArray(*elms_obj, delta)); |
| 885 } else { | 821 } else { |
| 886 // This is the slow path. We are going to move the elements to the left | 822 // This is the slow path. We are going to move the elements to the left |
| 887 // by copying them. For trimmed values we store the hole. | 823 // by copying them. For trimmed values we store the hole. |
| 888 if (elms_obj->IsFixedDoubleArray()) { | 824 if (elms_obj->IsFixedDoubleArray()) { |
| 889 Handle<FixedDoubleArray> elms = | 825 Handle<FixedDoubleArray> elms = |
| 890 Handle<FixedDoubleArray>::cast(elms_obj); | 826 Handle<FixedDoubleArray>::cast(elms_obj); |
| 891 MoveDoubleElements(*elms, 0, *elms, delta, len - delta); | 827 MoveDoubleElements(*elms, 0, *elms, delta, len - delta); |
| 892 elms->FillWithHoles(len - delta, len); | 828 elms->FillWithHoles(len - delta, len); |
| 893 } else { | 829 } else { |
| 894 Handle<FixedArray> elms = Handle<FixedArray>::cast(elms_obj); | 830 Handle<FixedArray> elms = Handle<FixedArray>::cast(elms_obj); |
| (...skipping 823 matching lines...) |
| 1718 } | 1654 } |
| 1719 BUILTIN_LIST_C(DEFINE_BUILTIN_ACCESSOR_C) | 1655 BUILTIN_LIST_C(DEFINE_BUILTIN_ACCESSOR_C) |
| 1720 BUILTIN_LIST_A(DEFINE_BUILTIN_ACCESSOR_A) | 1656 BUILTIN_LIST_A(DEFINE_BUILTIN_ACCESSOR_A) |
| 1721 BUILTIN_LIST_H(DEFINE_BUILTIN_ACCESSOR_H) | 1657 BUILTIN_LIST_H(DEFINE_BUILTIN_ACCESSOR_H) |
| 1722 BUILTIN_LIST_DEBUG_A(DEFINE_BUILTIN_ACCESSOR_A) | 1658 BUILTIN_LIST_DEBUG_A(DEFINE_BUILTIN_ACCESSOR_A) |
| 1723 #undef DEFINE_BUILTIN_ACCESSOR_C | 1659 #undef DEFINE_BUILTIN_ACCESSOR_C |
| 1724 #undef DEFINE_BUILTIN_ACCESSOR_A | 1660 #undef DEFINE_BUILTIN_ACCESSOR_A |
| 1725 | 1661 |
| 1726 | 1662 |
| 1727 } } // namespace v8::internal | 1663 } } // namespace v8::internal |