| OLD | NEW |
| 1 // Copyright 2013 the V8 project authors. All rights reserved. | 1 // Copyright 2013 the V8 project authors. All rights reserved. |
| 2 // Use of this source code is governed by a BSD-style license that can be | 2 // Use of this source code is governed by a BSD-style license that can be |
| 3 // found in the LICENSE file. | 3 // found in the LICENSE file. |
| 4 | 4 |
| 5 #include "src/arm64/codegen-arm64.h" | 5 #include "src/arm64/codegen-arm64.h" |
| 6 | 6 |
| 7 #if V8_TARGET_ARCH_ARM64 | 7 #if V8_TARGET_ARCH_ARM64 |
| 8 | 8 |
| 9 #include "src/arm64/simulator-arm64.h" | 9 #include "src/arm64/simulator-arm64.h" |
| 10 #include "src/codegen.h" | 10 #include "src/codegen.h" |
| (...skipping 22 matching lines...) Expand all Loading... |
| 33 void StubRuntimeCallHelper::AfterCall(MacroAssembler* masm) const { | 33 void StubRuntimeCallHelper::AfterCall(MacroAssembler* masm) const { |
| 34 masm->LeaveFrame(StackFrame::INTERNAL); | 34 masm->LeaveFrame(StackFrame::INTERNAL); |
| 35 DCHECK(masm->has_frame()); | 35 DCHECK(masm->has_frame()); |
| 36 masm->set_has_frame(false); | 36 masm->set_has_frame(false); |
| 37 } | 37 } |
| 38 | 38 |
| 39 | 39 |
| 40 // ------------------------------------------------------------------------- | 40 // ------------------------------------------------------------------------- |
| 41 // Code generators | 41 // Code generators |
| 42 | 42 |
| 43 void ElementsTransitionGenerator::GenerateMapChangeElementsTransition( | |
| 44 MacroAssembler* masm, | |
| 45 Register receiver, | |
| 46 Register key, | |
| 47 Register value, | |
| 48 Register target_map, | |
| 49 AllocationSiteMode mode, | |
| 50 Label* allocation_memento_found) { | |
| 51 ASM_LOCATION( | |
| 52 "ElementsTransitionGenerator::GenerateMapChangeElementsTransition"); | |
| 53 DCHECK(!AreAliased(receiver, key, value, target_map)); | |
| 54 | |
| 55 if (mode == TRACK_ALLOCATION_SITE) { | |
| 56 DCHECK(allocation_memento_found != NULL); | |
| 57 __ JumpIfJSArrayHasAllocationMemento(receiver, x10, x11, | |
| 58 allocation_memento_found); | |
| 59 } | |
| 60 | |
| 61 // Set transitioned map. | |
| 62 __ Str(target_map, FieldMemOperand(receiver, HeapObject::kMapOffset)); | |
| 63 __ RecordWriteField(receiver, | |
| 64 HeapObject::kMapOffset, | |
| 65 target_map, | |
| 66 x10, | |
| 67 kLRHasNotBeenSaved, | |
| 68 kDontSaveFPRegs, | |
| 69 EMIT_REMEMBERED_SET, | |
| 70 OMIT_SMI_CHECK); | |
| 71 } | |
| 72 | |
| 73 | |
| 74 void ElementsTransitionGenerator::GenerateSmiToDouble( | |
| 75 MacroAssembler* masm, | |
| 76 Register receiver, | |
| 77 Register key, | |
| 78 Register value, | |
| 79 Register target_map, | |
| 80 AllocationSiteMode mode, | |
| 81 Label* fail) { | |
| 82 ASM_LOCATION("ElementsTransitionGenerator::GenerateSmiToDouble"); | |
| 83 Label gc_required, only_change_map; | |
| 84 Register elements = x4; | |
| 85 Register length = x5; | |
| 86 Register array_size = x6; | |
| 87 Register array = x7; | |
| 88 | |
| 89 Register scratch = x6; | |
| 90 | |
| 91 // Verify input registers don't conflict with locals. | |
| 92 DCHECK(!AreAliased(receiver, key, value, target_map, | |
| 93 elements, length, array_size, array)); | |
| 94 | |
| 95 if (mode == TRACK_ALLOCATION_SITE) { | |
| 96 __ JumpIfJSArrayHasAllocationMemento(receiver, x10, x11, fail); | |
| 97 } | |
| 98 | |
| 99 // Check for empty arrays, which only require a map transition and no changes | |
| 100 // to the backing store. | |
| 101 __ Ldr(elements, FieldMemOperand(receiver, JSObject::kElementsOffset)); | |
| 102 __ JumpIfRoot(elements, Heap::kEmptyFixedArrayRootIndex, &only_change_map); | |
| 103 | |
| 104 __ Push(lr); | |
| 105 __ Ldrsw(length, UntagSmiFieldMemOperand(elements, | |
| 106 FixedArray::kLengthOffset)); | |
| 107 | |
| 108 // Allocate new FixedDoubleArray. | |
| 109 __ Lsl(array_size, length, kDoubleSizeLog2); | |
| 110 __ Add(array_size, array_size, FixedDoubleArray::kHeaderSize); | |
| 111 __ Allocate(array_size, array, x10, x11, &gc_required, DOUBLE_ALIGNMENT); | |
| 112 // Register array is non-tagged heap object. | |
| 113 | |
| 114 // Set the destination FixedDoubleArray's length and map. | |
| 115 Register map_root = array_size; | |
| 116 __ LoadRoot(map_root, Heap::kFixedDoubleArrayMapRootIndex); | |
| 117 __ SmiTag(x11, length); | |
| 118 __ Str(x11, FieldMemOperand(array, FixedDoubleArray::kLengthOffset)); | |
| 119 __ Str(map_root, FieldMemOperand(array, HeapObject::kMapOffset)); | |
| 120 | |
| 121 __ Str(target_map, FieldMemOperand(receiver, HeapObject::kMapOffset)); | |
| 122 __ RecordWriteField(receiver, HeapObject::kMapOffset, target_map, scratch, | |
| 123 kLRHasBeenSaved, kDontSaveFPRegs, OMIT_REMEMBERED_SET, | |
| 124 OMIT_SMI_CHECK); | |
| 125 | |
| 126 // Replace receiver's backing store with newly created FixedDoubleArray. | |
| 127 __ Move(x10, array); | |
| 128 __ Str(array, FieldMemOperand(receiver, JSObject::kElementsOffset)); | |
| 129 __ RecordWriteField(receiver, JSObject::kElementsOffset, x10, scratch, | |
| 130 kLRHasBeenSaved, kDontSaveFPRegs, EMIT_REMEMBERED_SET, | |
| 131 OMIT_SMI_CHECK); | |
| 132 | |
| 133 // Prepare for conversion loop. | |
| 134 Register src_elements = x10; | |
| 135 Register dst_elements = x11; | |
| 136 Register dst_end = x12; | |
| 137 __ Add(src_elements, elements, FixedArray::kHeaderSize - kHeapObjectTag); | |
| 138 __ Add(dst_elements, array, FixedDoubleArray::kHeaderSize - kHeapObjectTag); | |
| 139 __ Add(dst_end, dst_elements, Operand(length, LSL, kDoubleSizeLog2)); | |
| 140 | |
| 141 FPRegister nan_d = d1; | |
| 142 __ Fmov(nan_d, rawbits_to_double(kHoleNanInt64)); | |
| 143 | |
| 144 Label entry, done; | |
| 145 __ B(&entry); | |
| 146 | |
| 147 __ Bind(&only_change_map); | |
| 148 __ Str(target_map, FieldMemOperand(receiver, HeapObject::kMapOffset)); | |
| 149 __ RecordWriteField(receiver, HeapObject::kMapOffset, target_map, scratch, | |
| 150 kLRHasNotBeenSaved, kDontSaveFPRegs, OMIT_REMEMBERED_SET, | |
| 151 OMIT_SMI_CHECK); | |
| 152 __ B(&done); | |
| 153 | |
| 154 // Call into runtime if GC is required. | |
| 155 __ Bind(&gc_required); | |
| 156 __ Pop(lr); | |
| 157 __ B(fail); | |
| 158 | |
| 159 // Iterate over the array, copying and converting smis to doubles. If an | |
| 160 // element is non-smi, write a hole to the destination. | |
| 161 { | |
| 162 Label loop; | |
| 163 __ Bind(&loop); | |
| 164 __ Ldr(x13, MemOperand(src_elements, kPointerSize, PostIndex)); | |
| 165 __ SmiUntagToDouble(d0, x13, kSpeculativeUntag); | |
| 166 __ Tst(x13, kSmiTagMask); | |
| 167 __ Fcsel(d0, d0, nan_d, eq); | |
| 168 __ Str(d0, MemOperand(dst_elements, kDoubleSize, PostIndex)); | |
| 169 | |
| 170 __ Bind(&entry); | |
| 171 __ Cmp(dst_elements, dst_end); | |
| 172 __ B(lt, &loop); | |
| 173 } | |
| 174 | |
| 175 __ Pop(lr); | |
| 176 __ Bind(&done); | |
| 177 } | |
| 178 | |
| 179 | |
| 180 void ElementsTransitionGenerator::GenerateDoubleToObject( | |
| 181 MacroAssembler* masm, | |
| 182 Register receiver, | |
| 183 Register key, | |
| 184 Register value, | |
| 185 Register target_map, | |
| 186 AllocationSiteMode mode, | |
| 187 Label* fail) { | |
| 188 ASM_LOCATION("ElementsTransitionGenerator::GenerateDoubleToObject"); | |
| 189 Register elements = x4; | |
| 190 Register array_size = x6; | |
| 191 Register array = x7; | |
| 192 Register length = x5; | |
| 193 | |
| 194 // Verify input registers don't conflict with locals. | |
| 195 DCHECK(!AreAliased(receiver, key, value, target_map, | |
| 196 elements, array_size, array, length)); | |
| 197 | |
| 198 if (mode == TRACK_ALLOCATION_SITE) { | |
| 199 __ JumpIfJSArrayHasAllocationMemento(receiver, x10, x11, fail); | |
| 200 } | |
| 201 | |
| 202 // Check for empty arrays, which only require a map transition and no changes | |
| 203 // to the backing store. | |
| 204 Label only_change_map; | |
| 205 | |
| 206 __ Ldr(elements, FieldMemOperand(receiver, JSObject::kElementsOffset)); | |
| 207 __ JumpIfRoot(elements, Heap::kEmptyFixedArrayRootIndex, &only_change_map); | |
| 208 | |
| 209 __ Push(lr); | |
| 210 // TODO(all): These registers may not need to be pushed. Examine | |
| 211 // RecordWriteStub and check whether it's needed. | |
| 212 __ Push(target_map, receiver, key, value); | |
| 213 __ Ldrsw(length, UntagSmiFieldMemOperand(elements, | |
| 214 FixedArray::kLengthOffset)); | |
| 215 // Allocate new FixedArray. | |
| 216 Label gc_required; | |
| 217 __ Mov(array_size, FixedDoubleArray::kHeaderSize); | |
| 218 __ Add(array_size, array_size, Operand(length, LSL, kPointerSizeLog2)); | |
| 219 __ Allocate(array_size, array, x10, x11, &gc_required, NO_ALLOCATION_FLAGS); | |
| 220 | |
| 221 // Set destination FixedDoubleArray's length and map. | |
| 222 Register map_root = array_size; | |
| 223 __ LoadRoot(map_root, Heap::kFixedArrayMapRootIndex); | |
| 224 __ SmiTag(x11, length); | |
| 225 __ Str(x11, FieldMemOperand(array, FixedDoubleArray::kLengthOffset)); | |
| 226 __ Str(map_root, FieldMemOperand(array, HeapObject::kMapOffset)); | |
| 227 | |
| 228 // Prepare for conversion loop. | |
| 229 Register src_elements = x10; | |
| 230 Register dst_elements = x11; | |
| 231 Register dst_end = x12; | |
| 232 Register the_hole = x14; | |
| 233 __ LoadRoot(the_hole, Heap::kTheHoleValueRootIndex); | |
| 234 __ Add(src_elements, elements, | |
| 235 FixedDoubleArray::kHeaderSize - kHeapObjectTag); | |
| 236 __ Add(dst_elements, array, FixedArray::kHeaderSize - kHeapObjectTag); | |
| 237 __ Add(dst_end, dst_elements, Operand(length, LSL, kPointerSizeLog2)); | |
| 238 | |
| 239 // Allocating heap numbers in the loop below can fail and cause a jump to | |
| 240 // gc_required. We can't leave a partly initialized FixedArray behind, | |
| 241 // so pessimistically fill it with holes now. | |
| 242 Label initialization_loop, initialization_loop_entry; | |
| 243 __ B(&initialization_loop_entry); | |
| 244 __ bind(&initialization_loop); | |
| 245 __ Str(the_hole, MemOperand(dst_elements, kPointerSize, PostIndex)); | |
| 246 __ bind(&initialization_loop_entry); | |
| 247 __ Cmp(dst_elements, dst_end); | |
| 248 __ B(lt, &initialization_loop); | |
| 249 | |
| 250 __ Add(dst_elements, array, FixedArray::kHeaderSize - kHeapObjectTag); | |
| 251 | |
| 252 Register heap_num_map = x15; | |
| 253 __ LoadRoot(heap_num_map, Heap::kHeapNumberMapRootIndex); | |
| 254 | |
| 255 Label entry; | |
| 256 __ B(&entry); | |
| 257 | |
| 258 // Call into runtime if GC is required. | |
| 259 __ Bind(&gc_required); | |
| 260 __ Pop(value, key, receiver, target_map); | |
| 261 __ Pop(lr); | |
| 262 __ B(fail); | |
| 263 | |
| 264 { | |
| 265 Label loop, convert_hole; | |
| 266 __ Bind(&loop); | |
| 267 __ Ldr(x13, MemOperand(src_elements, kPointerSize, PostIndex)); | |
| 268 __ Cmp(x13, kHoleNanInt64); | |
| 269 __ B(eq, &convert_hole); | |
| 270 | |
| 271 // Non-hole double, copy value into a heap number. | |
| 272 Register heap_num = length; | |
| 273 Register scratch = array_size; | |
| 274 Register scratch2 = elements; | |
| 275 __ AllocateHeapNumber(heap_num, &gc_required, scratch, scratch2, | |
| 276 x13, heap_num_map); | |
| 277 __ Mov(x13, dst_elements); | |
| 278 __ Str(heap_num, MemOperand(dst_elements, kPointerSize, PostIndex)); | |
| 279 __ RecordWrite(array, x13, heap_num, kLRHasBeenSaved, kDontSaveFPRegs, | |
| 280 EMIT_REMEMBERED_SET, OMIT_SMI_CHECK); | |
| 281 | |
| 282 __ B(&entry); | |
| 283 | |
| 284 // Replace the-hole NaN with the-hole pointer. | |
| 285 __ Bind(&convert_hole); | |
| 286 __ Str(the_hole, MemOperand(dst_elements, kPointerSize, PostIndex)); | |
| 287 | |
| 288 __ Bind(&entry); | |
| 289 __ Cmp(dst_elements, dst_end); | |
| 290 __ B(lt, &loop); | |
| 291 } | |
| 292 | |
| 293 __ Pop(value, key, receiver, target_map); | |
| 294 // Replace receiver's backing store with newly created and filled FixedArray. | |
| 295 __ Str(array, FieldMemOperand(receiver, JSObject::kElementsOffset)); | |
| 296 __ RecordWriteField(receiver, JSObject::kElementsOffset, array, x13, | |
| 297 kLRHasBeenSaved, kDontSaveFPRegs, EMIT_REMEMBERED_SET, | |
| 298 OMIT_SMI_CHECK); | |
| 299 __ Pop(lr); | |
| 300 | |
| 301 __ Bind(&only_change_map); | |
| 302 __ Str(target_map, FieldMemOperand(receiver, HeapObject::kMapOffset)); | |
| 303 __ RecordWriteField(receiver, HeapObject::kMapOffset, target_map, x13, | |
| 304 kLRHasNotBeenSaved, kDontSaveFPRegs, OMIT_REMEMBERED_SET, | |
| 305 OMIT_SMI_CHECK); | |
| 306 } | |
| 307 | |
| 308 | |
| 309 CodeAgingHelper::CodeAgingHelper(Isolate* isolate) { | 43 CodeAgingHelper::CodeAgingHelper(Isolate* isolate) { |
| 310 USE(isolate); | 44 USE(isolate); |
| 311 DCHECK(young_sequence_.length() == kNoCodeAgeSequenceLength); | 45 DCHECK(young_sequence_.length() == kNoCodeAgeSequenceLength); |
| 312 // The sequence of instructions that is patched out for aging code is the | 46 // The sequence of instructions that is patched out for aging code is the |
| 313 // following boilerplate stack-building prologue that is found both in | 47 // following boilerplate stack-building prologue that is found both in |
| 314 // FUNCTION and OPTIMIZED_FUNCTION code: | 48 // FUNCTION and OPTIMIZED_FUNCTION code: |
| 315 PatchingAssembler patcher(isolate, young_sequence_.start(), | 49 PatchingAssembler patcher(isolate, young_sequence_.start(), |
| 316 young_sequence_.length() / kInstructionSize); | 50 young_sequence_.length() / kInstructionSize); |
| 317 // The young sequence is the frame setup code for FUNCTION code types. It is | 51 // The young sequence is the frame setup code for FUNCTION code types. It is |
| 318 // generated by FullCodeGenerator::Generate. | 52 // generated by FullCodeGenerator::Generate. |
| (...skipping 130 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... |
| 449 __ Ldrb(result, MemOperand(string, index, SXTW)); | 183 __ Ldrb(result, MemOperand(string, index, SXTW)); |
| 450 __ Bind(&done); | 184 __ Bind(&done); |
| 451 } | 185 } |
| 452 | 186 |
| 453 #undef __ | 187 #undef __ |
| 454 | 188 |
| 455 } // namespace internal | 189 } // namespace internal |
| 456 } // namespace v8 | 190 } // namespace v8 |
| 457 | 191 |
| 458 #endif // V8_TARGET_ARCH_ARM64 | 192 #endif // V8_TARGET_ARCH_ARM64 |
| OLD | NEW |