OLD | NEW |
1 // Copyright 2013 the V8 project authors. All rights reserved. | 1 // Copyright 2013 the V8 project authors. All rights reserved. |
2 // Redistribution and use in source and binary forms, with or without | 2 // Redistribution and use in source and binary forms, with or without |
3 // modification, are permitted provided that the following conditions are | 3 // modification, are permitted provided that the following conditions are |
4 // met: | 4 // met: |
5 // | 5 // |
6 // * Redistributions of source code must retain the above copyright | 6 // * Redistributions of source code must retain the above copyright |
7 // notice, this list of conditions and the following disclaimer. | 7 // notice, this list of conditions and the following disclaimer. |
8 // * Redistributions in binary form must reproduce the above | 8 // * Redistributions in binary form must reproduce the above |
9 // copyright notice, this list of conditions and the following | 9 // copyright notice, this list of conditions and the following |
10 // disclaimer in the documentation and/or other materials provided | 10 // disclaimer in the documentation and/or other materials provided |
(...skipping 325 matching lines...)
336 } | 336 } |
337 imm >>= 16; | 337 imm >>= 16; |
338 } | 338 } |
339 return count; | 339 return count; |
340 } | 340 } |
341 | 341 |
342 | 342 |
343 // The movz instruction can generate immediates containing an arbitrary 16-bit | 343 // The movz instruction can generate immediates containing an arbitrary 16-bit |
344 // half-word, with remaining bits clear, e.g. 0x00001234, 0x0000123400000000. | 344 // half-word, with remaining bits clear, e.g. 0x00001234, 0x0000123400000000. |
345 bool MacroAssembler::IsImmMovz(uint64_t imm, unsigned reg_size) { | 345 bool MacroAssembler::IsImmMovz(uint64_t imm, unsigned reg_size) { |
346 ASSERT((reg_size == kXRegSize) || (reg_size == kWRegSize)); | 346 ASSERT((reg_size == kXRegSizeInBits) || (reg_size == kWRegSizeInBits)); |
347 return CountClearHalfWords(imm, reg_size) >= ((reg_size / 16) - 1); | 347 return CountClearHalfWords(imm, reg_size) >= ((reg_size / 16) - 1); |
348 } | 348 } |
349 | 349 |
350 | 350 |
351 // The movn instruction can generate immediates containing an arbitrary 16-bit | 351 // The movn instruction can generate immediates containing an arbitrary 16-bit |
352 // half-word, with remaining bits set, e.g. 0xffff1234, 0xffff1234ffffffff. | 352 // half-word, with remaining bits set, e.g. 0xffff1234, 0xffff1234ffffffff. |
353 bool MacroAssembler::IsImmMovn(uint64_t imm, unsigned reg_size) { | 353 bool MacroAssembler::IsImmMovn(uint64_t imm, unsigned reg_size) { |
354 return IsImmMovz(~imm, reg_size); | 354 return IsImmMovz(~imm, reg_size); |
355 } | 355 } |
356 | 356 |
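Reviewer note: a standalone sketch of the half-word test above, assuming the same semantics as CountClearHalfWords (reg_size is 32 or 64 bits). The names mirror the macro-assembler's, but this is an illustration, not the V8 implementation:

#include <cassert>
#include <cstdint>

static unsigned CountClearHalfWords(uint64_t imm, unsigned reg_size) {
  unsigned count = 0;
  for (unsigned i = 0; i < (reg_size / 16); i++) {
    if ((imm & 0xffff) == 0) count++;
    imm >>= 16;
  }
  return count;
}

static bool IsImmMovz(uint64_t imm, unsigned reg_size) {
  // movz supplies one arbitrary half-word; the other (reg_size / 16) - 1
  // half-words must be clear.
  return CountClearHalfWords(imm, reg_size) >= ((reg_size / 16) - 1);
}

int main() {
  assert(IsImmMovz(UINT64_C(0x0000123400000000), 64));   // one non-zero half-word
  assert(!IsImmMovz(UINT64_C(0x0000123400005678), 64));  // two non-zero half-words
  assert(IsImmMovz(~UINT64_C(0xffff1234ffffffff), 64));  // the movn case
  return 0;
}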
(...skipping 110 matching lines...)
467 (operand.IsShiftedRegister() && (operand.shift() == ROR))) { | 467 (operand.IsShiftedRegister() && (operand.shift() == ROR))) { |
468 // Add/sub with carry (immediate or ROR shifted register). | 468 // Add/sub with carry (immediate or ROR shifted register). |
469 Register temp = temps.AcquireSameSizeAs(rn); | 469 Register temp = temps.AcquireSameSizeAs(rn); |
470 Mov(temp, operand); | 470 Mov(temp, operand); |
471 AddSubWithCarry(rd, rn, temp, S, op); | 471 AddSubWithCarry(rd, rn, temp, S, op); |
472 | 472 |
473 } else if (operand.IsShiftedRegister() && (operand.shift_amount() != 0)) { | 473 } else if (operand.IsShiftedRegister() && (operand.shift_amount() != 0)) { |
474 // Add/sub with carry (shifted register). | 474 // Add/sub with carry (shifted register). |
475 ASSERT(operand.reg().SizeInBits() == rd.SizeInBits()); | 475 ASSERT(operand.reg().SizeInBits() == rd.SizeInBits()); |
476 ASSERT(operand.shift() != ROR); | 476 ASSERT(operand.shift() != ROR); |
477 ASSERT( | 477 ASSERT(is_uintn(operand.shift_amount(), |
478 is_uintn(operand.shift_amount(), | 478 rd.SizeInBits() == kXRegSizeInBits ? kXRegSizeInBitsLog2 |
479 rd.SizeInBits() == kXRegSize ? kXRegSizeLog2 : kWRegSizeLog2)); | 479 : kWRegSizeInBitsLog2)); |
480 Register temp = temps.AcquireSameSizeAs(rn); | 480 Register temp = temps.AcquireSameSizeAs(rn); |
481 EmitShift(temp, operand.reg(), operand.shift(), operand.shift_amount()); | 481 EmitShift(temp, operand.reg(), operand.shift(), operand.shift_amount()); |
482 AddSubWithCarry(rd, rn, temp, S, op); | 482 AddSubWithCarry(rd, rn, temp, S, op); |
483 | 483 |
484 } else if (operand.IsExtendedRegister()) { | 484 } else if (operand.IsExtendedRegister()) { |
485 // Add/sub with carry (extended register). | 485 // Add/sub with carry (extended register). |
486 ASSERT(operand.reg().SizeInBits() <= rd.SizeInBits()); | 486 ASSERT(operand.reg().SizeInBits() <= rd.SizeInBits()); |
487 // Add/sub extended supports a shift <= 4. We want to support exactly the | 487 // Add/sub extended supports a shift <= 4. We want to support exactly the |
488 // same modes. | 488 // same modes. |
489 ASSERT(operand.shift_amount() <= 4); | 489 ASSERT(operand.shift_amount() <= 4); |
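Reviewer note: the two bounds asserted above come straight from the A64 encodings: the shifted-register add/sub form encodes a shift amount of 0 to (register width - 1), hence is_uintn with log2(64) = 6 bits for X registers and log2(32) = 5 bits for W registers, while the extended-register form only encodes a left shift of 0 to 4.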
(...skipping 621 matching lines...)
1111 | 1111 |
1112 | 1112 |
1113 void MacroAssembler::PushCalleeSavedRegisters() { | 1113 void MacroAssembler::PushCalleeSavedRegisters() { |
1114 // Ensure that the macro-assembler doesn't use any scratch registers. | 1114 // Ensure that the macro-assembler doesn't use any scratch registers. |
1115 InstructionAccurateScope scope(this); | 1115 InstructionAccurateScope scope(this); |
1116 | 1116 |
1117 // This method must not be called unless the current stack pointer is the | 1117 // This method must not be called unless the current stack pointer is the |
1118 // system stack pointer (csp). | 1118 // system stack pointer (csp). |
1119 ASSERT(csp.Is(StackPointer())); | 1119 ASSERT(csp.Is(StackPointer())); |
1120 | 1120 |
1121 MemOperand tos(csp, -2 * kXRegSizeInBytes, PreIndex); | 1121 MemOperand tos(csp, -2 * kXRegSize, PreIndex); |
1122 | 1122 |
1123 stp(d14, d15, tos); | 1123 stp(d14, d15, tos); |
1124 stp(d12, d13, tos); | 1124 stp(d12, d13, tos); |
1125 stp(d10, d11, tos); | 1125 stp(d10, d11, tos); |
1126 stp(d8, d9, tos); | 1126 stp(d8, d9, tos); |
1127 | 1127 |
1128 stp(x29, x30, tos); | 1128 stp(x29, x30, tos); |
1129 stp(x27, x28, tos); // x28 = jssp | 1129 stp(x27, x28, tos); // x28 = jssp |
1130 stp(x25, x26, tos); | 1130 stp(x25, x26, tos); |
1131 stp(x23, x24, tos); | 1131 stp(x23, x24, tos); |
1132 stp(x21, x22, tos); | 1132 stp(x21, x22, tos); |
1133 stp(x19, x20, tos); | 1133 stp(x19, x20, tos); |
1134 } | 1134 } |
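Reviewer note: a minimal model of the PreIndex write-back used above, assuming kXRegSize == 8 bytes per the renamed constant. Each stp first steps csp down by 2 * kXRegSize and then stores the pair at the new address, so ten pairs claim 160 bytes and x19/x20 end up at the lowest address (which is why PopCalleeSavedRegisters below loads them first):

#include <cstdio>

int main() {
  const int kXRegSize = 8;  // bytes
  const char* pairs[] = {"d14/d15", "d12/d13", "d10/d11", "d8/d9",
                         "x29/x30", "x27/x28", "x25/x26", "x23/x24",
                         "x21/x22", "x19/x20"};
  int csp = 0;  // offset relative to csp on entry
  for (const char* p : pairs) {
    csp -= 2 * kXRegSize;  // pre-index write-back
    printf("%-7s stored at [entry_csp%+d]\n", p, csp);
  }
  return 0;  // final csp is entry_csp - 160
}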
1135 | 1135 |
1136 | 1136 |
1137 void MacroAssembler::PopCalleeSavedRegisters() { | 1137 void MacroAssembler::PopCalleeSavedRegisters() { |
1138 // Ensure that the macro-assembler doesn't use any scratch registers. | 1138 // Ensure that the macro-assembler doesn't use any scratch registers. |
1139 InstructionAccurateScope scope(this); | 1139 InstructionAccurateScope scope(this); |
1140 | 1140 |
1141 // This method must not be called unless the current stack pointer is the | 1141 // This method must not be called unless the current stack pointer is the |
1142 // system stack pointer (csp). | 1142 // system stack pointer (csp). |
1143 ASSERT(csp.Is(StackPointer())); | 1143 ASSERT(csp.Is(StackPointer())); |
1144 | 1144 |
1145 MemOperand tos(csp, 2 * kXRegSizeInBytes, PostIndex); | 1145 MemOperand tos(csp, 2 * kXRegSize, PostIndex); |
1146 | 1146 |
1147 ldp(x19, x20, tos); | 1147 ldp(x19, x20, tos); |
1148 ldp(x21, x22, tos); | 1148 ldp(x21, x22, tos); |
1149 ldp(x23, x24, tos); | 1149 ldp(x23, x24, tos); |
1150 ldp(x25, x26, tos); | 1150 ldp(x25, x26, tos); |
1151 ldp(x27, x28, tos); // x28 = jssp | 1151 ldp(x27, x28, tos); // x28 = jssp |
1152 ldp(x29, x30, tos); | 1152 ldp(x29, x30, tos); |
1153 | 1153 |
1154 ldp(d8, d9, tos); | 1154 ldp(d8, d9, tos); |
1155 ldp(d10, d11, tos); | 1155 ldp(d10, d11, tos); |
(...skipping 463 matching lines...)
1619 Mov(x3, Operand(thunk_ref)); | 1619 Mov(x3, Operand(thunk_ref)); |
1620 B(&end_profiler_check); | 1620 B(&end_profiler_check); |
1621 | 1621 |
1622 Bind(&profiler_disabled); | 1622 Bind(&profiler_disabled); |
1623 Mov(x3, function_address); | 1623 Mov(x3, function_address); |
1624 Bind(&end_profiler_check); | 1624 Bind(&end_profiler_check); |
1625 | 1625 |
1626 // Save the callee-save registers we are going to use. | 1626 // Save the callee-save registers we are going to use. |
1627 // TODO(all): Is this necessary? ARM doesn't do it. | 1627 // TODO(all): Is this necessary? ARM doesn't do it. |
1628 STATIC_ASSERT(kCallApiFunctionSpillSpace == 4); | 1628 STATIC_ASSERT(kCallApiFunctionSpillSpace == 4); |
1629 Poke(x19, (spill_offset + 0) * kXRegSizeInBytes); | 1629 Poke(x19, (spill_offset + 0) * kXRegSize); |
1630 Poke(x20, (spill_offset + 1) * kXRegSizeInBytes); | 1630 Poke(x20, (spill_offset + 1) * kXRegSize); |
1631 Poke(x21, (spill_offset + 2) * kXRegSizeInBytes); | 1631 Poke(x21, (spill_offset + 2) * kXRegSize); |
1632 Poke(x22, (spill_offset + 3) * kXRegSizeInBytes); | 1632 Poke(x22, (spill_offset + 3) * kXRegSize); |
1633 | 1633 |
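Reviewer note: Poke and Peek here address the stack in X-register-sized units, so byte_offset = (spill_offset + i) * kXRegSize. For illustration, with spill_offset == 0 that is x19 -> [sp + 0], x20 -> [sp + 8], x21 -> [sp + 16], x22 -> [sp + 24], matching the four slots asserted by kCallApiFunctionSpillSpace == 4.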
1634 // Allocate HandleScope in callee-save registers. | 1634 // Allocate HandleScope in callee-save registers. |
1635 // We will need to restore the HandleScope after the call to the API function, | 1635 // We will need to restore the HandleScope after the call to the API function, |
1636 // by allocating it in callee-save registers, as they will be preserved by C code. | 1636 // by allocating it in callee-save registers, as they will be preserved by C code. |
1637 Register handle_scope_base = x22; | 1637 Register handle_scope_base = x22; |
1638 Register next_address_reg = x19; | 1638 Register next_address_reg = x19; |
1639 Register limit_reg = x20; | 1639 Register limit_reg = x20; |
1640 Register level_reg = w21; | 1640 Register level_reg = w21; |
1641 | 1641 |
1642 Mov(handle_scope_base, Operand(next_address)); | 1642 Mov(handle_scope_base, Operand(next_address)); |
(...skipping 43 matching lines...)
1686 Check(eq, kUnexpectedLevelAfterReturnFromApiCall); | 1686 Check(eq, kUnexpectedLevelAfterReturnFromApiCall); |
1687 } | 1687 } |
1688 Sub(level_reg, level_reg, 1); | 1688 Sub(level_reg, level_reg, 1); |
1689 Str(level_reg, MemOperand(handle_scope_base, kLevelOffset)); | 1689 Str(level_reg, MemOperand(handle_scope_base, kLevelOffset)); |
1690 Ldr(x1, MemOperand(handle_scope_base, kLimitOffset)); | 1690 Ldr(x1, MemOperand(handle_scope_base, kLimitOffset)); |
1691 Cmp(limit_reg, x1); | 1691 Cmp(limit_reg, x1); |
1692 B(ne, &delete_allocated_handles); | 1692 B(ne, &delete_allocated_handles); |
1693 | 1693 |
1694 Bind(&leave_exit_frame); | 1694 Bind(&leave_exit_frame); |
1695 // Restore callee-saved registers. | 1695 // Restore callee-saved registers. |
1696 Peek(x19, (spill_offset + 0) * kXRegSizeInBytes); | 1696 Peek(x19, (spill_offset + 0) * kXRegSize); |
1697 Peek(x20, (spill_offset + 1) * kXRegSizeInBytes); | 1697 Peek(x20, (spill_offset + 1) * kXRegSize); |
1698 Peek(x21, (spill_offset + 2) * kXRegSizeInBytes); | 1698 Peek(x21, (spill_offset + 2) * kXRegSize); |
1699 Peek(x22, (spill_offset + 3) * kXRegSizeInBytes); | 1699 Peek(x22, (spill_offset + 3) * kXRegSize); |
1700 | 1700 |
1701 // Check if the function scheduled an exception. | 1701 // Check if the function scheduled an exception. |
1702 Mov(x5, Operand(ExternalReference::scheduled_exception_address(isolate()))); | 1702 Mov(x5, Operand(ExternalReference::scheduled_exception_address(isolate()))); |
1703 Ldr(x5, MemOperand(x5)); | 1703 Ldr(x5, MemOperand(x5)); |
1704 JumpIfNotRoot(x5, Heap::kTheHoleValueRootIndex, &promote_scheduled_exception); | 1704 JumpIfNotRoot(x5, Heap::kTheHoleValueRootIndex, &promote_scheduled_exception); |
1705 Bind(&exception_handled); | 1705 Bind(&exception_handled); |
1706 | 1706 |
1707 bool restore_context = context_restore_operand != NULL; | 1707 bool restore_context = context_restore_operand != NULL; |
1708 if (restore_context) { | 1708 if (restore_context) { |
1709 Ldr(cp, *context_restore_operand); | 1709 Ldr(cp, *context_restore_operand); |
(...skipping 450 matching lines...)
2160 // number string cache for smis is just the smi value, and the hash for | 2160 // number string cache for smis is just the smi value, and the hash for |
2161 // doubles is the xor of the upper and lower words. See | 2161 // doubles is the xor of the upper and lower words. See |
2162 // Heap::GetNumberStringCache. | 2162 // Heap::GetNumberStringCache. |
2163 Label is_smi; | 2163 Label is_smi; |
2164 Label load_result_from_cache; | 2164 Label load_result_from_cache; |
2165 | 2165 |
2166 JumpIfSmi(object, &is_smi); | 2166 JumpIfSmi(object, &is_smi); |
2167 CheckMap(object, scratch1, Heap::kHeapNumberMapRootIndex, not_found, | 2167 CheckMap(object, scratch1, Heap::kHeapNumberMapRootIndex, not_found, |
2168 DONT_DO_SMI_CHECK); | 2168 DONT_DO_SMI_CHECK); |
2169 | 2169 |
2170 STATIC_ASSERT(kDoubleSize == (kWRegSizeInBytes * 2)); | 2170 STATIC_ASSERT(kDoubleSize == (kWRegSize * 2)); |
2171 Add(scratch1, object, HeapNumber::kValueOffset - kHeapObjectTag); | 2171 Add(scratch1, object, HeapNumber::kValueOffset - kHeapObjectTag); |
2172 Ldp(scratch1.W(), scratch2.W(), MemOperand(scratch1)); | 2172 Ldp(scratch1.W(), scratch2.W(), MemOperand(scratch1)); |
2173 Eor(scratch1, scratch1, scratch2); | 2173 Eor(scratch1, scratch1, scratch2); |
2174 And(scratch1, scratch1, mask); | 2174 And(scratch1, scratch1, mask); |
2175 | 2175 |
2176 // Calculate address of entry in string cache: each entry consists of two | 2176 // Calculate address of entry in string cache: each entry consists of two |
2177 // pointer sized fields. | 2177 // pointer sized fields. |
2178 Add(scratch1, number_string_cache, | 2178 Add(scratch1, number_string_cache, |
2179 Operand(scratch1, LSL, kPointerSizeLog2 + 1)); | 2179 Operand(scratch1, LSL, kPointerSizeLog2 + 1)); |
2180 | 2180 |
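Reviewer note: a sketch of the double-number hash and entry-address computation above, assuming 64-bit pointers (kPointerSizeLog2 == 3) and a power-of-two cache where mask == number_of_entries - 1. CacheEntryAddress is a hypothetical helper for illustration, not a V8 function:

#include <cstdint>
#include <cstring>

uintptr_t CacheEntryAddress(double value, uintptr_t cache_base, uint32_t mask) {
  uint64_t bits;
  std::memcpy(&bits, &value, sizeof(bits));  // the Ldp of the two W halves
  uint32_t hash = static_cast<uint32_t>(bits) ^
                  static_cast<uint32_t>(bits >> 32);  // Eor upper with lower
  hash &= mask;                                       // And with mask
  // Each entry is two pointer-sized fields, hence LSL (kPointerSizeLog2 + 1).
  return cache_base + (static_cast<uintptr_t>(hash) << (3 + 1));
}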
(...skipping 112 matching lines...)
2293 | 2293 |
2294 const Register& dst_untagged = scratch1; | 2294 const Register& dst_untagged = scratch1; |
2295 const Register& src_untagged = scratch2; | 2295 const Register& src_untagged = scratch2; |
2296 Sub(dst_untagged, dst, kHeapObjectTag); | 2296 Sub(dst_untagged, dst, kHeapObjectTag); |
2297 Sub(src_untagged, src, kHeapObjectTag); | 2297 Sub(src_untagged, src, kHeapObjectTag); |
2298 | 2298 |
2299 // Copy fields in pairs. | 2299 // Copy fields in pairs. |
2300 Label loop; | 2300 Label loop; |
2301 Bind(&loop); | 2301 Bind(&loop); |
2302 Ldp(scratch4, scratch5, | 2302 Ldp(scratch4, scratch5, |
2303 MemOperand(src_untagged, kXRegSizeInBytes * 2, PostIndex)); | 2303 MemOperand(src_untagged, kXRegSize * 2, PostIndex)); |
2304 Stp(scratch4, scratch5, | 2304 Stp(scratch4, scratch5, |
2305 MemOperand(dst_untagged, kXRegSizeInBytes * 2, PostIndex)); | 2305 MemOperand(dst_untagged, kXRegSize * 2, PostIndex)); |
2306 Sub(remaining, remaining, 1); | 2306 Sub(remaining, remaining, 1); |
2307 Cbnz(remaining, &loop); | 2307 Cbnz(remaining, &loop); |
2308 | 2308 |
2309 // Handle the leftovers. | 2309 // Handle the leftovers. |
2310 if (count & 1) { | 2310 if (count & 1) { |
2311 Ldr(scratch4, MemOperand(src_untagged)); | 2311 Ldr(scratch4, MemOperand(src_untagged)); |
2312 Str(scratch4, MemOperand(dst_untagged)); | 2312 Str(scratch4, MemOperand(dst_untagged)); |
2313 } | 2313 } |
2314 } | 2314 } |
2315 | 2315 |
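Reviewer note: this helper and the unrolled variant below implement the same strategy; in plain C++ it is roughly the following sketch (not the V8 code), with the loop body corresponding to one Ldp/Stp pair and the tail to the single Ldr/Str:

#include <cstdint>

void CopyFieldsSketch(uint64_t* dst, const uint64_t* src, unsigned count) {
  unsigned i = 0;
  for (; i + 1 < count; i += 2) {  // pairwise copies (Ldp/Stp with PostIndex)
    dst[i] = src[i];
    dst[i + 1] = src[i + 1];
  }
  if (count & 1) {                 // odd leftover field (Ldr/Str)
    dst[i] = src[i];
  }
}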
2316 | 2316 |
2317 void MacroAssembler::CopyFieldsUnrolledPairsHelper(Register dst, | 2317 void MacroAssembler::CopyFieldsUnrolledPairsHelper(Register dst, |
2318 Register src, | 2318 Register src, |
2319 unsigned count, | 2319 unsigned count, |
2320 Register scratch1, | 2320 Register scratch1, |
2321 Register scratch2, | 2321 Register scratch2, |
2322 Register scratch3, | 2322 Register scratch3, |
2323 Register scratch4) { | 2323 Register scratch4) { |
2324 // Untag src and dst into scratch registers. | 2324 // Untag src and dst into scratch registers. |
2325 // Copy src->dst in an unrolled loop. | 2325 // Copy src->dst in an unrolled loop. |
2326 ASSERT(!AreAliased(dst, src, scratch1, scratch2, scratch3, scratch4)); | 2326 ASSERT(!AreAliased(dst, src, scratch1, scratch2, scratch3, scratch4)); |
2327 | 2327 |
2328 const Register& dst_untagged = scratch1; | 2328 const Register& dst_untagged = scratch1; |
2329 const Register& src_untagged = scratch2; | 2329 const Register& src_untagged = scratch2; |
2330 sub(dst_untagged, dst, kHeapObjectTag); | 2330 sub(dst_untagged, dst, kHeapObjectTag); |
2331 sub(src_untagged, src, kHeapObjectTag); | 2331 sub(src_untagged, src, kHeapObjectTag); |
2332 | 2332 |
2333 // Copy fields in pairs. | 2333 // Copy fields in pairs. |
2334 for (unsigned i = 0; i < count / 2; i++) { | 2334 for (unsigned i = 0; i < count / 2; i++) { |
2335 Ldp(scratch3, scratch4, | 2335 Ldp(scratch3, scratch4, MemOperand(src_untagged, kXRegSize * 2, PostIndex)); |
2336 MemOperand(src_untagged, kXRegSizeInBytes * 2, PostIndex)); | 2336 Stp(scratch3, scratch4, MemOperand(dst_untagged, kXRegSize * 2, PostIndex)); |
2337 Stp(scratch3, scratch4, | |
2338 MemOperand(dst_untagged, kXRegSizeInBytes * 2, PostIndex)); | |
2339 } | 2337 } |
2340 | 2338 |
2341 // Handle the leftovers. | 2339 // Handle the leftovers. |
2342 if (count & 1) { | 2340 if (count & 1) { |
2343 Ldr(scratch3, MemOperand(src_untagged)); | 2341 Ldr(scratch3, MemOperand(src_untagged)); |
2344 Str(scratch3, MemOperand(dst_untagged)); | 2342 Str(scratch3, MemOperand(dst_untagged)); |
2345 } | 2343 } |
2346 } | 2344 } |
2347 | 2345 |
2348 | 2346 |
2349 void MacroAssembler::CopyFieldsUnrolledHelper(Register dst, | 2347 void MacroAssembler::CopyFieldsUnrolledHelper(Register dst, |
2350 Register src, | 2348 Register src, |
2351 unsigned count, | 2349 unsigned count, |
2352 Register scratch1, | 2350 Register scratch1, |
2353 Register scratch2, | 2351 Register scratch2, |
2354 Register scratch3) { | 2352 Register scratch3) { |
2355 // Untag src and dst into scratch registers. | 2353 // Untag src and dst into scratch registers. |
2356 // Copy src->dst in an unrolled loop. | 2354 // Copy src->dst in an unrolled loop. |
2357 ASSERT(!AreAliased(dst, src, scratch1, scratch2, scratch3)); | 2355 ASSERT(!AreAliased(dst, src, scratch1, scratch2, scratch3)); |
2358 | 2356 |
2359 const Register& dst_untagged = scratch1; | 2357 const Register& dst_untagged = scratch1; |
2360 const Register& src_untagged = scratch2; | 2358 const Register& src_untagged = scratch2; |
2361 Sub(dst_untagged, dst, kHeapObjectTag); | 2359 Sub(dst_untagged, dst, kHeapObjectTag); |
2362 Sub(src_untagged, src, kHeapObjectTag); | 2360 Sub(src_untagged, src, kHeapObjectTag); |
2363 | 2361 |
2364 // Copy fields one by one. | 2362 // Copy fields one by one. |
2365 for (unsigned i = 0; i < count; i++) { | 2363 for (unsigned i = 0; i < count; i++) { |
2366 Ldr(scratch3, MemOperand(src_untagged, kXRegSizeInBytes, PostIndex)); | 2364 Ldr(scratch3, MemOperand(src_untagged, kXRegSize, PostIndex)); |
2367 Str(scratch3, MemOperand(dst_untagged, kXRegSizeInBytes, PostIndex)); | 2365 Str(scratch3, MemOperand(dst_untagged, kXRegSize, PostIndex)); |
2368 } | 2366 } |
2369 } | 2367 } |
2370 | 2368 |
2371 | 2369 |
2372 void MacroAssembler::CopyFields(Register dst, Register src, CPURegList temps, | 2370 void MacroAssembler::CopyFields(Register dst, Register src, CPURegList temps, |
2373 unsigned count) { | 2371 unsigned count) { |
2374 // One of two methods is used: | 2372 // One of two methods is used: |
2375 // | 2373 // |
2376 // For high 'count' values where many scratch registers are available: | 2374 // For high 'count' values where many scratch registers are available: |
2377 // Untag src and dst into scratch registers. | 2375 // Untag src and dst into scratch registers. |
(...skipping 526 matching lines...)
2904 void MacroAssembler::ExitFrameRestoreFPRegs() { | 2902 void MacroAssembler::ExitFrameRestoreFPRegs() { |
2905 // Read the registers from the stack without popping them. The stack pointer | 2903 // Read the registers from the stack without popping them. The stack pointer |
2906 // will be reset as part of the unwinding process. | 2904 // will be reset as part of the unwinding process. |
2907 CPURegList saved_fp_regs = kCallerSavedFP; | 2905 CPURegList saved_fp_regs = kCallerSavedFP; |
2908 ASSERT(saved_fp_regs.Count() % 2 == 0); | 2906 ASSERT(saved_fp_regs.Count() % 2 == 0); |
2909 | 2907 |
2910 int offset = ExitFrameConstants::kLastExitFrameField; | 2908 int offset = ExitFrameConstants::kLastExitFrameField; |
2911 while (!saved_fp_regs.IsEmpty()) { | 2909 while (!saved_fp_regs.IsEmpty()) { |
2912 const CPURegister& dst0 = saved_fp_regs.PopHighestIndex(); | 2910 const CPURegister& dst0 = saved_fp_regs.PopHighestIndex(); |
2913 const CPURegister& dst1 = saved_fp_regs.PopHighestIndex(); | 2911 const CPURegister& dst1 = saved_fp_regs.PopHighestIndex(); |
2914 offset -= 2 * kDRegSizeInBytes; | 2912 offset -= 2 * kDRegSize; |
2915 Ldp(dst1, dst0, MemOperand(fp, offset)); | 2913 Ldp(dst1, dst0, MemOperand(fp, offset)); |
2916 } | 2914 } |
2917 } | 2915 } |
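Reviewer note: the offset walk above reads pairs at decreasing offsets below kLastExitFrameField without moving the stack pointer; the highest-numbered registers are popped from the list first, so they sit at the higher addresses. A sketch, assuming kDRegSize == 8, kPointerSize == 8, and (purely for illustration) a saved set of d0-d7:

#include <cstdio>

int main() {
  const int kDRegSize = 8;                 // bytes
  const int kLastExitFrameField = -2 * 8;  // -2 * kPointerSize
  int offset = kLastExitFrameField;
  for (int hi = 7; hi > 0; hi -= 2) {      // two PopHighestIndex calls
    offset -= 2 * kDRegSize;
    printf("ldp d%d, d%d, [fp, #%d]\n", hi - 1, hi, offset);
  }
  return 0;
}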
2918 | 2916 |
2919 | 2917 |
2920 // TODO(jbramley): Check that we're handling the frame pointer correctly. | 2918 // TODO(jbramley): Check that we're handling the frame pointer correctly. |
2921 void MacroAssembler::EnterExitFrame(bool save_doubles, | 2919 void MacroAssembler::EnterExitFrame(bool save_doubles, |
2922 const Register& scratch, | 2920 const Register& scratch, |
2923 int extra_space) { | 2921 int extra_space) { |
2924 ASSERT(jssp.Is(StackPointer())); | 2922 ASSERT(jssp.Is(StackPointer())); |
(...skipping 24 matching lines...)
2949 | 2947 |
2950 STATIC_ASSERT((-2 * kPointerSize) == | 2948 STATIC_ASSERT((-2 * kPointerSize) == |
2951 ExitFrameConstants::kLastExitFrameField); | 2949 ExitFrameConstants::kLastExitFrameField); |
2952 if (save_doubles) { | 2950 if (save_doubles) { |
2953 ExitFramePreserveFPRegs(); | 2951 ExitFramePreserveFPRegs(); |
2954 } | 2952 } |
2955 | 2953 |
2956 // Reserve space for the return address and for user requested memory. | 2954 // Reserve space for the return address and for user requested memory. |
2957 // We do this before aligning to make sure that we end up correctly | 2955 // We do this before aligning to make sure that we end up correctly |
2958 // aligned with the minimum of wasted space. | 2956 // aligned with the minimum of wasted space. |
2959 Claim(extra_space + 1, kXRegSizeInBytes); | 2957 Claim(extra_space + 1, kXRegSize); |
2960 // fp[8]: CallerPC (lr) | 2958 // fp[8]: CallerPC (lr) |
2961 // fp -> fp[0]: CallerFP (old fp) | 2959 // fp -> fp[0]: CallerFP (old fp) |
2962 // fp[-8]: Space reserved for SPOffset. | 2960 // fp[-8]: Space reserved for SPOffset. |
2963 // fp[-16]: CodeObject() | 2961 // fp[-16]: CodeObject() |
2964 // jssp[-16 - fp_size]: Saved doubles (if save_doubles is true). | 2962 // jssp[-16 - fp_size]: Saved doubles (if save_doubles is true). |
2965 // jssp[8]: Extra space reserved for caller (if extra_space != 0). | 2963 // jssp[8]: Extra space reserved for caller (if extra_space != 0). |
2966 // jssp -> jssp[0]: Space reserved for the return address. | 2964 // jssp -> jssp[0]: Space reserved for the return address. |
2967 | 2965 |
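Reviewer note: Claim(extra_space + 1, kXRegSize) reserves (extra_space + 1) * 8 bytes, assuming kXRegSize == 8: one X-sized slot for the return address plus the caller's requested extra space, e.g. extra_space == 2 lowers jssp by 24 bytes. The SPOffset value stored below (csp + kXRegSize) then points just above the return-address slot, which is the only invariant ExitFrame::GetStateForFramePointer relies on.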
2968 // Align and synchronize the system stack pointer with jssp. | 2966 // Align and synchronize the system stack pointer with jssp. |
2969 AlignAndSetCSPForFrame(); | 2967 AlignAndSetCSPForFrame(); |
2970 ASSERT(csp.Is(StackPointer())); | 2968 ASSERT(csp.Is(StackPointer())); |
2971 | 2969 |
2972 // fp[8]: CallerPC (lr) | 2970 // fp[8]: CallerPC (lr) |
2973 // fp -> fp[0]: CallerFP (old fp) | 2971 // fp -> fp[0]: CallerFP (old fp) |
2974 // fp[-8]: Space reserved for SPOffset. | 2972 // fp[-8]: Space reserved for SPOffset. |
2975 // fp[-16]: CodeObject() | 2973 // fp[-16]: CodeObject() |
2976 // csp[...]: Saved doubles, if saved_doubles is true. | 2974 // csp[...]: Saved doubles, if saved_doubles is true. |
2977 // csp[8]: Memory reserved for the caller if extra_space != 0. | 2975 // csp[8]: Memory reserved for the caller if extra_space != 0. |
2978 // Alignment padding, if necessary. | 2976 // Alignment padding, if necessary. |
2979 // csp -> csp[0]: Space reserved for the return address. | 2977 // csp -> csp[0]: Space reserved for the return address. |
2980 | 2978 |
2981 // ExitFrame::GetStateForFramePointer expects to find the return address at | 2979 // ExitFrame::GetStateForFramePointer expects to find the return address at |
2982 // the memory address immediately below the pointer stored in SPOffset. | 2980 // the memory address immediately below the pointer stored in SPOffset. |
2983 // It is not safe to derive much else from SPOffset, because the size of the | 2981 // It is not safe to derive much else from SPOffset, because the size of the |
2984 // padding can vary. | 2982 // padding can vary. |
2985 Add(scratch, csp, kXRegSizeInBytes); | 2983 Add(scratch, csp, kXRegSize); |
2986 Str(scratch, MemOperand(fp, ExitFrameConstants::kSPOffset)); | 2984 Str(scratch, MemOperand(fp, ExitFrameConstants::kSPOffset)); |
2987 } | 2985 } |
2988 | 2986 |
2989 | 2987 |
2990 // Leave the current exit frame. | 2988 // Leave the current exit frame. |
2991 void MacroAssembler::LeaveExitFrame(bool restore_doubles, | 2989 void MacroAssembler::LeaveExitFrame(bool restore_doubles, |
2992 const Register& scratch, | 2990 const Register& scratch, |
2993 bool restore_context) { | 2991 bool restore_context) { |
2994 ASSERT(csp.Is(StackPointer())); | 2992 ASSERT(csp.Is(StackPointer())); |
2995 | 2993 |
(...skipping 121 matching lines...)
3117 Push(x10); | 3115 Push(x10); |
3118 // Set this new handler as the current one. | 3116 // Set this new handler as the current one. |
3119 Str(jssp, MemOperand(x11)); | 3117 Str(jssp, MemOperand(x11)); |
3120 } | 3118 } |
3121 | 3119 |
3122 | 3120 |
3123 void MacroAssembler::PopTryHandler() { | 3121 void MacroAssembler::PopTryHandler() { |
3124 STATIC_ASSERT(StackHandlerConstants::kNextOffset == 0); | 3122 STATIC_ASSERT(StackHandlerConstants::kNextOffset == 0); |
3125 Pop(x10); | 3123 Pop(x10); |
3126 Mov(x11, Operand(ExternalReference(Isolate::kHandlerAddress, isolate()))); | 3124 Mov(x11, Operand(ExternalReference(Isolate::kHandlerAddress, isolate()))); |
3127 Drop(StackHandlerConstants::kSize - kXRegSizeInBytes, kByteSizeInBytes); | 3125 Drop(StackHandlerConstants::kSize - kXRegSize, kByteSizeInBytes); |
3128 Str(x10, MemOperand(x11)); | 3126 Str(x10, MemOperand(x11)); |
3129 } | 3127 } |
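Reviewer note: since kNextOffset == 0 (asserted above), Pop(x10) pulls the next-handler link off the top of the handler record; Drop then discards the remaining kSize - kXRegSize bytes of the record byte-wise, and the final Str re-installs x10 as the isolate's current handler.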
3130 | 3128 |
3131 | 3129 |
3132 void MacroAssembler::Allocate(int object_size, | 3130 void MacroAssembler::Allocate(int object_size, |
3133 Register result, | 3131 Register result, |
3134 Register scratch1, | 3132 Register scratch1, |
3135 Register scratch2, | 3133 Register scratch2, |
3136 Label* gc_required, | 3134 Label* gc_required, |
3137 AllocationFlags flags) { | 3135 AllocationFlags flags) { |
(...skipping 968 matching lines...)
4106 // Safepoints expect a block of kNumSafepointRegisters values on the stack, so | 4104 // Safepoints expect a block of kNumSafepointRegisters values on the stack, so |
4107 // adjust the stack for unsaved registers. | 4105 // adjust the stack for unsaved registers. |
4108 const int num_unsaved = kNumSafepointRegisters - kNumSafepointSavedRegisters; | 4106 const int num_unsaved = kNumSafepointRegisters - kNumSafepointSavedRegisters; |
4109 ASSERT(num_unsaved >= 0); | 4107 ASSERT(num_unsaved >= 0); |
4110 Claim(num_unsaved); | 4108 Claim(num_unsaved); |
4111 PushXRegList(kSafepointSavedRegisters); | 4109 PushXRegList(kSafepointSavedRegisters); |
4112 } | 4110 } |
4113 | 4111 |
4114 | 4112 |
4115 void MacroAssembler::PushSafepointFPRegisters() { | 4113 void MacroAssembler::PushSafepointFPRegisters() { |
4116 PushCPURegList(CPURegList(CPURegister::kFPRegister, kDRegSize, | 4114 PushCPURegList(CPURegList(CPURegister::kFPRegister, kDRegSizeInBits, |
4117 FPRegister::kAllocatableFPRegisters)); | 4115 FPRegister::kAllocatableFPRegisters)); |
4118 } | 4116 } |
4119 | 4117 |
4120 | 4118 |
4121 void MacroAssembler::PopSafepointFPRegisters() { | 4119 void MacroAssembler::PopSafepointFPRegisters() { |
4122 PopCPURegList(CPURegList(CPURegister::kFPRegister, kDRegSize, | 4120 PopCPURegList(CPURegList(CPURegister::kFPRegister, kDRegSizeInBits, |
4123 FPRegister::kAllocatableFPRegisters)); | 4121 FPRegister::kAllocatableFPRegisters)); |
4124 } | 4122 } |
4125 | 4123 |
4126 | 4124 |
4127 int MacroAssembler::SafepointRegisterStackIndex(int reg_code) { | 4125 int MacroAssembler::SafepointRegisterStackIndex(int reg_code) { |
4128 // Make sure the safepoint registers list is what we expect. | 4126 // Make sure the safepoint registers list is what we expect. |
4129 ASSERT(CPURegList::GetSafepointSavedRegisters().list() == 0x6ffcffff); | 4127 ASSERT(CPURegList::GetSafepointSavedRegisters().list() == 0x6ffcffff); |
4130 | 4128 |
4131 // Safepoint registers are stored contiguously on the stack, but not all the | 4129 // Safepoint registers are stored contiguously on the stack, but not all the |
4132 // registers are saved. The following registers are excluded: | 4130 // registers are saved. The following registers are excluded: |
(...skipping 812 matching lines...)
4945 | 4943 |
4946 | 4944 |
4947 void MacroAssembler::EmitFrameSetupForCodeAgePatching(Assembler * assm) { | 4945 void MacroAssembler::EmitFrameSetupForCodeAgePatching(Assembler * assm) { |
4948 Label start; | 4946 Label start; |
4949 __ bind(&start); | 4947 __ bind(&start); |
4950 | 4948 |
4951 // We can do this sequence using four instructions, but the code ageing | 4949 // We can do this sequence using four instructions, but the code ageing |
4952 // sequence that patches it needs five, so we use the extra space to try to | 4950 // sequence that patches it needs five, so we use the extra space to try to |
4953 // simplify some addressing modes and remove some dependencies (compared to | 4951 // simplify some addressing modes and remove some dependencies (compared to |
4954 // using two stp instructions with write-back). | 4952 // using two stp instructions with write-back). |
4955 __ sub(jssp, jssp, 4 * kXRegSizeInBytes); | 4953 __ sub(jssp, jssp, 4 * kXRegSize); |
4956 __ sub(csp, csp, 4 * kXRegSizeInBytes); | 4954 __ sub(csp, csp, 4 * kXRegSize); |
4957 __ stp(x1, cp, MemOperand(jssp, 0 * kXRegSizeInBytes)); | 4955 __ stp(x1, cp, MemOperand(jssp, 0 * kXRegSize)); |
4958 __ stp(fp, lr, MemOperand(jssp, 2 * kXRegSizeInBytes)); | 4956 __ stp(fp, lr, MemOperand(jssp, 2 * kXRegSize)); |
4959 __ add(fp, jssp, StandardFrameConstants::kFixedFrameSizeFromFp); | 4957 __ add(fp, jssp, StandardFrameConstants::kFixedFrameSizeFromFp); |
4960 | 4958 |
4961 __ AssertSizeOfCodeGeneratedSince(&start, kCodeAgeSequenceSize); | 4959 __ AssertSizeOfCodeGeneratedSince(&start, kCodeAgeSequenceSize); |
4962 } | 4960 } |
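Reviewer note: the five instructions emitted above build the standard frame image in one pass: jssp and csp each drop 4 * kXRegSize (32) bytes, x1 and cp land at jssp[0] and jssp[8], fp and lr at jssp[16] and jssp[24], and fp is then rebased via StandardFrameConstants::kFixedFrameSizeFromFp. Using plain offsets instead of two write-back stp instructions keeps the stores independent of each other, which is the dependency point the comment above makes.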
4963 | 4961 |
4964 | 4962 |
4965 void MacroAssembler::EmitCodeAgeSequence(Assembler * assm, | 4963 void MacroAssembler::EmitCodeAgeSequence(Assembler * assm, |
4966 Code * stub) { | 4964 Code * stub) { |
4967 Label start; | 4965 Label start; |
4968 __ bind(&start); | 4966 __ bind(&start); |
(...skipping 143 matching lines...)
5112 } | 5110 } |
5113 } | 5111 } |
5114 | 5112 |
5115 | 5113 |
5116 #undef __ | 5114 #undef __ |
5117 | 5115 |
5118 | 5116 |
5119 } } // namespace v8::internal | 5117 } } // namespace v8::internal |
5120 | 5118 |
5121 #endif // V8_TARGET_ARCH_A64 | 5119 #endif // V8_TARGET_ARCH_A64 |