OLD | NEW |
---|---|
1 // Copyright 2013 the V8 project authors. All rights reserved. | 1 // Copyright 2013 the V8 project authors. All rights reserved. |
2 // Use of this source code is governed by a BSD-style license that can be | 2 // Use of this source code is governed by a BSD-style license that can be |
3 // found in the LICENSE file. | 3 // found in the LICENSE file. |
4 | 4 |
5 #ifndef V8_ARM64_MACRO_ASSEMBLER_ARM64_INL_H_ | 5 #ifndef V8_ARM64_MACRO_ASSEMBLER_ARM64_INL_H_ |
6 #define V8_ARM64_MACRO_ASSEMBLER_ARM64_INL_H_ | 6 #define V8_ARM64_MACRO_ASSEMBLER_ARM64_INL_H_ |
7 | 7 |
8 #include <ctype.h> | 8 #include <ctype.h> |
9 | 9 |
10 #include "v8globals.h" | 10 #include "v8globals.h" |
(...skipping 1228 matching lines...) | |
1239 | 1239 |
1240 void MacroAssembler::Uxtw(const Register& rd, const Register& rn) { | 1240 void MacroAssembler::Uxtw(const Register& rd, const Register& rn) { |
1241 ASSERT(allow_macro_instructions_); | 1241 ASSERT(allow_macro_instructions_); |
1242 ASSERT(!rd.IsZero()); | 1242 ASSERT(!rd.IsZero()); |
1243 uxtw(rd, rn); | 1243 uxtw(rd, rn); |
1244 } | 1244 } |
1245 | 1245 |
1246 | 1246 |
1247 void MacroAssembler::BumpSystemStackPointer(const Operand& space) { | 1247 void MacroAssembler::BumpSystemStackPointer(const Operand& space) { |
1248 ASSERT(!csp.Is(sp_)); | 1248 ASSERT(!csp.Is(sp_)); |
1249 // TODO(jbramley): Several callers rely on this not using scratch registers, | 1249 if (!TmpList()->IsEmpty()) { |
1250 // so we use the assembler directly here. However, this means that large | 1250 if (CpuFeatures::IsSupported(ALWAYS_ALIGN_CSP)) { |
1251 // immediate values of 'space' cannot be handled cleanly. (Only 24-bits | 1251 UseScratchRegisterScope temps(this); |
1252 // immediates or values of 'space' that can be encoded in one instruction are | 1252 Register temp = temps.AcquireX(); |
1253 // accepted.) Once we implement our flexible scratch register idea, we could | 1253 Sub(temp, StackPointer(), space); |
1254 // greatly simplify this function. | 1254 Bic(csp, temp, 0xf); |
1255 InstructionAccurateScope scope(this); | 1255 } else { |
1256 if ((space.IsImmediate()) && !is_uint12(space.immediate())) { | 1256 Sub(csp, StackPointer(), space); |
1257 // The subtract instruction supports a 12-bit immediate, shifted left by | 1257 } |
1258 // zero or 12 bits. So, in two instructions, we can subtract any immediate | 1258 } else { |
1259 // between zero and (1 << 24) - 1. | 1259 // TODO(jbramley): Several callers rely on this not using scratch |
1260 int64_t imm = space.immediate(); | 1260 // registers, so we use the assembler directly here. However, this means |
1261 // that large immediate values of 'space' cannot be handled cleanly. (Only | |
1262 // 24-bits immediates or values of 'space' that can be encoded in one | |
1263 // instruction are accepted.) Once we implement our flexible scratch | |
1264 // register idea, we could greatly simplify this function. | |
1265 InstructionAccurateScope scope(this); | |
1266 ASSERT(space.IsImmediate()); | |
1267 // Align to 16 bytes. | |
1268 uint64_t imm = RoundUp(space.immediate(), 0x10); | |
1261 ASSERT(is_uint24(imm)); | 1269 ASSERT(is_uint24(imm)); |
1262 | 1270 |
1263 int64_t imm_top_12_bits = imm >> 12; | 1271 Register source = StackPointer(); |
1264 sub(csp, StackPointer(), imm_top_12_bits << 12); | 1272 if (CpuFeatures::IsSupported(ALWAYS_ALIGN_CSP)) { |
1265 imm -= imm_top_12_bits << 12; | 1273 bic(csp, source, 0xf); |
1274 source = csp; | |
1275 } | |
1276 if (!is_uint12(imm)) { | |
1277 int64_t imm_top_12_bits = imm >> 12; | |
1278 sub(csp, source, imm_top_12_bits << 12); | |
1279 source = csp; | |
1280 imm -= imm_top_12_bits << 12; | |
1281 } | |
1266 if (imm > 0) { | 1282 if (imm > 0) { |
1267 sub(csp, csp, imm); | 1283 sub(csp, source, imm); |
1268 } | 1284 } |
1269 } else { | |
1270 sub(csp, StackPointer(), space); | |
1271 } | 1285 } |
1286 // TODO(rmcilroy): Uncomment when Debug::PrepareForBreakPoints issue is | |
1287 // resolved. | |
1288 // AssertStackConsistency(true); | |
rmcilroy 2014/05/07 18:08:21
If I enable this I get the following assert in deb…
1272 } | 1289 } |
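A quick aside on the two-instruction immediate handling kept in the fallback path above: the subtract instruction accepts a 12-bit immediate optionally shifted left by 12, so any value under (1 << 24) can be split into a high part and a low part. A minimal standalone sketch of that split, using a made-up 'space' value (not taken from this CL):

    #include <cassert>
    #include <cstdint>

    int main() {
      // Hypothetical 'space' immediate; any value below (1 << 24) works.
      uint64_t imm = 0x123456;
      uint64_t top = (imm >> 12) << 12;  // encodable as 12-bit immediate, LSL #12
      uint64_t low = imm - top;          // encodable as 12-bit immediate, LSL #0
      assert(top == 0x123000);
      assert(low == 0x456);
      assert(top + low == imm);  // the two sub instructions remove 'space' in total
      return 0;
    }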
1273 | 1290 |
1274 | 1291 |
1292 void MacroAssembler::SyncSystemStackPointer() { | |
1293 ASSERT(emit_debug_code()); | |
1294 ASSERT(!csp.Is(sp_)); | |
1295 { InstructionAccurateScope scope(this); | |
1296 if (CpuFeatures::IsSupported(ALWAYS_ALIGN_CSP)) { | |
1297 bic(csp, StackPointer(), 0xf); | |
1298 } else { | |
1299 mov(csp, StackPointer()); | |
1300 } | |
1301 } | |
1302 AssertStackConsistency(true); | |
1303 } | |
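For reference, the bic with 0xf used under ALWAYS_ALIGN_CSP simply clears the low four address bits, rounding csp down to a 16-byte boundary and never above the JS stack pointer. A small sketch of that arithmetic, with a hypothetical stack address:

    #include <cassert>
    #include <cstdint>

    int main() {
      uint64_t jssp = 0x00007ffff7ff1238;     // hypothetical JS stack pointer value
      uint64_t csp  = jssp & ~UINT64_C(0xf);  // what 'bic csp, jssp, #0xf' computes
      assert(csp == 0x00007ffff7ff1230);      // rounded down to a 16-byte boundary
      assert(csp <= jssp);                    // never raised above the JS stack top
      return 0;
    }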
1304 | |
1305 | |
1275 void MacroAssembler::InitializeRootRegister() { | 1306 void MacroAssembler::InitializeRootRegister() { |
1276 ExternalReference roots_array_start = | 1307 ExternalReference roots_array_start = |
1277 ExternalReference::roots_array_start(isolate()); | 1308 ExternalReference::roots_array_start(isolate()); |
1278 Mov(root, Operand(roots_array_start)); | 1309 Mov(root, Operand(roots_array_start)); |
1279 } | 1310 } |
1280 | 1311 |
1281 | 1312 |
1282 void MacroAssembler::SmiTag(Register dst, Register src) { | 1313 void MacroAssembler::SmiTag(Register dst, Register src) { |
1283 ASSERT(dst.Is64Bits() && src.Is64Bits()); | 1314 ASSERT(dst.Is64Bits() && src.Is64Bits()); |
1284 Lsl(dst, src, kSmiShift); | 1315 Lsl(dst, src, kSmiShift); |
(...skipping 248 matching lines...) | |
1533 } | 1564 } |
1534 | 1565 |
1535 Add(StackPointer(), StackPointer(), size); | 1566 Add(StackPointer(), StackPointer(), size); |
1536 | 1567 |
1537 if (csp.Is(StackPointer())) { | 1568 if (csp.Is(StackPointer())) { |
1538 ASSERT(size % 16 == 0); | 1569 ASSERT(size % 16 == 0); |
1539 } else if (emit_debug_code()) { | 1570 } else if (emit_debug_code()) { |
1540 // It is safe to leave csp where it is when unwinding the JavaScript stack, | 1571 // It is safe to leave csp where it is when unwinding the JavaScript stack, |
1541 // but if we keep it matching StackPointer, the simulator can detect memory | 1572 // but if we keep it matching StackPointer, the simulator can detect memory |
1542 // accesses in the now-free part of the stack. | 1573 // accesses in the now-free part of the stack. |
1543 Mov(csp, StackPointer()); | 1574 SyncSystemStackPointer(); |
1544 } | 1575 } |
1545 } | 1576 } |
1546 | 1577 |
1547 | 1578 |
1548 void MacroAssembler::Drop(const Register& count, uint64_t unit_size) { | 1579 void MacroAssembler::Drop(const Register& count, uint64_t unit_size) { |
1549 if (unit_size == 0) return; | 1580 if (unit_size == 0) return; |
1550 ASSERT(IsPowerOf2(unit_size)); | 1581 ASSERT(IsPowerOf2(unit_size)); |
1551 | 1582 |
1552 const int shift = CountTrailingZeros(unit_size, kXRegSizeInBits); | 1583 const int shift = CountTrailingZeros(unit_size, kXRegSizeInBits); |
1553 const Operand size(count, LSL, shift); | 1584 const Operand size(count, LSL, shift); |
1554 | 1585 |
1555 if (size.IsZero()) { | 1586 if (size.IsZero()) { |
1556 return; | 1587 return; |
1557 } | 1588 } |
1558 | 1589 |
1559 Add(StackPointer(), StackPointer(), size); | 1590 Add(StackPointer(), StackPointer(), size); |
1560 | 1591 |
1561 if (!csp.Is(StackPointer()) && emit_debug_code()) { | 1592 if (!csp.Is(StackPointer()) && emit_debug_code()) { |
1562 // It is safe to leave csp where it is when unwinding the JavaScript stack, | 1593 // It is safe to leave csp where it is when unwinding the JavaScript stack, |
1563 // but if we keep it matching StackPointer, the simulator can detect memory | 1594 // but if we keep it matching StackPointer, the simulator can detect memory |
1564 // accesses in the now-free part of the stack. | 1595 // accesses in the now-free part of the stack. |
1565 Mov(csp, StackPointer()); | 1596 SyncSystemStackPointer(); |
1566 } | 1597 } |
1567 } | 1598 } |
1568 | 1599 |
1569 | 1600 |
1570 void MacroAssembler::DropBySMI(const Register& count_smi, uint64_t unit_size) { | 1601 void MacroAssembler::DropBySMI(const Register& count_smi, uint64_t unit_size) { |
1571 ASSERT(unit_size == 0 || IsPowerOf2(unit_size)); | 1602 ASSERT(unit_size == 0 || IsPowerOf2(unit_size)); |
1572 const int shift = CountTrailingZeros(unit_size, kXRegSizeInBits) - kSmiShift; | 1603 const int shift = CountTrailingZeros(unit_size, kXRegSizeInBits) - kSmiShift; |
1573 const Operand size(count_smi, | 1604 const Operand size(count_smi, |
1574 (shift >= 0) ? (LSL) : (LSR), | 1605 (shift >= 0) ? (LSL) : (LSR), |
1575 (shift >= 0) ? (shift) : (-shift)); | 1606 (shift >= 0) ? (shift) : (-shift)); |
1576 | 1607 |
1577 if (size.IsZero()) { | 1608 if (size.IsZero()) { |
1578 return; | 1609 return; |
1579 } | 1610 } |
1580 | 1611 |
1581 Add(StackPointer(), StackPointer(), size); | 1612 Add(StackPointer(), StackPointer(), size); |
1582 | 1613 |
1583 if (!csp.Is(StackPointer()) && emit_debug_code()) { | 1614 if (!csp.Is(StackPointer()) && emit_debug_code()) { |
1584 // It is safe to leave csp where it is when unwinding the JavaScript stack, | 1615 // It is safe to leave csp where it is when unwinding the JavaScript stack, |
1585 // but if we keep it matching StackPointer, the simulator can detect memory | 1616 // but if we keep it matching StackPointer, the simulator can detect memory |
1586 // accesses in the now-free part of the stack. | 1617 // accesses in the now-free part of the stack. |
1587 Mov(csp, StackPointer()); | 1618 SyncSystemStackPointer(); |
1588 } | 1619 } |
1589 } | 1620 } |
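The shift in DropBySMI folds two steps together: multiplying the count by unit_size and untagging the SMI (the value shifted left by kSmiShift), which is why a negative result turns into an LSR. A hedged standalone check of that arithmetic, assuming kSmiShift is 32 as on arm64 and using the GCC/Clang builtin in place of V8's CountTrailingZeros:

    #include <cassert>
    #include <cstdint>

    int main() {
      const int kSmiShift = 32;            // arm64 SMI tag shift (assumption)
      const uint64_t unit_size = 8;        // bytes per dropped stack slot
      int shift = __builtin_ctzll(unit_size) - kSmiShift;  // 3 - 32 = -29
      assert(shift == -29);                // negative, so Operand(count_smi, LSR, 29)

      uint64_t count = 5;
      uint64_t count_smi = count << kSmiShift;             // tagged SMI value
      assert((count_smi >> -shift) == count * unit_size);  // 40 bytes dropped
      return 0;
    }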
1590 | 1621 |
1591 | 1622 |
1592 void MacroAssembler::CompareAndBranch(const Register& lhs, | 1623 void MacroAssembler::CompareAndBranch(const Register& lhs, |
1593 const Operand& rhs, | 1624 const Operand& rhs, |
1594 Condition cond, | 1625 Condition cond, |
1595 Label* label) { | 1626 Label* label) { |
1596 if (rhs.IsImmediate() && (rhs.immediate() == 0) && | 1627 if (rhs.IsImmediate() && (rhs.immediate() == 0) && |
1597 ((cond == eq) || (cond == ne))) { | 1628 ((cond == eq) || (cond == ne))) { |
(...skipping 63 matching lines...) | |
1661 // characters are reserved for controlling features of the instrumentation. | 1692 // characters are reserved for controlling features of the instrumentation. |
1662 ASSERT(isprint(marker_name[0]) && isprint(marker_name[1])); | 1693 ASSERT(isprint(marker_name[0]) && isprint(marker_name[1])); |
1663 | 1694 |
1664 InstructionAccurateScope scope(this, 1); | 1695 InstructionAccurateScope scope(this, 1); |
1665 movn(xzr, (marker_name[1] << 8) | marker_name[0]); | 1696 movn(xzr, (marker_name[1] << 8) | marker_name[0]); |
1666 } | 1697 } |
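The marker is packed into the 16-bit immediate of a movn whose destination is xzr, so it executes as a no-op while staying visible to the instrumentation in the instruction stream. A tiny sketch of the packing, with a hypothetical "BL" marker name:

    #include <cassert>

    int main() {
      const char marker_name[] = "BL";  // hypothetical two-character marker
      unsigned imm16 = (marker_name[1] << 8) | marker_name[0];
      assert(imm16 == 0x4C42);          // 'L' (0x4C) in the high byte, 'B' (0x42) low
      return 0;
    }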
1667 | 1698 |
1668 } } // namespace v8::internal | 1699 } } // namespace v8::internal |
1669 | 1700 |
1670 #endif // V8_ARM64_MACRO_ASSEMBLER_ARM64_INL_H_ | 1701 #endif // V8_ARM64_MACRO_ASSEMBLER_ARM64_INL_H_ |