Chromium Code Reviews
chromiumcodereview-hr@appspot.gserviceaccount.com (chromiumcodereview-hr) | Please choose your nickname with Settings | Help | Chromium Project | Gerrit Changes | Sign out
(128)

Side by Side Diff: src/arm64/macro-assembler-arm64-inl.h

Issue 264773004: Arm64: Ensure that csp is always aligned to 16 byte values even if jssp is not. (Closed) Base URL: https://v8.googlecode.com/svn/branches/bleeding_edge
Patch Set: Created 6 years, 7 months ago
Use n/p to move between diff chunks; N/P to move between comments. Draft comments are only viewable by you.
Jump to:
View unified diff | Download patch | Annotate | Revision Log
OLDNEW
1 // Copyright 2013 the V8 project authors. All rights reserved. 1 // Copyright 2013 the V8 project authors. All rights reserved.
2 // Use of this source code is governed by a BSD-style license that can be 2 // Use of this source code is governed by a BSD-style license that can be
3 // found in the LICENSE file. 3 // found in the LICENSE file.
4 4
5 #ifndef V8_ARM64_MACRO_ASSEMBLER_ARM64_INL_H_ 5 #ifndef V8_ARM64_MACRO_ASSEMBLER_ARM64_INL_H_
6 #define V8_ARM64_MACRO_ASSEMBLER_ARM64_INL_H_ 6 #define V8_ARM64_MACRO_ASSEMBLER_ARM64_INL_H_
7 7
8 #include <ctype.h> 8 #include <ctype.h>
9 9
10 #include "v8globals.h" 10 #include "v8globals.h"
(...skipping 1229 matching lines...) Expand 10 before | Expand all | Expand 10 after
1240 1240
1241 void MacroAssembler::Uxtw(const Register& rd, const Register& rn) { 1241 void MacroAssembler::Uxtw(const Register& rd, const Register& rn) {
1242 ASSERT(allow_macro_instructions_); 1242 ASSERT(allow_macro_instructions_);
1243 ASSERT(!rd.IsZero()); 1243 ASSERT(!rd.IsZero());
1244 uxtw(rd, rn); 1244 uxtw(rd, rn);
1245 } 1245 }
1246 1246
1247 1247
1248 void MacroAssembler::BumpSystemStackPointer(const Operand& space) { 1248 void MacroAssembler::BumpSystemStackPointer(const Operand& space) {
1249 ASSERT(!csp.Is(sp_)); 1249 ASSERT(!csp.Is(sp_));
1250 // TODO(jbramley): Several callers rely on this not using scratch registers, 1250 { InstructionAccurateScope scope(this);
1251 // so we use the assembler directly here. However, this means that large 1251 if (!TmpList()->IsEmpty()) {
1252 // immediate values of 'space' cannot be handled cleanly. (Only 24-bits 1252 UseScratchRegisterScope temps(this);
1253 // immediates or values of 'space' that can be encoded in one instruction are 1253 Register temp = temps.AcquireX();
1254 // accepted.) Once we implement our flexible scratch register idea, we could 1254 sub(temp, StackPointer(), space);
1255 // greatly simplify this function. 1255 bic(temp, temp, 0xf);
1256 InstructionAccurateScope scope(this); 1256 sub(csp, temp, 0x10);
1257 if ((space.IsImmediate()) && !is_uint12(space.immediate())) { 1257 } else {
1258 // The subtract instruction supports a 12-bit immediate, shifted left by 1258 // TODO(jbramley): Several callers rely on this not using scratch
jbramley 2014/05/01 15:17:10 Since we added UseScratchRegisterScope, I don't think the scratch-register-free fallback path is still needed.
rmcilroy 2014/05/01 18:29:11 Yes I had hoped that was true and tried it originally, but some callers still rely on this not using scratch registers, so the fallback is kept.
1259 // zero or 12 bits. So, in two instructions, we can subtract any immediate 1259 // registers, so we use the assembler directly here. However, this means
1260 // between zero and (1 << 24) - 1. 1260 // that large immediate values of 'space' cannot be handled cleanly. (Only
1261 int64_t imm = space.immediate(); 1261 // 24-bits immediates or values of 'space' that can be encoded in one
1262 ASSERT(is_uint24(imm)); 1262 // instruction are accepted.) Once we implement our flexible scratch
1263 // register idea, we could greatly simplify this function.
1264 ASSERT(space.IsImmediate());
1265 // Align to 16 bytes and add 16 bytes to counteract mask of StackPointer
1266 // below.
1267 uint64_t imm = RoundUp(space.immediate(), 0x10) + 0x10;
1268 ASSERT(is_uint24(imm));
1263 1269
1264 int64_t imm_top_12_bits = imm >> 12; 1270 bic(csp, StackPointer(), 0xf);
jbramley 2014/05/01 15:17:10 This will generate two extra instructions (bic + sub) for every stack bump, even when the alignment is not strictly required.
rmcilroy 2014/05/01 18:29:11 Sure I understand your concern. We are not yet sure how significant the performance impact will be; we can revisit this if it shows up in profiles.
1265 sub(csp, StackPointer(), imm_top_12_bits << 12); 1271 if (!is_uint12(imm)) {
1266 imm -= imm_top_12_bits << 12; 1272 int64_t imm_top_12_bits = imm >> 12;
1267 if (imm > 0) { 1273 sub(csp, csp, imm_top_12_bits << 12);
1268 sub(csp, csp, imm); 1274 imm -= imm_top_12_bits << 12;
1275 }
1276 if (imm > 0) {
1277 sub(csp, csp, imm);
1278 }
1269 } 1279 }
1270 } else {
1271 sub(csp, StackPointer(), space);
1272 } 1280 }
1281 AssertStackConsistency();
1273 } 1282 }
1274 1283
1275 1284
1285 void MacroAssembler::SyncSystemStackPointer() {
jbramley 2014/05/01 15:17:10 This is never _necessary_, so it might be a good idea to document that callers may skip it when csp tracking is not needed.
rmcilroy 2014/05/01 18:29:11 Done.
1286 ASSERT(!csp.Is(sp_));
1287 { InstructionAccurateScope scope(this);
1288 bic(csp, StackPointer(), 0xf);
1289 sub(csp, csp, 0x10);
jbramley 2014/05/01 15:17:10 Why is the sub necessary? Isn't the bic enough?
rmcilroy 2014/05/01 18:29:11 You are right, the bic is enough (this is an artifact of an earlier version of the patch); I'll remove the sub.
1290 }
1291 AssertStackConsistency();
1292 }
1293
1294
1276 void MacroAssembler::InitializeRootRegister() { 1295 void MacroAssembler::InitializeRootRegister() {
1277 ExternalReference roots_array_start = 1296 ExternalReference roots_array_start =
1278 ExternalReference::roots_array_start(isolate()); 1297 ExternalReference::roots_array_start(isolate());
1279 Mov(root, Operand(roots_array_start)); 1298 Mov(root, Operand(roots_array_start));
1280 } 1299 }
1281 1300
1282 1301
1283 void MacroAssembler::SmiTag(Register dst, Register src) { 1302 void MacroAssembler::SmiTag(Register dst, Register src) {
1284 ASSERT(dst.Is64Bits() && src.Is64Bits()); 1303 ASSERT(dst.Is64Bits() && src.Is64Bits());
1285 Lsl(dst, src, kSmiShift); 1304 Lsl(dst, src, kSmiShift);
(...skipping 248 matching lines...) Expand 10 before | Expand all | Expand 10 after
1534 } 1553 }
1535 1554
1536 Add(StackPointer(), StackPointer(), size); 1555 Add(StackPointer(), StackPointer(), size);
1537 1556
1538 if (csp.Is(StackPointer())) { 1557 if (csp.Is(StackPointer())) {
1539 ASSERT(size % 16 == 0); 1558 ASSERT(size % 16 == 0);
1540 } else if (emit_debug_code()) { 1559 } else if (emit_debug_code()) {
1541 // It is safe to leave csp where it is when unwinding the JavaScript stack, 1560 // It is safe to leave csp where it is when unwinding the JavaScript stack,
1542 // but if we keep it matching StackPointer, the simulator can detect memory 1561 // but if we keep it matching StackPointer, the simulator can detect memory
1543 // accesses in the now-free part of the stack. 1562 // accesses in the now-free part of the stack.
1544 Mov(csp, StackPointer()); 1563 SyncSystemStackPointer();
1545 } 1564 }
1546 } 1565 }
1547 1566
1548 1567
1549 void MacroAssembler::Drop(const Register& count, uint64_t unit_size) { 1568 void MacroAssembler::Drop(const Register& count, uint64_t unit_size) {
1550 if (unit_size == 0) return; 1569 if (unit_size == 0) return;
1551 ASSERT(IsPowerOf2(unit_size)); 1570 ASSERT(IsPowerOf2(unit_size));
1552 1571
1553 const int shift = CountTrailingZeros(unit_size, kXRegSizeInBits); 1572 const int shift = CountTrailingZeros(unit_size, kXRegSizeInBits);
1554 const Operand size(count, LSL, shift); 1573 const Operand size(count, LSL, shift);
1555 1574
1556 if (size.IsZero()) { 1575 if (size.IsZero()) {
1557 return; 1576 return;
1558 } 1577 }
1559 1578
1560 Add(StackPointer(), StackPointer(), size); 1579 Add(StackPointer(), StackPointer(), size);
1561 1580
1562 if (!csp.Is(StackPointer()) && emit_debug_code()) { 1581 if (!csp.Is(StackPointer()) && emit_debug_code()) {
1563 // It is safe to leave csp where it is when unwinding the JavaScript stack, 1582 // It is safe to leave csp where it is when unwinding the JavaScript stack,
1564 // but if we keep it matching StackPointer, the simulator can detect memory 1583 // but if we keep it matching StackPointer, the simulator can detect memory
1565 // accesses in the now-free part of the stack. 1584 // accesses in the now-free part of the stack.
1566 Mov(csp, StackPointer()); 1585 SyncSystemStackPointer();
1567 } 1586 }
1568 } 1587 }
1569 1588
1570 1589
1571 void MacroAssembler::DropBySMI(const Register& count_smi, uint64_t unit_size) { 1590 void MacroAssembler::DropBySMI(const Register& count_smi, uint64_t unit_size) {
1572 ASSERT(unit_size == 0 || IsPowerOf2(unit_size)); 1591 ASSERT(unit_size == 0 || IsPowerOf2(unit_size));
1573 const int shift = CountTrailingZeros(unit_size, kXRegSizeInBits) - kSmiShift; 1592 const int shift = CountTrailingZeros(unit_size, kXRegSizeInBits) - kSmiShift;
1574 const Operand size(count_smi, 1593 const Operand size(count_smi,
1575 (shift >= 0) ? (LSL) : (LSR), 1594 (shift >= 0) ? (LSL) : (LSR),
1576 (shift >= 0) ? (shift) : (-shift)); 1595 (shift >= 0) ? (shift) : (-shift));
1577 1596
1578 if (size.IsZero()) { 1597 if (size.IsZero()) {
1579 return; 1598 return;
1580 } 1599 }
1581 1600
1582 Add(StackPointer(), StackPointer(), size); 1601 Add(StackPointer(), StackPointer(), size);
1583 1602
1584 if (!csp.Is(StackPointer()) && emit_debug_code()) { 1603 if (!csp.Is(StackPointer()) && emit_debug_code()) {
1585 // It is safe to leave csp where it is when unwinding the JavaScript stack, 1604 // It is safe to leave csp where it is when unwinding the JavaScript stack,
1586 // but if we keep it matching StackPointer, the simulator can detect memory 1605 // but if we keep it matching StackPointer, the simulator can detect memory
1587 // accesses in the now-free part of the stack. 1606 // accesses in the now-free part of the stack.
1588 Mov(csp, StackPointer()); 1607 SyncSystemStackPointer();
1589 } 1608 }
1590 } 1609 }
1591 1610
1592 1611
1593 void MacroAssembler::CompareAndBranch(const Register& lhs, 1612 void MacroAssembler::CompareAndBranch(const Register& lhs,
1594 const Operand& rhs, 1613 const Operand& rhs,
1595 Condition cond, 1614 Condition cond,
1596 Label* label) { 1615 Label* label) {
1597 if (rhs.IsImmediate() && (rhs.immediate() == 0) && 1616 if (rhs.IsImmediate() && (rhs.immediate() == 0) &&
1598 ((cond == eq) || (cond == ne))) { 1617 ((cond == eq) || (cond == ne))) {
(...skipping 63 matching lines...) Expand 10 before | Expand all | Expand 10 after
1662 // characters are reserved for controlling features of the instrumentation. 1681 // characters are reserved for controlling features of the instrumentation.
1663 ASSERT(isprint(marker_name[0]) && isprint(marker_name[1])); 1682 ASSERT(isprint(marker_name[0]) && isprint(marker_name[1]));
1664 1683
1665 InstructionAccurateScope scope(this, 1); 1684 InstructionAccurateScope scope(this, 1);
1666 movn(xzr, (marker_name[1] << 8) | marker_name[0]); 1685 movn(xzr, (marker_name[1] << 8) | marker_name[0]);
1667 } 1686 }
1668 1687
1669 } } // namespace v8::internal 1688 } } // namespace v8::internal
1670 1689
1671 #endif // V8_ARM64_MACRO_ASSEMBLER_ARM64_INL_H_ 1690 #endif // V8_ARM64_MACRO_ASSEMBLER_ARM64_INL_H_
OLDNEW
« src/arm64/macro-assembler-arm64.cc ('K') | « src/arm64/macro-assembler-arm64.cc ('k') | no next file » | no next file with comments »

Powered by Google App Engine
This is Rietveld 408576698