Chromium Code Reviews
chromiumcodereview-hr@appspot.gserviceaccount.com (chromiumcodereview-hr) | Please choose your nickname with Settings | Help | Chromium Project | Gerrit Changes | Sign out
(961)

Side by Side Diff: src/arm64/macro-assembler-arm64-inl.h

Issue 264773004: Arm64: Ensure that csp is always aligned to 16-byte values even if jssp is not. (Closed) Base URL: https://v8.googlecode.com/svn/branches/bleeding_edge
Patch Set: Sync Created 6 years, 7 months ago
Use n/p to move between diff chunks; N/P to move between comments. Draft comments are only viewable by you.
Jump to:
View unified diff | Download patch | Annotate | Revision Log
« no previous file with comments | « src/arm64/macro-assembler-arm64.cc ('k') | src/arm64/regexp-macro-assembler-arm64.cc » ('j') | no next file with comments »
Toggle Intra-line Diffs ('i') | Expand Comments ('e') | Collapse Comments ('c') | Show/Hide Comments ('s')
OLDNEW
1 // Copyright 2013 the V8 project authors. All rights reserved. 1 // Copyright 2013 the V8 project authors. All rights reserved.
2 // Use of this source code is governed by a BSD-style license that can be 2 // Use of this source code is governed by a BSD-style license that can be
3 // found in the LICENSE file. 3 // found in the LICENSE file.
4 4
5 #ifndef V8_ARM64_MACRO_ASSEMBLER_ARM64_INL_H_ 5 #ifndef V8_ARM64_MACRO_ASSEMBLER_ARM64_INL_H_
6 #define V8_ARM64_MACRO_ASSEMBLER_ARM64_INL_H_ 6 #define V8_ARM64_MACRO_ASSEMBLER_ARM64_INL_H_
7 7
8 #include <ctype.h> 8 #include <ctype.h>
9 9
10 #include "v8globals.h" 10 #include "v8globals.h"
(...skipping 1228 matching lines...) Expand 10 before | Expand all | Expand 10 after
1239 1239
// Unsigned extend word: macro-assembler wrapper around the raw uxtw
// instruction. Zero-extends the low 32 bits of rn into rd.
void MacroAssembler::Uxtw(const Register& rd, const Register& rn) {
  // Macro instructions may only be emitted where the macro-assembler is
  // allowed to expand them (i.e. not inside an InstructionAccurateScope).
  ASSERT(allow_macro_instructions_);
  // Writing to the zero register would silently discard the result.
  ASSERT(!rd.IsZero());
  uxtw(rd, rn);
}
1245 1245
1246 1246
// Move csp down by 'space' bytes below the current (JavaScript) stack
// pointer, so that the memory the JS stack is about to use is covered by
// csp. When ALWAYS_ALIGN_CSP is supported, the result is additionally
// aligned down to 16 bytes, since the hardware may enforce 16-byte csp
// alignment on every csp-based access.
void MacroAssembler::BumpSystemStackPointer(const Operand& space) {
  // Only meaningful when csp is not already the active stack pointer.
  ASSERT(!csp.Is(sp_));
  { InstructionAccurateScope scope(this);
    if (!TmpList()->IsEmpty()) {
      // Scratch registers are available, so arbitrary operands are fine.
      if (CpuFeatures::IsSupported(ALWAYS_ALIGN_CSP)) {
        UseScratchRegisterScope temps(this);
        Register temp = temps.AcquireX();
        sub(temp, StackPointer(), space);
        // Clear the low four bits to align csp down to 16 bytes.
        bic(csp, temp, 0xf);
      } else {
        sub(csp, StackPointer(), space);
      }
    } else {
      // TODO(jbramley): Several callers rely on this not using scratch
      // registers, so we use the assembler directly here. However, this means
      // that large immediate values of 'space' cannot be handled cleanly. (Only
      // 24-bits immediates or values of 'space' that can be encoded in one
      // instruction are accepted.) Once we implement our flexible scratch
      // register idea, we could greatly simplify this function.
      ASSERT(space.IsImmediate());
      // Align the requested size to 16 bytes so csp stays 16-byte aligned.
      uint64_t imm = RoundUp(space.immediate(), 0x10);
      ASSERT(is_uint24(imm));

      Register source = StackPointer();
      if (CpuFeatures::IsSupported(ALWAYS_ALIGN_CSP)) {
        // Align csp first, then subtract relative to the aligned value.
        bic(csp, source, 0xf);
        source = csp;
      }
      if (!is_uint12(imm)) {
        // sub takes a 12-bit immediate, optionally shifted left by 12 bits.
        // A 24-bit value is therefore split across two sub instructions.
        int64_t imm_top_12_bits = imm >> 12;
        sub(csp, source, imm_top_12_bits << 12);
        source = csp;
        imm -= imm_top_12_bits << 12;
      }
      if (imm > 0) {
        sub(csp, source, imm);
      }
    }
  }
  AssertStackConsistency();
}
1273 1289
1274 1290
1291 void MacroAssembler::SyncSystemStackPointer() {
1292 ASSERT(emit_debug_code());
1293 ASSERT(!csp.Is(sp_));
1294 if (CpuFeatures::IsSupported(ALWAYS_ALIGN_CSP)) {
1295 InstructionAccurateScope scope(this);
1296 bic(csp, StackPointer(), 0xf);
1297 }
1298 AssertStackConsistency();
1299 }
1300
1301
1275 void MacroAssembler::InitializeRootRegister() { 1302 void MacroAssembler::InitializeRootRegister() {
1276 ExternalReference roots_array_start = 1303 ExternalReference roots_array_start =
1277 ExternalReference::roots_array_start(isolate()); 1304 ExternalReference::roots_array_start(isolate());
1278 Mov(root, Operand(roots_array_start)); 1305 Mov(root, Operand(roots_array_start));
1279 } 1306 }
1280 1307
1281 1308
1282 void MacroAssembler::SmiTag(Register dst, Register src) { 1309 void MacroAssembler::SmiTag(Register dst, Register src) {
1283 ASSERT(dst.Is64Bits() && src.Is64Bits()); 1310 ASSERT(dst.Is64Bits() && src.Is64Bits());
1284 Lsl(dst, src, kSmiShift); 1311 Lsl(dst, src, kSmiShift);
(...skipping 248 matching lines...) Expand 10 before | Expand all | Expand 10 after
1533 } 1560 }
1534 1561
1535 Add(StackPointer(), StackPointer(), size); 1562 Add(StackPointer(), StackPointer(), size);
1536 1563
1537 if (csp.Is(StackPointer())) { 1564 if (csp.Is(StackPointer())) {
1538 ASSERT(size % 16 == 0); 1565 ASSERT(size % 16 == 0);
1539 } else if (emit_debug_code()) { 1566 } else if (emit_debug_code()) {
1540 // It is safe to leave csp where it is when unwinding the JavaScript stack, 1567 // It is safe to leave csp where it is when unwinding the JavaScript stack,
1541 // but if we keep it matching StackPointer, the simulator can detect memory 1568 // but if we keep it matching StackPointer, the simulator can detect memory
1542 // accesses in the now-free part of the stack. 1569 // accesses in the now-free part of the stack.
1543 Mov(csp, StackPointer()); 1570 SyncSystemStackPointer();
1544 } 1571 }
1545 } 1572 }
1546 1573
1547 1574
1548 void MacroAssembler::Drop(const Register& count, uint64_t unit_size) { 1575 void MacroAssembler::Drop(const Register& count, uint64_t unit_size) {
1549 if (unit_size == 0) return; 1576 if (unit_size == 0) return;
1550 ASSERT(IsPowerOf2(unit_size)); 1577 ASSERT(IsPowerOf2(unit_size));
1551 1578
1552 const int shift = CountTrailingZeros(unit_size, kXRegSizeInBits); 1579 const int shift = CountTrailingZeros(unit_size, kXRegSizeInBits);
1553 const Operand size(count, LSL, shift); 1580 const Operand size(count, LSL, shift);
1554 1581
1555 if (size.IsZero()) { 1582 if (size.IsZero()) {
1556 return; 1583 return;
1557 } 1584 }
1558 1585
1559 Add(StackPointer(), StackPointer(), size); 1586 Add(StackPointer(), StackPointer(), size);
1560 1587
1561 if (!csp.Is(StackPointer()) && emit_debug_code()) { 1588 if (!csp.Is(StackPointer()) && emit_debug_code()) {
1562 // It is safe to leave csp where it is when unwinding the JavaScript stack, 1589 // It is safe to leave csp where it is when unwinding the JavaScript stack,
1563 // but if we keep it matching StackPointer, the simulator can detect memory 1590 // but if we keep it matching StackPointer, the simulator can detect memory
1564 // accesses in the now-free part of the stack. 1591 // accesses in the now-free part of the stack.
1565 Mov(csp, StackPointer()); 1592 SyncSystemStackPointer();
1566 } 1593 }
1567 } 1594 }
1568 1595
1569 1596
1570 void MacroAssembler::DropBySMI(const Register& count_smi, uint64_t unit_size) { 1597 void MacroAssembler::DropBySMI(const Register& count_smi, uint64_t unit_size) {
1571 ASSERT(unit_size == 0 || IsPowerOf2(unit_size)); 1598 ASSERT(unit_size == 0 || IsPowerOf2(unit_size));
1572 const int shift = CountTrailingZeros(unit_size, kXRegSizeInBits) - kSmiShift; 1599 const int shift = CountTrailingZeros(unit_size, kXRegSizeInBits) - kSmiShift;
1573 const Operand size(count_smi, 1600 const Operand size(count_smi,
1574 (shift >= 0) ? (LSL) : (LSR), 1601 (shift >= 0) ? (LSL) : (LSR),
1575 (shift >= 0) ? (shift) : (-shift)); 1602 (shift >= 0) ? (shift) : (-shift));
1576 1603
1577 if (size.IsZero()) { 1604 if (size.IsZero()) {
1578 return; 1605 return;
1579 } 1606 }
1580 1607
1581 Add(StackPointer(), StackPointer(), size); 1608 Add(StackPointer(), StackPointer(), size);
1582 1609
1583 if (!csp.Is(StackPointer()) && emit_debug_code()) { 1610 if (!csp.Is(StackPointer()) && emit_debug_code()) {
1584 // It is safe to leave csp where it is when unwinding the JavaScript stack, 1611 // It is safe to leave csp where it is when unwinding the JavaScript stack,
1585 // but if we keep it matching StackPointer, the simulator can detect memory 1612 // but if we keep it matching StackPointer, the simulator can detect memory
1586 // accesses in the now-free part of the stack. 1613 // accesses in the now-free part of the stack.
1587 Mov(csp, StackPointer()); 1614 SyncSystemStackPointer();
1588 } 1615 }
1589 } 1616 }
1590 1617
1591 1618
1592 void MacroAssembler::CompareAndBranch(const Register& lhs, 1619 void MacroAssembler::CompareAndBranch(const Register& lhs,
1593 const Operand& rhs, 1620 const Operand& rhs,
1594 Condition cond, 1621 Condition cond,
1595 Label* label) { 1622 Label* label) {
1596 if (rhs.IsImmediate() && (rhs.immediate() == 0) && 1623 if (rhs.IsImmediate() && (rhs.immediate() == 0) &&
1597 ((cond == eq) || (cond == ne))) { 1624 ((cond == eq) || (cond == ne))) {
(...skipping 63 matching lines...) Expand 10 before | Expand all | Expand 10 after
1661 // characters are reserved for controlling features of the instrumentation. 1688 // characters are reserved for controlling features of the instrumentation.
1662 ASSERT(isprint(marker_name[0]) && isprint(marker_name[1])); 1689 ASSERT(isprint(marker_name[0]) && isprint(marker_name[1]));
1663 1690
1664 InstructionAccurateScope scope(this, 1); 1691 InstructionAccurateScope scope(this, 1);
1665 movn(xzr, (marker_name[1] << 8) | marker_name[0]); 1692 movn(xzr, (marker_name[1] << 8) | marker_name[0]);
1666 } 1693 }
1667 1694
1668 } } // namespace v8::internal 1695 } } // namespace v8::internal
1669 1696
1670 #endif // V8_ARM64_MACRO_ASSEMBLER_ARM64_INL_H_ 1697 #endif // V8_ARM64_MACRO_ASSEMBLER_ARM64_INL_H_
OLDNEW
« no previous file with comments | « src/arm64/macro-assembler-arm64.cc ('k') | src/arm64/regexp-macro-assembler-arm64.cc » ('j') | no next file with comments »

Powered by Google App Engine
This is Rietveld 408576698