OLD | NEW |
---|---|
1 // Copyright 2013 the V8 project authors. All rights reserved. | 1 // Copyright 2013 the V8 project authors. All rights reserved. |
2 // Use of this source code is governed by a BSD-style license that can be | 2 // Use of this source code is governed by a BSD-style license that can be |
3 // found in the LICENSE file. | 3 // found in the LICENSE file. |
4 | 4 |
5 #include "src/v8.h" | 5 #include "src/v8.h" |
6 | 6 |
7 #if V8_TARGET_ARCH_ARM64 | 7 #if V8_TARGET_ARCH_ARM64 |
8 | 8 |
9 #include "src/bootstrapper.h" | 9 #include "src/bootstrapper.h" |
10 #include "src/codegen.h" | 10 #include "src/codegen.h" |
(...skipping 106 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... | |
117 } | 117 } |
118 } | 118 } |
119 | 119 |
120 unsigned n, imm_s, imm_r; | 120 unsigned n, imm_s, imm_r; |
121 if (IsImmLogical(immediate, reg_size, &n, &imm_s, &imm_r)) { | 121 if (IsImmLogical(immediate, reg_size, &n, &imm_s, &imm_r)) { |
122 // Immediate can be encoded in the instruction. | 122 // Immediate can be encoded in the instruction. |
123 LogicalImmediate(rd, rn, n, imm_s, imm_r, op); | 123 LogicalImmediate(rd, rn, n, imm_s, imm_r, op); |
124 } else { | 124 } else { |
125 // Immediate can't be encoded: synthesize using move immediate. | 125 // Immediate can't be encoded: synthesize using move immediate. |
126 Register temp = temps.AcquireSameSizeAs(rn); | 126 Register temp = temps.AcquireSameSizeAs(rn); |
127 Mov(temp, immediate); | 127 Operand imm_operand = MoveImmediateForShiftedOp(temp, immediate); |
128 if (rd.Is(csp)) { | 128 if (rd.Is(csp)) { |
129 // If rd is the stack pointer we cannot use it as the destination | 129 // If rd is the stack pointer we cannot use it as the destination |
130 // register so we use the temp register as an intermediate again. | 130 // register so we use the temp register as an intermediate again. |
131 Logical(temp, rn, temp, op); | 131 Logical(temp, rn, imm_operand, op); |
132 Mov(csp, temp); | 132 Mov(csp, temp); |
133 AssertStackConsistency(); | 133 AssertStackConsistency(); |
134 } else { | 134 } else { |
135 Logical(rd, rn, temp, op); | 135 Logical(rd, rn, imm_operand, op); |
136 } | 136 } |
137 } | 137 } |
138 | 138 |
139 } else if (operand.IsExtendedRegister()) { | 139 } else if (operand.IsExtendedRegister()) { |
140 ASSERT(operand.reg().SizeInBits() <= rd.SizeInBits()); | 140 ASSERT(operand.reg().SizeInBits() <= rd.SizeInBits()); |
141 // Add/sub extended supports shift <= 4. We want to support exactly the | 141 // Add/sub extended supports shift <= 4. We want to support exactly the |
142 // same modes here. | 142 // same modes here. |
143 ASSERT(operand.shift_amount() <= 4); | 143 ASSERT(operand.shift_amount() <= 4); |
144 ASSERT(operand.reg().Is64Bits() || | 144 ASSERT(operand.reg().Is64Bits() || |
145 ((operand.extend() != UXTX) && (operand.extend() != SXTX))); | 145 ((operand.extend() != UXTX) && (operand.extend() != SXTX))); |
(...skipping 25 matching lines...) Expand all Loading... | |
171 // 2. 32-bit move inverted (movn). | 171 // 2. 32-bit move inverted (movn). |
172 // 3. 64-bit move inverted. | 172 // 3. 64-bit move inverted. |
173 // 4. 32-bit orr immediate. | 173 // 4. 32-bit orr immediate. |
174 // 5. 64-bit orr immediate. | 174 // 5. 64-bit orr immediate. |
175 // Move-keep may then be used to modify each of the 16-bit half-words. | 175 // Move-keep may then be used to modify each of the 16-bit half-words. |
176 // | 176 // |
177 // The code below supports all five initial value generators, and | 177 // The code below supports all five initial value generators, and |
178 // applying move-keep operations to move-zero and move-inverted initial | 178 // applying move-keep operations to move-zero and move-inverted initial |
179 // values. | 179 // values. |
180 | 180 |
181 unsigned reg_size = rd.SizeInBits(); | 181 // Try to move the immediate in one instruction, and if that fails, switch to |
182 unsigned n, imm_s, imm_r; | 182 // using multiple instructions. |
183 if (IsImmMovz(imm, reg_size) && !rd.IsSP()) { | 183 if (!TryOneInstrMoveImmediate(rd, imm)) { |
184 // Immediate can be represented in a move zero instruction. Movz can't | 184 unsigned reg_size = rd.SizeInBits(); |
185 // write to the stack pointer. | 185 |
186 movz(rd, imm); | |
187 } else if (IsImmMovn(imm, reg_size) && !rd.IsSP()) { | |
188 // Immediate can be represented in a move inverted instruction. Movn can't | |
189 // write to the stack pointer. | |
190 movn(rd, rd.Is64Bits() ? ~imm : (~imm & kWRegMask)); | |
191 } else if (IsImmLogical(imm, reg_size, &n, &imm_s, &imm_r)) { | |
192 // Immediate can be represented in a logical orr instruction. | |
193 LogicalImmediate(rd, AppropriateZeroRegFor(rd), n, imm_s, imm_r, ORR); | |
194 } else { | |
195 // Generic immediate case. Imm will be represented by | 186 // Generic immediate case. Imm will be represented by |
196 // [imm3, imm2, imm1, imm0], where each imm is 16 bits. | 187 // [imm3, imm2, imm1, imm0], where each imm is 16 bits. |
197 // A move-zero or move-inverted is generated for the first non-zero or | 188 // A move-zero or move-inverted is generated for the first non-zero or |
198 // non-0xffff immX, and a move-keep for subsequent non-zero immX. | 189 // non-0xffff immX, and a move-keep for subsequent non-zero immX. |
199 | 190 |
200 uint64_t ignored_halfword = 0; | 191 uint64_t ignored_halfword = 0; |
201 bool invert_move = false; | 192 bool invert_move = false; |
202 // If the number of 0xffff halfwords is greater than the number of 0x0000 | 193 // If the number of 0xffff halfwords is greater than the number of 0x0000 |
203 // halfwords, it's more efficient to use move-inverted. | 194 // halfwords, it's more efficient to use move-inverted. |
204 if (CountClearHalfWords(~imm, reg_size) > | 195 if (CountClearHalfWords(~imm, reg_size) > |
(...skipping 207 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... | |
412 } else { | 403 } else { |
413 // All other arguments. | 404 // All other arguments. |
414 UseScratchRegisterScope temps(this); | 405 UseScratchRegisterScope temps(this); |
415 Register temp = temps.AcquireSameSizeAs(rn); | 406 Register temp = temps.AcquireSameSizeAs(rn); |
416 Mov(temp, operand); | 407 Mov(temp, operand); |
417 csel(rd, rn, temp, cond); | 408 csel(rd, rn, temp, cond); |
418 } | 409 } |
419 } | 410 } |
420 | 411 |
421 | 412 |
413 bool MacroAssembler::TryOneInstrMoveImmediate(const Register& dst, | |
414 int64_t imm) { | |
415 unsigned n, imm_s, imm_r; | |
416 int reg_size = dst.SizeInBits(); | |
417 if (IsImmMovz(imm, reg_size) && !dst.IsSP()) { | |
418 // Immediate can be represented in a move zero instruction. Movz can't write | |
419 // to the stack pointer. | |
420 movz(dst, imm); | |
421 return true; | |
422 } else if (IsImmMovn(imm, reg_size) && !dst.IsSP()) { | |
423 // Immediate can be represented in a move not instruction. Movn can't write | |
424 // to the stack pointer. | |
425 movn(dst, dst.Is64Bits() ? ~imm : (~imm & kWRegMask)); | |
426 return true; | |
427 } else if (IsImmLogical(imm, reg_size, &n, &imm_s, &imm_r)) { | |
428 // Immediate can be represented in a logical orr instruction. | |
429 LogicalImmediate(dst, AppropriateZeroRegFor(dst), n, imm_s, imm_r, ORR); | |
430 return true; | |
431 } | |
432 return false; | |
433 } | |
434 | |
435 | |
436 Operand MacroAssembler::MoveImmediateForShiftedOp(const Register& dst, | |
437 int64_t imm) { | |
438 int reg_size = dst.SizeInBits(); | |
439 | |
440 // Encode the immediate in a single move instruction, if possible. | |
441 if (TryOneInstrMoveImmediate(dst, imm)) { | |
442 // The move was successful; nothing to do here. | |
443 } else { | |
444 // Pre-shift the immediate to the least-significant bits of the register. | |
445 int shift_low = CountTrailingZeros(imm, reg_size); | |
446 int64_t imm_low = imm >> shift_low; | |
447 | |
448 // Pre-shift the immediate to the most-significant bits of the register, | |
449 // inserting set bits in the least-significant bits. | |
[review comment thread — ulan, 2014/07/07 07:42:20: "Could you please add a comment explaining why we f…" (message truncated by the review tool; presumably asking to document why the immediate is pre-shifted low/high before retrying the single-instruction move) — reply: m.m.capewell, 2014/07/07 13:28:03: "Done."]
| |
450 int shift_high = CountLeadingZeros(imm, reg_size); | |
451 int64_t imm_high = (imm << shift_high) | ((1 << shift_high) - 1); | |
452 | |
453 if (TryOneInstrMoveImmediate(dst, imm_low)) { | |
454 // The new immediate has been moved into the destination's low bits: | |
455 // return a new leftward-shifting operand. | |
456 return Operand(dst, LSL, shift_low); | |
457 } else if (TryOneInstrMoveImmediate(dst, imm_high)) { | |
458 // The new immediate has been moved into the destination's high bits: | |
459 // return a new rightward-shifting operand. | |
460 return Operand(dst, LSR, shift_high); | |
461 } else { | |
462 Mov(dst, imm); | |
463 } | |
464 } | |
465 return Operand(dst); | |
466 } | |
467 | |
468 | |
422 void MacroAssembler::AddSubMacro(const Register& rd, | 469 void MacroAssembler::AddSubMacro(const Register& rd, |
423 const Register& rn, | 470 const Register& rn, |
424 const Operand& operand, | 471 const Operand& operand, |
425 FlagsUpdate S, | 472 FlagsUpdate S, |
426 AddSubOp op) { | 473 AddSubOp op) { |
427 if (operand.IsZero() && rd.Is(rn) && rd.Is64Bits() && rn.Is64Bits() && | 474 if (operand.IsZero() && rd.Is(rn) && rd.Is64Bits() && rn.Is64Bits() && |
428 !operand.NeedsRelocation(this) && (S == LeaveFlags)) { | 475 !operand.NeedsRelocation(this) && (S == LeaveFlags)) { |
429 // The instruction would be a nop. Avoid generating useless code. | 476 // The instruction would be a nop. Avoid generating useless code. |
430 return; | 477 return; |
431 } | 478 } |
432 | 479 |
433 if (operand.NeedsRelocation(this)) { | 480 if (operand.NeedsRelocation(this)) { |
434 UseScratchRegisterScope temps(this); | 481 UseScratchRegisterScope temps(this); |
435 Register temp = temps.AcquireX(); | 482 Register temp = temps.AcquireX(); |
436 Ldr(temp, operand.immediate()); | 483 Ldr(temp, operand.immediate()); |
437 AddSubMacro(rd, rn, temp, S, op); | 484 AddSubMacro(rd, rn, temp, S, op); |
438 } else if ((operand.IsImmediate() && | 485 } else if ((operand.IsImmediate() && |
439 !IsImmAddSub(operand.ImmediateValue())) || | 486 !IsImmAddSub(operand.ImmediateValue())) || |
440 (rn.IsZero() && !operand.IsShiftedRegister()) || | 487 (rn.IsZero() && !operand.IsShiftedRegister()) || |
441 (operand.IsShiftedRegister() && (operand.shift() == ROR))) { | 488 (operand.IsShiftedRegister() && (operand.shift() == ROR))) { |
442 UseScratchRegisterScope temps(this); | 489 UseScratchRegisterScope temps(this); |
443 Register temp = temps.AcquireSameSizeAs(rn); | 490 Register temp = temps.AcquireSameSizeAs(rn); |
444 Mov(temp, operand); | 491 if (operand.IsImmediate()) { |
445 AddSub(rd, rn, temp, S, op); | 492 Operand imm_operand = |
493 MoveImmediateForShiftedOp(temp, operand.ImmediateValue()); | |
494 AddSub(rd, rn, imm_operand, S, op); | |
495 } else { | |
496 Mov(temp, operand); | |
497 AddSub(rd, rn, temp, S, op); | |
498 } | |
446 } else { | 499 } else { |
447 AddSub(rd, rn, operand, S, op); | 500 AddSub(rd, rn, operand, S, op); |
448 } | 501 } |
449 } | 502 } |
450 | 503 |
451 | 504 |
452 void MacroAssembler::AddSubWithCarryMacro(const Register& rd, | 505 void MacroAssembler::AddSubWithCarryMacro(const Register& rd, |
453 const Register& rn, | 506 const Register& rn, |
454 const Operand& operand, | 507 const Operand& operand, |
455 FlagsUpdate S, | 508 FlagsUpdate S, |
(...skipping 4857 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... | |
5313 } | 5366 } |
5314 } | 5367 } |
5315 | 5368 |
5316 | 5369 |
5317 #undef __ | 5370 #undef __ |
5318 | 5371 |
5319 | 5372 |
5320 } } // namespace v8::internal | 5373 } } // namespace v8::internal |
5321 | 5374 |
5322 #endif // V8_TARGET_ARCH_ARM64 | 5375 #endif // V8_TARGET_ARCH_ARM64 |
OLD | NEW |