Chromium Code Reviews

Unified Diff: src/arm64/macro-assembler-arm64.cc

Issue 318773009: ARM64: Clean up support for explicit literal load. (Closed)
Base URL: https://v8.googlecode.com/svn/branches/bleeding_edge
Patch Set: Created 6 years, 6 months ago
 // Copyright 2013 the V8 project authors. All rights reserved.
 // Use of this source code is governed by a BSD-style license that can be
 // found in the LICENSE file.

 #include "src/v8.h"

 #if V8_TARGET_ARCH_ARM64

 #include "src/bootstrapper.h"
 #include "src/codegen.h"
(...skipping 40 matching lines...)


 void MacroAssembler::LogicalMacro(const Register& rd,
                                   const Register& rn,
                                   const Operand& operand,
                                   LogicalOp op) {
   UseScratchRegisterScope temps(this);

   if (operand.NeedsRelocation(this)) {
     Register temp = temps.AcquireX();
-    LoadRelocated(temp, operand);
+    Ldr(temp, operand.immediate());
     Logical(rd, rn, temp, op);

   } else if (operand.IsImmediate()) {
-    int64_t immediate = operand.immediate();
+    int64_t immediate = operand.ImmediateValue();
     unsigned reg_size = rd.SizeInBits();
     ASSERT(rd.Is64Bits() || is_uint32(immediate));

     // If the operation is NOT, invert the operation and immediate.
     if ((op & NOT) == NOT) {
       op = static_cast<LogicalOp>(op & ~NOT);
       immediate = ~immediate;
       if (rd.Is32Bits()) {
         immediate &= kWRegMask;
       }
(...skipping 167 matching lines...)
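Note on the NOT handling above: logical ops carrying the NOT flag (bic, orn, eon) have no immediate encodings of their own, so the macro folds the inversion into the immediate and retries as the base op. A minimal sketch of the equivalence (illustrative only, not part of this patch):

    // Both forms assemble to the same "and" instruction: LogicalMacro
    // clears the NOT bit and inverts (and, for W registers, masks) the
    // immediate before the encodable-bitmask check runs.
    Bic(w0, w1, Operand(0x0000ff00));   // w0 = w1 & ~0x0000ff00
    And(w0, w1, Operand(0xffff00ff));   // same result after folding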
                          DiscardMoveMode discard_mode) {
   ASSERT(allow_macro_instructions_);
   ASSERT(!rd.IsZero());

   // Provide a swap register for instructions that need to write into the
   // system stack pointer (and can't do this inherently).
   UseScratchRegisterScope temps(this);
   Register dst = (rd.IsSP()) ? temps.AcquireSameSizeAs(rd) : rd;

   if (operand.NeedsRelocation(this)) {
-    LoadRelocated(dst, operand);
+    Ldr(dst, operand.immediate());

   } else if (operand.IsImmediate()) {
     // Call the macro assembler for generic immediates.
-    Mov(dst, operand.immediate());
+    Mov(dst, operand.ImmediateValue());

   } else if (operand.IsShiftedRegister() && (operand.shift_amount() != 0)) {
     // Emit a shift instruction if moving a shifted register. This operation
     // could also be achieved using an orr instruction (like orn used by Mvn),
     // but using a shift instruction makes the disassembly clearer.
     EmitShift(dst, operand.reg(), operand.shift(), operand.shift_amount());

   } else if (operand.IsExtendedRegister()) {
     // Emit an extend instruction if moving an extended register. This handles
     // extend with post-shift operations, too.
(...skipping 23 matching lines...)
     ASSERT(rd.IsSP());
     Assembler::mov(rd, dst);
   }
 }

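Note on the shifted-register case in Mov: both encodings are architecturally equivalent; the shift is chosen purely for readable disassembly. Sketch (illustrative only):

    Mov(x0, Operand(x1, LSL, 4));   // emitted as: lsl x0, x1, #4
    // "orr x0, xzr, x1, lsl #4" would compute the same value (compare the
    // orn used by Mvn below), but an explicit shift reads better.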
 void MacroAssembler::Mvn(const Register& rd, const Operand& operand) {
   ASSERT(allow_macro_instructions_);

   if (operand.NeedsRelocation(this)) {
-    LoadRelocated(rd, operand);
+    Ldr(rd, operand.immediate());
     mvn(rd, rd);

   } else if (operand.IsImmediate()) {
     // Call the macro assembler for generic immediates.
-    Mov(rd, ~operand.immediate());
+    Mov(rd, ~operand.ImmediateValue());

   } else if (operand.IsExtendedRegister()) {
     // Emit two instructions for the extend case. This differs from Mov, as
     // the extend and invert can't be achieved in one instruction.
     EmitExtendShift(rd, operand.reg(), operand.extend(),
                     operand.shift_amount());
     mvn(rd, rd);

   } else {
     mvn(rd, operand);
(...skipping 31 matching lines...)
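Note on the extended-register case in Mvn: the extend and the invert cannot be fused into one instruction, so two are emitted. Sketch (illustrative only):

    Mvn(x0, Operand(w1, SXTW));
    // emits:  sxtw x0, w1
    //         mvn  x0, x0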

 void MacroAssembler::ConditionalCompareMacro(const Register& rn,
                                              const Operand& operand,
                                              StatusFlags nzcv,
                                              Condition cond,
                                              ConditionalCompareOp op) {
   ASSERT((cond != al) && (cond != nv));
   if (operand.NeedsRelocation(this)) {
     UseScratchRegisterScope temps(this);
     Register temp = temps.AcquireX();
-    LoadRelocated(temp, operand);
+    Ldr(temp, operand.immediate());
     ConditionalCompareMacro(rn, temp, nzcv, cond, op);

   } else if ((operand.IsShiftedRegister() && (operand.shift_amount() == 0)) ||
-             (operand.IsImmediate() && IsImmConditionalCompare(operand.immediate()))) {
+             (operand.IsImmediate() &&
+              IsImmConditionalCompare(operand.ImmediateValue()))) {
     // The immediate can be encoded in the instruction, or the operand is an
     // unshifted register: call the assembler.
     ConditionalCompare(rn, operand, nzcv, cond, op);

   } else {
     // The operand isn't directly supported by the instruction: perform the
     // operation on a temporary register.
     UseScratchRegisterScope temps(this);
     Register temp = temps.AcquireSameSizeAs(rn);
     Mov(temp, operand);
     ConditionalCompare(rn, temp, nzcv, cond, op);
   }
 }

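Note: ccmp/ccmn can only encode a 5-bit unsigned immediate, which is what IsImmConditionalCompare checks for. Sketch (illustrative only):

    Ccmp(x0, Operand(31), NoFlag, eq);    // fits uint5: a single ccmp
    Ccmp(x0, Operand(1234), NoFlag, eq);  // too wide: mov to a temp, then ccmp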
 void MacroAssembler::Csel(const Register& rd,
                           const Register& rn,
                           const Operand& operand,
                           Condition cond) {
   ASSERT(allow_macro_instructions_);
   ASSERT(!rd.IsZero());
   ASSERT((cond != al) && (cond != nv));
   if (operand.IsImmediate()) {
     // Immediate argument. Handle special cases of 0, 1 and -1 using zero
     // register.
-    int64_t imm = operand.immediate();
+    int64_t imm = operand.ImmediateValue();
     Register zr = AppropriateZeroRegFor(rn);
     if (imm == 0) {
       csel(rd, rn, zr, cond);
     } else if (imm == 1) {
       csinc(rd, rn, zr, cond);
     } else if (imm == -1) {
       csinv(rd, rn, zr, cond);
     } else {
       UseScratchRegisterScope temps(this);
       Register temp = temps.AcquireSameSizeAs(rn);
-      Mov(temp, operand.immediate());
+      Mov(temp, imm);
       csel(rd, rn, temp, cond);
     }
   } else if (operand.IsShiftedRegister() && (operand.shift_amount() == 0)) {
     // Unshifted register argument.
     csel(rd, rn, operand.reg(), cond);
   } else {
     // All other arguments.
     UseScratchRegisterScope temps(this);
     Register temp = temps.AcquireSameSizeAs(rn);
     Mov(temp, operand);
     csel(rd, rn, temp, cond);
   }
 }

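Note: the 0/1/-1 special cases lean on the zero register and the csel variants, avoiding a temp entirely. Sketch (illustrative only):

    Csel(x0, x1, 0, eq);    // csel  x0, x1, xzr, eq
    Csel(x0, x1, 1, eq);    // csinc x0, x1, xzr, eq  (xzr + 1 == 1)
    Csel(x0, x1, -1, eq);   // csinv x0, x1, xzr, eq  (~xzr == -1)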
 void MacroAssembler::AddSubMacro(const Register& rd,
                                  const Register& rn,
                                  const Operand& operand,
                                  FlagsUpdate S,
                                  AddSubOp op) {
   if (operand.IsZero() && rd.Is(rn) && rd.Is64Bits() && rn.Is64Bits() &&
       !operand.NeedsRelocation(this) && (S == LeaveFlags)) {
     // The instruction would be a nop. Avoid generating useless code.
     return;
   }

   if (operand.NeedsRelocation(this)) {
     UseScratchRegisterScope temps(this);
     Register temp = temps.AcquireX();
-    LoadRelocated(temp, operand);
+    Ldr(temp, operand.immediate());
     AddSubMacro(rd, rn, temp, S, op);
-  } else if ((operand.IsImmediate() && !IsImmAddSub(operand.immediate())) ||
+  } else if ((operand.IsImmediate() &&
+              !IsImmAddSub(operand.ImmediateValue())) ||
              (rn.IsZero() && !operand.IsShiftedRegister()) ||
              (operand.IsShiftedRegister() && (operand.shift() == ROR))) {
     UseScratchRegisterScope temps(this);
     Register temp = temps.AcquireSameSizeAs(rn);
     Mov(temp, operand);
     AddSub(rd, rn, temp, S, op);
   } else {
     AddSub(rd, rn, operand, S, op);
   }
 }

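Note: add/sub immediates are 12 bits, optionally shifted left by 12, which is what IsImmAddSub accepts; anything else is materialized into a temp first. Sketch (illustrative only):

    Add(x0, x1, 0xfff);       // encodable: a single add
    Add(x0, x1, 0xfff000);    // encodable: add with "lsl #12"
    Add(x0, x1, 0x12345);     // not encodable: mov to a temp, then add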
 void MacroAssembler::AddSubWithCarryMacro(const Register& rd,
                                           const Register& rn,
                                           const Operand& operand,
                                           FlagsUpdate S,
                                           AddSubWithCarryOp op) {
   ASSERT(rd.SizeInBits() == rn.SizeInBits());
   UseScratchRegisterScope temps(this);

   if (operand.NeedsRelocation(this)) {
     Register temp = temps.AcquireX();
-    LoadRelocated(temp, operand);
+    Ldr(temp, operand.immediate());
     AddSubWithCarryMacro(rd, rn, temp, S, op);

   } else if (operand.IsImmediate() ||
              (operand.IsShiftedRegister() && (operand.shift() == ROR))) {
     // Add/sub with carry (immediate or ROR shifted register.)
     Register temp = temps.AcquireSameSizeAs(rn);
     Mov(temp, operand);
     AddSubWithCarry(rd, rn, temp, S, op);

   } else if (operand.IsShiftedRegister() && (operand.shift_amount() != 0)) {
(...skipping 599 matching lines...)
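Note on the chunk above: adc/sbc have register forms only, with no immediate encoding at all, so every immediate (and any ROR-shifted register) goes through a temp. Sketch (illustrative only):

    Adc(x0, x1, 1);   // becomes roughly: mov <temp>, #1 ; adc x0, x1, <temp>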
   }
 }


 void MacroAssembler::PushPreamble(Operand total_size) {
   if (csp.Is(StackPointer())) {
     // If the current stack pointer is csp, then it must be aligned to 16 bytes
     // on entry and the total size of the specified registers must also be a
     // multiple of 16 bytes.
     if (total_size.IsImmediate()) {
-      ASSERT((total_size.immediate() % 16) == 0);
+      ASSERT((total_size.ImmediateValue() % 16) == 0);
     }

     // Don't check access size for non-immediate sizes. It's difficult to do
     // well, and it will be caught by hardware (or the simulator) anyway.
   } else {
     // Even if the current stack pointer is not the system stack pointer (csp),
     // the system stack pointer will still be modified in order to comply with
     // ABI rules about accessing memory below the system stack pointer.
     BumpSystemStackPointer(total_size);
   }
 }

 void MacroAssembler::PopPostamble(Operand total_size) {
   if (csp.Is(StackPointer())) {
     // If the current stack pointer is csp, then it must be aligned to 16 bytes
     // on entry and the total size of the specified registers must also be a
     // multiple of 16 bytes.
     if (total_size.IsImmediate()) {
-      ASSERT((total_size.immediate() % 16) == 0);
+      ASSERT((total_size.ImmediateValue() % 16) == 0);
     }

     // Don't check access size for non-immediate sizes. It's difficult to do
     // well, and it will be caught by hardware (or the simulator) anyway.
   } else if (emit_debug_code()) {
     // It is safe to leave csp where it is when unwinding the JavaScript stack,
     // but if we keep it matching StackPointer, the simulator can detect memory
     // accesses in the now-free part of the stack.
     SyncSystemStackPointer();
   }
 }

 void MacroAssembler::Poke(const CPURegister& src, const Operand& offset) {
   if (offset.IsImmediate()) {
-    ASSERT(offset.immediate() >= 0);
+    ASSERT(offset.ImmediateValue() >= 0);
   } else if (emit_debug_code()) {
     Cmp(xzr, offset);
     Check(le, kStackAccessBelowStackPointer);
   }

   Str(src, MemOperand(StackPointer(), offset));
 }


 void MacroAssembler::Peek(const CPURegister& dst, const Operand& offset) {
   if (offset.IsImmediate()) {
-    ASSERT(offset.immediate() >= 0);
+    ASSERT(offset.ImmediateValue() >= 0);
   } else if (emit_debug_code()) {
     Cmp(xzr, offset);
     Check(le, kStackAccessBelowStackPointer);
   }

   Ldr(dst, MemOperand(StackPointer(), offset));
 }

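Note: Poke/Peek address slots relative to the current stack pointer; negative immediate offsets are rejected up front, and register offsets are checked only in debug code. Sketch (illustrative only):

    Poke(x0, 0);   // str x0, [StackPointer()]
    Poke(x1, 8);   // str x1, [StackPointer(), #8]
    Peek(x2, 0);   // ldr x2, [StackPointer()]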

 void MacroAssembler::PokePair(const CPURegister& src1,
(...skipping 929 matching lines...)

   if (rmode == RelocInfo::NONE64) {
     // Addresses are 48 bits so we never need to load the upper 16 bits.
     uint64_t imm = reinterpret_cast<uint64_t>(target);
     // If we don't use ARM tagged addresses, the 16 higher bits must be 0.
     ASSERT(((imm >> 48) & 0xffff) == 0);
     movz(temp, (imm >> 0) & 0xffff, 0);
     movk(temp, (imm >> 16) & 0xffff, 16);
     movk(temp, (imm >> 32) & 0xffff, 32);
   } else {
-    LoadRelocated(temp, Operand(reinterpret_cast<intptr_t>(target), rmode));
+    Ldr(temp, Immediate(reinterpret_cast<intptr_t>(target), rmode));
   }
   Blr(temp);
 #ifdef DEBUG
   AssertSizeOfCodeGeneratedSince(&start_call, CallSize(target, rmode));
 #endif
 }

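Note: with 48-bit addresses the top half-word is known to be zero, so three moves suffice. Sketch for a hypothetical target of 0x0000123456789abc (illustrative only):

    movz(temp, 0x9abc, 0);    // temp = 0x0000000000009abc
    movk(temp, 0x5678, 16);   // temp = 0x0000000056789abc
    movk(temp, 0x1234, 32);   // temp = 0x0000123456789abc
    // No movk at shift 48: the ASSERT above guarantees those bits are 0.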
 void MacroAssembler::Call(Handle<Code> code,
                           RelocInfo::Mode rmode,
(...skipping 3083 matching lines...)
                                 Code * stub) {
   Label start;
   __ bind(&start);
   // When the stub is called, the sequence is replaced with the young sequence
   // (as in EmitFrameSetupForCodeAgePatching). After the code is replaced, the
   // stub jumps to &start, stored in x0. The young sequence does not call the
   // stub so there is no infinite loop here.
   //
   // A branch (br) is used rather than a call (blr) because this code replaces
   // the frame setup code that would normally preserve lr.
-  __ LoadLiteral(ip0, kCodeAgeStubEntryOffset);
+  __ ldr_pcrel(ip0, kCodeAgeStubEntryOffset >> kLoadLiteralScaleLog2);
   __ adr(x0, &start);
   __ br(ip0);
   // IsCodeAgeSequence in codegen-arm64.cc assumes that the code generated up
   // until now (kCodeAgeStubEntryOffset) is the same for all code age sequences.
   __ AssertSizeOfCodeGeneratedSince(&start, kCodeAgeStubEntryOffset);
   if (stub) {
     __ dc64(reinterpret_cast<uint64_t>(stub->instruction_start()));
     __ AssertSizeOfCodeGeneratedSince(&start, kNoCodeAgeSequenceLength);
   }
 }
(...skipping 105 matching lines...)
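Note on the ldr_pcrel change above: the pc-relative literal-load encoding scales its 19-bit immediate by 4, so ldr_pcrel takes its offset in words, not bytes; hence the shift by kLoadLiteralScaleLog2 (2). Sketch (illustrative only):

    // A literal placed 8 bytes past the load instruction:
    __ ldr_pcrel(ip0, 8 >> kLoadLiteralScaleLog2);  // imm19 == 2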
   }
 }


 #undef __


 } }  // namespace v8::internal

 #endif  // V8_TARGET_ARCH_ARM64