Index: src/x64/macro-assembler-x64.cc
diff --git a/src/x64/macro-assembler-x64.cc b/src/x64/macro-assembler-x64.cc
index b5b7324be0af31dbe9e1a0b0b760d9c17190a282..caf9b204e902a4eb81abf8e88b63350a4f7c16cd 100644
--- a/src/x64/macro-assembler-x64.cc
+++ b/src/x64/macro-assembler-x64.cc
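Note: every hunk below swaps a plain x64 mnemonic for a 'p'-suffixed macro: lea becomes leap, and further down repmovsq becomes repmovsp and movzxbq becomes movzxbp. Judging from the movp/addp/cmpp calls already present in the surrounding context lines, the 'p' suffix denotes the pointer-sized form of the operation, so the patch reads as moving this address arithmetic onto pointer-width instructions; that reading is inferred from the identifiers in the diff rather than stated by the patch itself.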
@@ -128,7 +128,7 @@ void MacroAssembler::LoadAddress(Register destination,
     intptr_t delta = RootRegisterDelta(source);
     if (delta != kInvalidRootRegisterDelta && is_int32(delta)) {
       Serializer::TooLateToEnableNow();
-      lea(destination, Operand(kRootRegister, static_cast<int32_t>(delta)));
+      leap(destination, Operand(kRootRegister, static_cast<int32_t>(delta)));
       return;
     }
   }
@@ -145,7 +145,7 @@ int MacroAssembler::LoadAddressSize(ExternalReference source) {
     intptr_t delta = RootRegisterDelta(source);
     if (delta != kInvalidRootRegisterDelta && is_int32(delta)) {
       Serializer::TooLateToEnableNow();
-      // Operand is lea(scratch, Operand(kRootRegister, delta));
+      // Operand is leap(scratch, Operand(kRootRegister, delta));
       // Opcodes : REX.W 8D ModRM Disp8/Disp32 - 4 or 7.
       int size = 4;
       if (!is_int8(static_cast<int32_t>(delta))) {
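Note: the "4 or 7" in the comment above follows directly from the listed encoding: REX.W prefix (1 byte) + opcode 8D (1) + ModRM (1) + an 8-bit displacement (1) gives 4 bytes when delta fits in a signed byte; otherwise the displacement is 32-bit and adds 3 more bytes, for 7. A sketch of that arithmetic, mirroring the size computation that begins in the context lines above (illustrative only):

    // Size of leap(scratch, Operand(kRootRegister, delta)), per the comment.
    int size = 1 /* REX.W */ + 1 /* 8D */ + 1 /* ModRM */ + 1 /* disp8 */;  // 4
    if (!is_int8(static_cast<int32_t>(delta))) {
      size += 3;  // disp8 widens to disp32: three extra bytes, so 7 in total.
    }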
@@ -293,7 +293,7 @@ void MacroAssembler::InNewSpace(Register object,
     if (scratch.is(object)) {
       addp(scratch, kScratchRegister);
     } else {
-      lea(scratch, Operand(object, kScratchRegister, times_1, 0));
+      leap(scratch, Operand(object, kScratchRegister, times_1, 0));
     }
     and_(scratch,
          Immediate(static_cast<int32_t>(isolate()->heap()->NewSpaceMask())));
@@ -323,7 +323,7 @@ void MacroAssembler::RecordWriteField(
   // of the object, so so offset must be a multiple of kPointerSize.
   ASSERT(IsAligned(offset, kPointerSize));
 
-  lea(dst, FieldOperand(object, offset));
+  leap(dst, FieldOperand(object, offset));
   if (emit_debug_code()) {
     Label ok;
     testb(dst, Immediate((1 << kPointerSizeLog2) - 1));
@@ -363,7 +363,7 @@ void MacroAssembler::RecordWriteArray(Register object,
 
   // Array access: calculate the destination address. Index is not a smi.
   Register dst = index;
-  lea(dst, Operand(object, index, times_pointer_size,
+  leap(dst, Operand(object, index, times_pointer_size,
                    FixedArray::kHeaderSize - kHeapObjectTag));
 
   RecordWrite(
@@ -1052,24 +1052,28 @@ void MacroAssembler::LoadSmiConstant(Register dst, Smi* source) {
 
   switch (uvalue) {
     case 9:
-      lea(dst, Operand(kSmiConstantRegister, kSmiConstantRegister, times_8, 0));
+      leap(dst,
+           Operand(kSmiConstantRegister, kSmiConstantRegister, times_8, 0));
       break;
     case 8:
       xorl(dst, dst);
-      lea(dst, Operand(dst, kSmiConstantRegister, times_8, 0));
+      leap(dst, Operand(dst, kSmiConstantRegister, times_8, 0));
       break;
     case 4:
       xorl(dst, dst);
-      lea(dst, Operand(dst, kSmiConstantRegister, times_4, 0));
+      leap(dst, Operand(dst, kSmiConstantRegister, times_4, 0));
       break;
     case 5:
-      lea(dst, Operand(kSmiConstantRegister, kSmiConstantRegister, times_4, 0));
+      leap(dst,
+           Operand(kSmiConstantRegister, kSmiConstantRegister, times_4, 0));
      break;
     case 3:
-      lea(dst, Operand(kSmiConstantRegister, kSmiConstantRegister, times_2, 0));
+      leap(dst,
+           Operand(kSmiConstantRegister, kSmiConstantRegister, times_2, 0));
       break;
     case 2:
-      lea(dst, Operand(kSmiConstantRegister, kSmiConstantRegister, times_1, 0));
+      leap(dst,
+           Operand(kSmiConstantRegister, kSmiConstantRegister, times_1, 0));
       break;
     case 1:
       movp(dst, kSmiConstantRegister);
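Note: the leap calls in this hunk build small Smi constants in a single address computation, since Operand(base, index, scale, 0) evaluates to base + index * scale. Assuming kSmiConstantRegister holds Smi 1, as the plain movp in the case 1 branch suggests, the cases read roughly as follows (illustrative only, with r standing for kSmiConstantRegister):

    leap(dst, Operand(r, r, times_8, 0));  // dst = r + 8*r  -> Smi 9
    leap(dst, Operand(r, r, times_4, 0));  // dst = r + 4*r  -> Smi 5
    leap(dst, Operand(r, r, times_2, 0));  // dst = r + 2*r  -> Smi 3
    leap(dst, Operand(r, r, times_1, 0));  // dst = r + 1*r  -> Smi 2
    // Cases 8 and 4 first clear dst, giving dst = 0 + 8*r and 0 + 4*r.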
@@ -1452,13 +1456,13 @@ void MacroAssembler::SmiAddConstant(Register dst, Register src, Smi* constant) {
         addp(dst, kSmiConstantRegister);
         return;
       case 2:
-        lea(dst, Operand(src, kSmiConstantRegister, times_2, 0));
+        leap(dst, Operand(src, kSmiConstantRegister, times_2, 0));
         return;
       case 4:
-        lea(dst, Operand(src, kSmiConstantRegister, times_4, 0));
+        leap(dst, Operand(src, kSmiConstantRegister, times_4, 0));
         return;
       case 8:
-        lea(dst, Operand(src, kSmiConstantRegister, times_8, 0));
+        leap(dst, Operand(src, kSmiConstantRegister, times_8, 0));
         return;
       default:
         Register constant_reg = GetSmiConstant(constant);
@@ -1468,16 +1472,16 @@ void MacroAssembler::SmiAddConstant(Register dst, Register src, Smi* constant) {
   } else {
     switch (constant->value()) {
       case 1:
-        lea(dst, Operand(src, kSmiConstantRegister, times_1, 0));
+        leap(dst, Operand(src, kSmiConstantRegister, times_1, 0));
         return;
       case 2:
-        lea(dst, Operand(src, kSmiConstantRegister, times_2, 0));
+        leap(dst, Operand(src, kSmiConstantRegister, times_2, 0));
         return;
       case 4:
-        lea(dst, Operand(src, kSmiConstantRegister, times_4, 0));
+        leap(dst, Operand(src, kSmiConstantRegister, times_4, 0));
         return;
       case 8:
-        lea(dst, Operand(src, kSmiConstantRegister, times_8, 0));
+        leap(dst, Operand(src, kSmiConstantRegister, times_8, 0));
         return;
       default:
         LoadSmiConstant(dst, constant);
@@ -1690,7 +1694,7 @@ void MacroAssembler::SmiAdd(Register dst,
       addp(kScratchRegister, src2);
       Check(no_overflow, kSmiAdditionOverflow);
     }
-    lea(dst, Operand(src1, src2, times_1, 0));
+    leap(dst, Operand(src1, src2, times_1, 0));
   } else {
     addp(dst, src2);
     Assert(no_overflow, kSmiAdditionOverflow);
@@ -1953,7 +1957,7 @@ void MacroAssembler::SmiNot(Register dst, Register src) {
   if (dst.is(src)) {
     xor_(dst, kScratchRegister);
   } else {
-    lea(dst, Operand(src, kScratchRegister, times_1, 0));
+    leap(dst, Operand(src, kScratchRegister, times_1, 0));
   }
   not_(dst);
 }
@@ -2394,7 +2398,7 @@ void MacroAssembler::JumpIfNotBothSequentialAsciiStrings(
   andl(scratch2, Immediate(kFlatAsciiStringMask));
   // Interleave the bits to check both scratch1 and scratch2 in one test.
   ASSERT_EQ(0, kFlatAsciiStringMask & (kFlatAsciiStringMask << 3));
-  lea(scratch1, Operand(scratch1, scratch2, times_8, 0));
+  leap(scratch1, Operand(scratch1, scratch2, times_8, 0));
   cmpl(scratch1,
        Immediate(kFlatAsciiStringTag + (kFlatAsciiStringTag << 3)));
   j(not_equal, on_fail, near_jump);
@@ -2441,7 +2445,7 @@ void MacroAssembler::JumpIfBothInstanceTypesAreNotSequentialAscii(
   andl(scratch2, Immediate(kFlatAsciiStringMask));
   // Interleave the bits to check both scratch1 and scratch2 in one test.
   ASSERT_EQ(0, kFlatAsciiStringMask & (kFlatAsciiStringMask << 3));
-  lea(scratch1, Operand(scratch1, scratch2, times_8, 0));
+  leap(scratch1, Operand(scratch1, scratch2, times_8, 0));
   cmpl(scratch1,
        Immediate(kFlatAsciiStringTag + (kFlatAsciiStringTag << 3)));
   j(not_equal, on_fail, near_jump);
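Note: in both hunks above, the leap folds the two masked instance-type words into one register so that a single cmpl covers both strings. Operand(scratch1, scratch2, times_8, 0) computes scratch1 + (scratch2 << 3), and the preceding ASSERT_EQ guarantees that kFlatAsciiStringMask and kFlatAsciiStringMask << 3 share no bits, so the addition cannot carry from one field into the other and the sum equals the bitwise interleave the comment describes. A rough equivalent in plain C++ (illustrative, not part of the patch):

    // s1 and s2 are the masked instance types of the two strings.
    uint32_t combined = s1 + (s2 << 3);  // same as s1 | (s2 << 3): no carries
    bool both_flat_ascii =
        combined == kFlatAsciiStringTag + (kFlatAsciiStringTag << 3);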
@@ -2765,7 +2769,7 @@ void MacroAssembler::Pushad() {
   // Use lea for symmetry with Popad.
   int sp_delta =
       (kNumSafepointRegisters - kNumSafepointSavedRegisters) * kPointerSize;
-  lea(rsp, Operand(rsp, -sp_delta));
+  leap(rsp, Operand(rsp, -sp_delta));
 }
 
 
@@ -2773,7 +2777,7 @@
   // Popad must not change the flags, so use lea instead of addq.
   int sp_delta =
       (kNumSafepointRegisters - kNumSafepointSavedRegisters) * kPointerSize;
-  lea(rsp, Operand(rsp, sp_delta));
+  leap(rsp, Operand(rsp, sp_delta));
   Pop(r15);
   Pop(r14);
   Pop(r11);
@@ -2893,7 +2897,7 @@ void MacroAssembler::JumpToHandlerEntry() {
   movp(rdx,
        FieldOperand(rbx, rdx, times_pointer_size, FixedArray::kHeaderSize));
   SmiToInteger64(rdx, rdx);
-  lea(rdi, FieldOperand(rdi, rdx, times_1, Code::kHeaderSize));
+  leap(rdi, FieldOperand(rdi, rdx, times_1, Code::kHeaderSize));
   jmp(rdi);
 }
 
@@ -3852,7 +3856,7 @@ void MacroAssembler::EnterExitFrame(int arg_stack_space, bool save_doubles) {
   // Set up argv in callee-saved register r15. It is reused in LeaveExitFrame,
   // so it must be retained across the C-call.
   int offset = StandardFrameConstants::kCallerSPOffset - kPointerSize;
-  lea(r15, Operand(rbp, r14, times_pointer_size, offset));
+  leap(r15, Operand(rbp, r14, times_pointer_size, offset));
 
   EnterExitFrameEpilogue(arg_stack_space, save_doubles);
 }
@@ -3880,7 +3884,7 @@ void MacroAssembler::LeaveExitFrame(bool save_doubles) {
 
   // Drop everything up to and including the arguments and the receiver
   // from the caller stack.
-  lea(rsp, Operand(r15, 1 * kPointerSize));
+  leap(rsp, Operand(r15, 1 * kPointerSize));
 
   PushReturnAddressFrom(rcx);
 
@@ -4068,7 +4072,7 @@ void MacroAssembler::LoadFromNumberDictionary(Label* miss,
 
     // Scale the index by multiplying by the entry size.
     ASSERT(SeededNumberDictionary::kEntrySize == 3);
-    lea(r2, Operand(r2, r2, times_2, 0));  // r2 = r2 * 3
+    leap(r2, Operand(r2, r2, times_2, 0));  // r2 = r2 * 3
 
     // Check if the key matches.
     cmpp(key, FieldOperand(elements,
@@ -4225,7 +4229,7 @@ void MacroAssembler::Allocate(int header_size,
                               Label* gc_required,
                               AllocationFlags flags) {
   ASSERT((flags & SIZE_IN_WORDS) == 0);
-  lea(result_end, Operand(element_count, element_size, header_size));
+  leap(result_end, Operand(element_count, element_size, header_size));
   Allocate(result_end, result, result_end, scratch, gc_required, flags);
 }
 
@@ -4323,7 +4327,7 @@ void MacroAssembler::AllocateTwoByteString(Register result,
                                kObjectAlignmentMask;
   ASSERT(kShortSize == 2);
   // scratch1 = length * 2 + kObjectAlignmentMask.
-  lea(scratch1, Operand(length, length, times_1, kObjectAlignmentMask +
+  leap(scratch1, Operand(length, length, times_1, kObjectAlignmentMask +
                 kHeaderAlignment));
   and_(scratch1, Immediate(~kObjectAlignmentMask));
   if (kHeaderAlignment > 0) {
@@ -4511,7 +4515,7 @@ void MacroAssembler::CopyBytes(Register destination,
   // at the end of the ranges.
   movp(scratch, length);
   shrl(length, Immediate(kPointerSizeLog2));
-  repmovsq();
+  repmovsp();
   // Move remaining bytes of length.
   andl(scratch, Immediate(kPointerSize - 1));
   movp(length, Operand(source, scratch, times_1, -kPointerSize));
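Note: the fast path above copies length >> kPointerSizeLog2 whole words with the string-move instruction (now the pointer-width repmovsp), keeps the original byte count in scratch, and then appears to finish the remaining bytes with an overlapping pointer-sized move taken relative to the end of the range. A hypothetical C-style sketch of that strategy (not V8 code), assuming destination/source are byte pointers to non-overlapping buffers and length >= kPointerSize:

    size_t words = length >> kPointerSizeLog2;            // shrl(length, ...)
    memcpy(destination, source, words * kPointerSize);    // repmovsp()
    // One overlapping pointer-sized copy covers the length % kPointerSize
    // tail bytes; rewriting a few already-copied bytes is harmless.
    memcpy(destination + length - kPointerSize,
           source + length - kPointerSize, kPointerSize);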
@@ -4683,7 +4687,7 @@ void MacroAssembler::EmitSeqStringSetCharCheck(Register string,
 
   Push(value);
   movp(value, FieldOperand(string, HeapObject::kMapOffset));
-  movzxbq(value, FieldOperand(value, Map::kInstanceTypeOffset));
+  movzxbp(value, FieldOperand(value, Map::kInstanceTypeOffset));
 
   andb(value, Immediate(kStringRepresentationMask | kStringEncodingMask));
   cmpp(value, Immediate(encoding_mask));
@@ -4827,7 +4831,7 @@ void MacroAssembler::JumpIfBlack(Register object,
   movp(rcx, mask_scratch);
   // Make rcx into a mask that covers both marking bits using the operation
   // rcx = mask | (mask << 1).
-  lea(rcx, Operand(mask_scratch, mask_scratch, times_2, 0));
+  leap(rcx, Operand(mask_scratch, mask_scratch, times_2, 0));
   // Note that we are using a 4-byte aligned 8-byte load.
   and_(rcx, Operand(bitmap_scratch, MemoryChunk::kHeaderSize));
   cmpp(mask_scratch, rcx);
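Note: the comment above asks for rcx = mask | (mask << 1), while the leap actually computes mask_scratch + 2 * mask_scratch. The two agree because the mark-bit mask here has a single bit set (it is about to be widened to cover both marking bits, as the comment implies), so the addends occupy disjoint bits and the addition cannot carry. Worked example (illustrative):

    mask            = 0b00100
    mask << 1       = 0b01000
    mask + 2 * mask = 0b01100  ==  mask | (mask << 1)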
@@ -5032,7 +5036,7 @@ void MacroAssembler::TestJSArrayForAllocationMemento(
   ExternalReference new_space_allocation_top =
       ExternalReference::new_space_allocation_top_address(isolate());
 
-  lea(scratch_reg, Operand(receiver_reg,
+  leap(scratch_reg, Operand(receiver_reg,
       JSArray::kSize + AllocationMemento::kSize - kHeapObjectTag));
   Move(kScratchRegister, new_space_start);
   cmpp(scratch_reg, kScratchRegister);