Index: src/x64/code-stubs-x64.cc
===================================================================
--- src/x64/code-stubs-x64.cc (revision 15486)
+++ src/x64/code-stubs-x64.cc (working copy)
@@ -227,6 +227,11 @@
#define __ ACCESS_MASM(masm)
+#define __k __
+#define __a __
+#define __q __
+#define __s __
+#define __n __
void HydrogenCodeStub::GenerateLightweightMiss(MacroAssembler* masm) {
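
A note on the five aliases just defined: on x64 they all expand to the plain `__` emitter, so every annotated site below assembles exactly as before. Their only purpose is to classify instructions that an X32 build has to treat differently. Judging from their use in this patch, `__k` marks operations that must stay quadword-sized, `__a` marks loads of caller-pushed arguments, `__q` and `__s` mark other quadword stack-slot accesses, and `__n` marks moves of 64-bit immediates with relocation info. A minimal sketch of the idea; the X32 branch and its comments are illustrative assumptions, not part of this patch:

// Sketch only. On x64 every marker is the plain emitter; an X32 build
// would redefine the markers it cares about while leaving unannotated
// sites untouched.
#ifndef V8_TARGET_ARCH_X32
#define __k __   // must remain a 64-bit (quadword) operation
#define __a __   // stack access to a caller-pushed argument
#define __q __   // other quadword-sized stack access
#define __s __   // outgoing stack-argument slot
#define __n __   // 64-bit immediate with relocation info
#else
// Hypothetical: emit adjusted forms here, e.g. route __a through offsets
// that account for the 8-byte return address among 4-byte pointer slots.
#endif
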
@@ -267,9 +272,9 @@
__ Ret();
__ bind(&call_builtin);
- __ pop(rcx); // Pop return address.
+ __k pop(rcx); // Pop return address.
__ push(rax);
- __ push(rcx); // Push return address.
+ __k push(rcx); // Push return address.
__ InvokeBuiltin(Builtins::TO_NUMBER, JUMP_FUNCTION);
}
@@ -285,7 +290,7 @@
__ IncrementCounter(counters->fast_new_closure_total(), 1);
// Get the function info from the stack.
- __ movq(rdx, Operand(rsp, 1 * kPointerSize));
+ __a movq(rdx, Operand(rsp, 1 * kPointerSize));
int map_index = Context::FunctionMapIndex(language_mode_, is_generator_);
@@ -393,17 +398,17 @@
__ ret(1 * kPointerSize);
__ bind(&restore);
- __ movq(rdx, Operand(rsp, 1 * kPointerSize));
+ __a movq(rdx, Operand(rsp, 1 * kPointerSize));
__ jmp(&install_unoptimized);
// Create a new closure through the slower runtime call.
__ bind(&gc);
- __ pop(rcx); // Temporarily remove return address.
+ __k pop(rcx); // Temporarily remove return address.
__ pop(rdx);
__ push(rsi);
__ push(rdx);
__ PushRoot(Heap::kFalseValueRootIndex);
- __ push(rcx); // Restore return address.
+ __k push(rcx); // Restore return address.
__ TailCallRuntime(Runtime::kNewClosure, 3, 1);
}
@@ -416,7 +421,7 @@
rax, rbx, rcx, &gc, TAG_OBJECT);
// Get the function from the stack.
- __ movq(rcx, Operand(rsp, 1 * kPointerSize));
+ __a movq(rcx, Operand(rsp, 1 * kPointerSize));
// Set up the object header.
__ LoadRoot(kScratchRegister, Heap::kFunctionContextMapRootIndex);
@@ -462,10 +467,10 @@
rax, rbx, rcx, &gc, TAG_OBJECT);
// Get the function from the stack.
- __ movq(rcx, Operand(rsp, 1 * kPointerSize));
+ __a movq(rcx, Operand(rsp, 1 * kPointerSize));
// Get the serialized scope info from the stack.
- __ movq(rbx, Operand(rsp, 2 * kPointerSize));
+ __a movq(rbx, Operand(rsp, 2 * kPointerSize));
// Set up the object header.
__ LoadRoot(kScratchRegister, Heap::kBlockContextMapRootIndex);
@@ -589,14 +594,14 @@
Register double_value = rdi;
Label done, exponent_63_plus;
// Get double and extract exponent.
- __ movq(double_value, FieldOperand(source, HeapNumber::kValueOffset));
+ __k movq(double_value, FieldOperand(source, HeapNumber::kValueOffset));
// Clear result preemptively, in case we need to return zero.
__ xorl(result, result);
__ movq(xmm0, double_value); // Save copy in xmm0 in case we need it there.
// Double to remove sign bit, shift exponent down to least significant bits,
// and subtract bias to get the unshifted, unbiased exponent.
- __ lea(double_exponent, Operand(double_value, double_value, times_1, 0));
- __ shr(double_exponent, Immediate(64 - HeapNumber::kExponentBits));
+ __k lea(double_exponent, Operand(double_value, double_value, times_1, 0));
+ __k shr(double_exponent, Immediate(64 - HeapNumber::kExponentBits));
__ subl(double_exponent, Immediate(HeapNumber::kExponentBias));
// Check whether the exponent is too big for a 63 bit unsigned integer.
__ cmpl(double_exponent, Immediate(63));
@@ -617,7 +622,7 @@
// the least significant exponent-52 bits.
// Negate low bits of mantissa if value is negative.
- __ addq(double_value, double_value); // Move sign bit to carry.
+ __k addq(double_value, double_value); // Move sign bit to carry.
__ sbbl(result, result); // And convert carry to -1 in result register.
// If result is -1 (the value was negative), compute (double_value-1)^-1;
// otherwise (double_value-0)^0.
__ addl(double_value, result);
@@ -662,14 +667,14 @@
void UnaryOpStub::GenerateTypeTransition(MacroAssembler* masm) {
- __ pop(rcx); // Save return address.
+ __k pop(rcx); // Save return address.
__ push(rax); // the operand
__ Push(Smi::FromInt(op_));
__ Push(Smi::FromInt(mode_));
__ Push(Smi::FromInt(operand_type_));
- __ push(rcx); // Push return address.
+ __k push(rcx); // Push return address.
// Patch the caller to an appropriate specialized stub and return the
// operation result to the caller of the stub.
@@ -780,8 +785,8 @@
// Operand is a float, negate its value by flipping the sign bit.
if (mode_ == UNARY_OVERWRITE) {
__ Set(kScratchRegister, 0x01);
- __ shl(kScratchRegister, Immediate(63));
- __ xor_(FieldOperand(rax, HeapNumber::kValueOffset), kScratchRegister);
+ __k shl(kScratchRegister, Immediate(63));
+ __k xor_(FieldOperand(rax, HeapNumber::kValueOffset), kScratchRegister);
} else {
// Allocate a heap number before calculating the answer,
// so we don't have an untagged double around during GC.
@@ -801,11 +806,11 @@
// rcx: allocated 'empty' number
// Copy the double value to the new heap number, flipping the sign.
- __ movq(rdx, FieldOperand(rax, HeapNumber::kValueOffset));
+ __k movq(rdx, FieldOperand(rax, HeapNumber::kValueOffset));
__ Set(kScratchRegister, 0x01);
- __ shl(kScratchRegister, Immediate(63));
- __ xor_(rdx, kScratchRegister); // Flip sign.
- __ movq(FieldOperand(rcx, HeapNumber::kValueOffset), rdx);
+ __k shl(kScratchRegister, Immediate(63));
+ __k xor_(rdx, kScratchRegister); // Flip sign.
+ __k movq(FieldOperand(rcx, HeapNumber::kValueOffset), rdx);
__ movq(rax, rcx);
}
__ ret(0);
@@ -819,6 +824,7 @@
Heap::kHeapNumberMapRootIndex);
__ j(not_equal, slow);
+#ifndef V8_TARGET_ARCH_X32
// Convert the heap number in rax to an untagged integer, also in rax.
IntegerConvert(masm, rax, rax);
@@ -826,6 +832,47 @@
__ notl(rax);
__ Integer32ToSmi(rax, rax);
__ ret(0);
+#else
+ // Convert the heap number in rax to an untagged integer in rcx.
+ IntegerConvert(masm, rcx, rax);
+
+ // Do the bitwise operation and smi tag the result.
+ Label try_float;
+ __ notl(rcx);
+ __ cmpl(rcx, Immediate(0xc0000000));
+ __ j(sign, &try_float, Label::kNear);
+ __ Integer32ToSmi(rax, rcx);
+ __ ret(0);
+
+ // Try to store the result in a heap number.
+ __ bind(&try_float);
+ if (mode_ == UNARY_NO_OVERWRITE) {
+ Label slow_allocate_heapnumber, heapnumber_allocated;
+ __ movl(rbx, rax);
+ __ AllocateHeapNumber(rax, kScratchRegister, &slow_allocate_heapnumber);
+ __ jmp(&heapnumber_allocated);
+
+ __ bind(&slow_allocate_heapnumber);
+ {
+ FrameScope scope(masm, StackFrame::INTERNAL);
+ // Push the original HeapNumber on the stack. The integer value can't
+ // be stored since it's untagged and not in the smi range (so we can't
+ // smi-tag it). We'll recalculate the value after the GC instead.
+ __ Push(rbx);
+ __ CallRuntime(Runtime::kNumberAlloc, 0);
+ // New HeapNumber is in rax.
+ __ Pop(rbx);
+ }
+ // Recalculate the bit-not value.
+ IntegerConvert(masm, rcx, rbx);
+ __ notl(rcx);
+
+ __ bind(&heapnumber_allocated);
+ }
+ __ cvtlsi2sd(xmm0, rcx);
+ __ movsd(FieldOperand(rax, HeapNumber::kValueOffset), xmm0);
+ __ ret(0);
+#endif
}
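
Background for the X32 branch above: smi payloads there are 31 bits (see the STATIC_ASSERT(kSmiValueSize == 31) later in this patch), so an untagged int32 fits a smi only in [-2^30, 2^30 - 1]. Subtracting 0xC0000000 maps exactly the out-of-range values onto the negative 32-bit range, which is why a single cmpl followed by j(sign, &try_float) performs the whole range check. A minimal sketch of the test, not part of the patch:

#include <cstdint>

// True iff v fits a 31-bit smi payload; mirrors
// cmpl(rcx, Immediate(0xc0000000)); j(sign, ...) above.
static inline bool FitsSmi31(int32_t v) {
  return static_cast<int32_t>(static_cast<uint32_t>(v) - 0xC0000000u) >= 0;
}
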
@@ -866,9 +913,9 @@
void UnaryOpStub::GenerateGenericCodeFallback(MacroAssembler* masm) {
// Handle the slow case by jumping to the JavaScript builtin.
- __ pop(rcx); // pop return address
+ __k pop(rcx); // pop return address
__ push(rax);
- __ push(rcx); // push return address
+ __k push(rcx); // push return address
switch (op_) {
case Token::SUB:
__ InvokeBuiltin(Builtins::UNARY_MINUS, JUMP_FUNCTION);
@@ -900,13 +947,13 @@
void BinaryOpStub::GenerateTypeTransition(MacroAssembler* masm) {
- __ pop(rcx); // Save return address.
+ __k pop(rcx); // Save return address.
__ push(rdx);
__ push(rax);
// Left and right arguments are now on top.
__ Push(Smi::FromInt(MinorKey()));
- __ push(rcx); // Push return address.
+ __k push(rcx); // Push return address.
// Patch the caller to an appropriate specialized stub and return the
// operation result to the caller of the stub.
@@ -930,10 +977,17 @@
// We only generate heapnumber answers for overflowing calculations
// for the four basic arithmetic operations and logical right shift by 0.
+#ifndef V8_TARGET_ARCH_X32
bool generate_inline_heapnumber_results =
(allow_heapnumber_results == BinaryOpStub::ALLOW_HEAPNUMBER_RESULTS) &&
(op == Token::ADD || op == Token::SUB ||
op == Token::MUL || op == Token::DIV || op == Token::SHR);
+#else
+ bool generate_inline_heapnumber_results =
+ (allow_heapnumber_results == BinaryOpStub::ALLOW_HEAPNUMBER_RESULTS) &&
+ (op == Token::ADD || op == Token::SUB || op == Token::SHL ||
+ op == Token::MUL || op == Token::DIV || op == Token::SHR);
+#endif
// Smi check of both operands. If op is BIT_OR, the check is delayed
// until after the OR operation.
@@ -996,7 +1050,12 @@
break;
case Token::SHL:
+#ifndef V8_TARGET_ARCH_X32
__ SmiShiftLeft(left, left, right);
+#else
+ __ movl(kScratchRegister, left);
+ __ SmiShiftLeft(left, left, right, &use_fp_on_smis);
+#endif
__ movq(rax, left);
break;
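
Context for the SHL change: JavaScript's << always produces an int32, which on x64 always fits the 32-bit smi payload, so SmiShiftLeft cannot fail there. With 31-bit smis the int32 result can fall outside the representable range, hence the extra bailout label and the copy of left parked in kScratchRegister for the fallback path. A sketch of the overflow condition, not part of the patch:

#include <cstdint>

// True iff (left << shift), with JS int32 semantics, still fits a 31-bit smi.
static inline bool ShlFitsSmi31(int32_t left, int32_t shift) {
  int32_t result =
      static_cast<int32_t>(static_cast<uint32_t>(left) << (shift & 31));
  return result >= -(1 << 30) && result <= (1 << 30) - 1;
}
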
@@ -1006,6 +1065,9 @@
break;
case Token::SHR:
+#ifdef V8_TARGET_ARCH_X32
+ __ movl(kScratchRegister, left);
+#endif
__ SmiShiftLogicalRight(left, left, right, &use_fp_on_smis);
__ movq(rax, left);
break;
@@ -1031,10 +1093,22 @@
if (generate_inline_heapnumber_results) {
__ AllocateHeapNumber(rcx, rbx, slow);
Comment perform_float(masm, "-- Perform float operation on smis");
+#ifndef V8_TARGET_ARCH_X32
if (op == Token::SHR) {
__ SmiToInteger32(left, left);
__ cvtqsi2sd(xmm0, left);
} else {
+#else
+ if (op == Token::SHL) {
+ __ cvtlsi2sd(xmm0, left);
+ } else if (op == Token::SHR) {
+ // left holds the result of MacroAssembler::SmiShiftLogicalRight.
+ // A logical shift by 0 can turn a signed integer into an unsigned one,
+ // and a shift by 1 can yield a value above 2^30 - 1, so convert the
+ // result as an unsigned integer (cvtqsi2sd).
+ __ cvtqsi2sd(xmm0, left);
+ } else {
+#endif
FloatingPointHelper::LoadSSE2SmiOperands(masm);
switch (op) {
case Token::ADD: __ addsd(xmm0, xmm1); break;
@@ -1048,6 +1122,14 @@
__ movq(rax, rcx);
__ ret(0);
} else {
+#ifdef V8_TARGET_ARCH_X32
+ // Restore the original left value from kScratchRegister for the stub
+ // call; kScratchRegister is killed neither by
+ // MacroAssembler::SmiShiftLogicalRight nor by MacroAssembler::SmiShiftLeft.
+ if (op == Token::SHL || op == Token::SHR) {
+ __ movl(left, kScratchRegister);
+ }
+#endif
__ jmp(&fail);
}
}
@@ -1086,6 +1168,11 @@
Label* allocation_failure,
Label* non_numeric_failure,
Token::Value op,
+#ifdef V8_TARGET_ARCH_X32
+ BinaryOpIC::TypeInfo
+ result_type,
+ Label* non_int32_failure,
+#endif
OverwriteMode mode) {
switch (op) {
case Token::ADD:
@@ -1101,6 +1188,18 @@
case Token::DIV: __ divsd(xmm0, xmm1); break;
default: UNREACHABLE();
}
+#ifdef V8_TARGET_ARCH_X32
+ if (non_int32_failure != NULL) {
+ if (result_type <= BinaryOpIC::INT32) {
+ __ cvttsd2si(kScratchRegister, xmm0);
+ __ cvtlsi2sd(xmm2, kScratchRegister);
+ __ pcmpeqd(xmm2, xmm0);
+ __ movmskpd(rcx, xmm2);
+ __ testl(rcx, Immediate(1));
+ __ j(zero, non_int32_failure);
+ }
+ }
+#endif
BinaryOpStub_GenerateHeapResultAllocation(
masm, allocation_failure, mode);
__ movsd(FieldOperand(rax, HeapNumber::kValueOffset), xmm0);
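
The cvttsd2si/cvtlsi2sd/pcmpeqd/movmskpd sequence above is a round-trip exactness test: truncate the double to int32, convert back, and compare the bit patterns, so NaNs, fractions, out-of-range values, and -0.0 all branch to non_int32_failure instead of staying typed as INT32. Its intended semantics in plain C++, as a sketch rather than the stub's code:

#include <cstdint>
#include <cstring>

// True iff d survives an int32 round trip with identical bits (rejects -0.0).
static inline bool IsExactInt32(double d) {
  if (!(d >= -2147483648.0 && d <= 2147483647.0)) return false;  // also NaN
  int32_t i = static_cast<int32_t>(d);            // cvttsd2si (truncation)
  double back = static_cast<double>(i);           // cvtlsi2sd
  return std::memcmp(&back, &d, sizeof d) == 0;   // pcmpeqd-style bit compare
}
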
@@ -1120,6 +1219,9 @@
case Token::SHR: {
Label non_smi_shr_result;
Register heap_number_map = r9;
+#ifdef V8_TARGET_ARCH_X32
+ __ movl(kScratchRegister, rax);
+#endif
__ LoadRoot(heap_number_map, Heap::kHeapNumberMapRootIndex);
FloatingPointHelper::LoadAsIntegers(masm, non_numeric_failure,
heap_number_map);
@@ -1131,19 +1233,33 @@
case Token::SHL: __ shll_cl(rax); break;
case Token::SHR: {
__ shrl_cl(rax);
+#ifndef V8_TARGET_ARCH_X32
// Check if result is negative. This can only happen for a shift
// by zero.
__ testl(rax, rax);
__ j(negative, &non_smi_shr_result);
+#endif
break;
}
default: UNREACHABLE();
}
+#ifndef V8_TARGET_ARCH_X32
STATIC_ASSERT(kSmiValueSize == 32);
+#else
+ STATIC_ASSERT(kSmiValueSize == 31);
+ if (op == Token::SHR) {
+ __ testl(rax, Immediate(0xc0000000));
+ __ j(not_zero, &non_smi_shr_result);
+ } else {
+ __ cmpl(rax, Immediate(0xc0000000));
+ __ j(negative, &non_smi_shr_result, Label::kNear);
+ }
+#endif
// Tag smi result and return.
__ Integer32ToSmi(rax, rax);
__ Ret();
+#ifndef V8_TARGET_ARCH_X32
// Logical shift right can produce an unsigned int32 that is not
// an int32, and so is not in the smi range. Allocate a heap number
// in that case.
@@ -1173,6 +1289,37 @@
__ Integer32ToSmi(rdx, rbx);
__ jmp(allocation_failure);
}
+#else
+ __ bind(&non_smi_shr_result);
+ Label allocation_failed;
+ __ movl(rbx, rax); // rbx holds result value (uint32 value as int64).
+ // Allocate heap number in new space.
+ // Not using AllocateHeapNumber macro in order to reuse
+ // already loaded heap_number_map.
+ __ Allocate(HeapNumber::kSize, rax, r8, no_reg, &allocation_failed,
+ TAG_OBJECT);
+ // Set the map.
+ __ AssertRootValue(heap_number_map,
+ Heap::kHeapNumberMapRootIndex,
+ "HeapNumberMap register clobbered.");
+ __ movl(FieldOperand(rax, HeapObject::kMapOffset),
+ heap_number_map);
+ if (op == Token::SHR) {
+ __ cvtqsi2sd(xmm0, rbx);
+ } else {
+ // All other operations return a signed int32, so we
+ // use cvtlsi2sd here to retain the sign bit.
+ __ cvtlsi2sd(xmm0, rbx);
+ }
+ __ movsd(FieldOperand(rax, HeapNumber::kValueOffset), xmm0);
+ __ Ret();
+
+ __ bind(&allocation_failed);
+ // Restore the right operand from kScratchRegister.
+ // Left operand is in rdx, not changed in this function.
+ __ movl(rax, kScratchRegister);
+ __ jmp(allocation_failure);
+#endif
break;
}
default: UNREACHABLE(); break;
@@ -1189,10 +1336,10 @@
MacroAssembler* masm) {
// Push arguments, but ensure they are under the return address
// for a tail call.
- __ pop(rcx);
+ __k pop(rcx);
__ push(rdx);
__ push(rax);
- __ push(rcx);
+ __k push(rcx);
}
@@ -1268,9 +1415,29 @@
void BinaryOpStub::GenerateInt32Stub(MacroAssembler* masm) {
+#ifndef V8_TARGET_ARCH_X32
// The int32 case is identical to the Smi case. We avoid creating this
// ic state on x64.
UNREACHABLE();
+#else
+ ASSERT(Max(left_type_, right_type_) == BinaryOpIC::INT32);
+
+ Label gc_required, not_number, not_int32;
+ BinaryOpStub_GenerateFloatingPointCode(masm, &gc_required, &not_number,
+ op_, result_type_, &not_int32, mode_);
+
+ __ bind(&not_number);
+ __ bind(&not_int32);
+ GenerateTypeTransition(masm);
+
+ __ bind(&gc_required);
+ {
+ FrameScope scope(masm, StackFrame::INTERNAL);
+ GenerateRegisterArgsPush(masm);
+ GenerateCallRuntime(masm);
+ }
+ __ Ret();
+#endif
}
@@ -1356,7 +1523,7 @@
__ cvtlsi2sd(xmm1, scratch2);
__ movq(scratch1, xmm0);
__ movq(scratch2, xmm1);
- __ cmpq(scratch1, scratch2);
+ __k cmpq(scratch1, scratch2);
__ j(not_equal, fail);
__ bind(&ok);
}
@@ -1375,8 +1542,13 @@
BinaryOpStub_CheckSmiInput(masm, rax, &not_number);
}
+#ifndef V8_TARGET_ARCH_X32
BinaryOpStub_GenerateFloatingPointCode(
masm, &gc_required, &not_number, op_, mode_);
+#else
+ BinaryOpStub_GenerateFloatingPointCode(
+ masm, &gc_required, &not_number, op_, result_type_, NULL, mode_);
+#endif
__ bind(&not_number);
GenerateTypeTransition(masm);
@@ -1397,8 +1569,14 @@
BinaryOpStub_GenerateSmiCode(
masm, &call_runtime, ALLOW_HEAPNUMBER_RESULTS, op_);
+#ifndef V8_TARGET_ARCH_X32
BinaryOpStub_GenerateFloatingPointCode(
masm, &call_runtime, &call_string_add_or_runtime, op_, mode_);
+#else
+ BinaryOpStub_GenerateFloatingPointCode(
+ masm, &call_runtime, &call_string_add_or_runtime, op_,
+ result_type_, NULL, mode_);
+#endif
__ bind(&call_string_add_or_runtime);
if (op_ == Token::ADD) {
@@ -1481,7 +1659,7 @@
if (tagged) {
Label input_not_smi, loaded;
// Test that rax is a number.
- __ movq(rax, Operand(rsp, kPointerSize));
+ __a movq(rax, Operand(rsp, 1 * kPointerSize));
__ JumpIfNotSmi(rax, &input_not_smi, Label::kNear);
// Input is a smi. Untag and load it onto the FPU stack.
// Then load the bits of the double into rbx.
@@ -1503,8 +1681,8 @@
// Input is a HeapNumber. Push it on the FPU stack and load its
// bits into rbx.
__ fld_d(FieldOperand(rax, HeapNumber::kValueOffset));
- __ movq(rbx, FieldOperand(rax, HeapNumber::kValueOffset));
- __ movq(rdx, rbx);
+ __k movq(rbx, FieldOperand(rax, HeapNumber::kValueOffset));
+ __k movq(rdx, rbx);
__ bind(&loaded);
} else { // UNTAGGED.
@@ -1521,7 +1699,7 @@
// h ^= h >> 8;
// h = h & (cacheSize - 1);
// or h = (h0 ^ (h0 >> 8) ^ (h0 >> 16) ^ (h0 >> 24)) & (cacheSize - 1)
- __ sar(rdx, Immediate(32));
+ __k sar(rdx, Immediate(32));
__ xorl(rdx, rbx);
__ movl(rcx, rdx);
__ movl(rax, rdx);
@@ -1565,12 +1743,18 @@
CHECK_EQ(2 * kIntSize, static_cast<int>(elem_out - elem_start));
}
#endif
+#ifndef V8_TARGET_ARCH_X32
// Find the address of the rcx'th entry in the cache, i.e., &rax[rcx*16].
__ addl(rcx, rcx);
__ lea(rcx, Operand(rax, rcx, times_8, 0));
+#else
+ // Find the address of the rcx'th entry in the cache, i.e., &rax[rcx*12].
+ __ leal(rcx, Operand(rcx, rcx, times_2, 0));
+ __ leal(rcx, Operand(rax, rcx, times_4, 0));
+#endif
// Check if cache matches: Double value is stored in uint32_t[2] array.
Label cache_miss;
- __ cmpq(rbx, Operand(rcx, 0));
+ __k cmpq(rbx, Operand(rcx, 0));
__ j(not_equal, &cache_miss, Label::kNear);
// Cache hit!
Counters* counters = masm->isolate()->counters();
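
Why the X32 cache entry stride is 12: with 4-byte pointers an entry is two uint32 words holding the input double's bits plus a 4-byte output pointer, and 12 is not a valid addressing-mode scale, so the index is first tripled and then scaled by 4. The arithmetic as a sketch, not part of the patch:

#include <cstdint>

// base + index * 12, formed the same way as the two leal instructions above.
static inline uintptr_t CacheEntryAddress(uintptr_t base, uintptr_t index) {
  uintptr_t tripled = index + index * 2;  // leal(rcx, Operand(rcx, rcx, times_2, 0))
  return base + tripled * 4;              // leal(rcx, Operand(rax, rcx, times_4, 0))
}
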
@@ -1595,7 +1779,7 @@
__ fld_d(FieldOperand(rax, HeapNumber::kValueOffset));
}
GenerateOperation(masm, type_);
- __ movq(Operand(rcx, 0), rbx);
+ __k movq(Operand(rcx, 0), rbx);
__ movq(Operand(rcx, 2 * kIntSize), rax);
__ fstp_d(FieldOperand(rax, HeapNumber::kValueOffset));
if (tagged) {
@@ -1678,9 +1862,9 @@
Label in_range;
// If argument is outside the range -2^63..2^63, fsin/cos doesn't
// work. We must reduce it to the appropriate range.
- __ movq(rdi, rbx);
+ __k movq(rdi, rbx);
// Move exponent and sign bits to low bits.
- __ shr(rdi, Immediate(HeapNumber::kMantissaBits));
+ __k shr(rdi, Immediate(HeapNumber::kMantissaBits));
// Remove sign bit.
__ andl(rdi, Immediate((1 << HeapNumber::kExponentBits) - 1));
int supported_exponent_limit = (63 + HeapNumber::kExponentBias);
@@ -1693,17 +1877,17 @@
// Input is +/-Infinity or NaN. Result is NaN.
__ fstp(0);
// NaN is represented by 0x7ff8000000000000.
- __ subq(rsp, Immediate(kPointerSize));
+ __ subq(rsp, Immediate(kDoubleSize));
__ movl(Operand(rsp, 4), Immediate(0x7ff80000));
__ movl(Operand(rsp, 0), Immediate(0x00000000));
__ fld_d(Operand(rsp, 0));
- __ addq(rsp, Immediate(kPointerSize));
+ __ addq(rsp, Immediate(kDoubleSize));
__ jmp(&done);
__ bind(&non_nan_result);
// Use fpmod to restrict argument to the range +/-2*PI.
- __ movq(rdi, rax); // Save rax before using fnstsw_ax.
+ __k movq(rdi, rax); // Save rax before using fnstsw_ax.
__ fldpi();
__ fadd(0);
__ fld(1);
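
For reference, the two movl stores in the NaN branch above assemble the quiet-NaN pattern 0x7ff8000000000000 in the freshly reserved kDoubleSize slot, low half at [rsp+0] and high half at [rsp+4] on the little-endian stack, before fld_d reloads it. The equivalent construction in C++, as a sketch:

#include <cstdint>
#include <cstring>

// Assemble 0x7ff8000000000000 from its two 32-bit halves, as the stub does.
static inline double QuietNaN() {
  const uint64_t bits = (uint64_t{0x7ff80000} << 32) | uint64_t{0x00000000};
  double d;
  std::memcpy(&d, &bits, sizeof d);
  return d;
}
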
@@ -1936,8 +2120,12 @@
// comparing to the original double's bits.
__ cvtlsi2sd(xmm1, smi_result);
__ movq(kScratchRegister, xmm1);
- __ cmpq(scratch2, kScratchRegister);
+ __k cmpq(scratch2, kScratchRegister);
__ j(not_equal, on_not_smis);
+#ifdef V8_TARGET_ARCH_X32
+ __ cmpl(smi_result, Immediate(0xc0000000));
+ __ j(negative, on_not_smis);
+#endif
__ Integer32ToSmi(first, smi_result);
__ bind(&first_done);
@@ -1955,8 +2143,12 @@
__ cvttsd2siq(smi_result, xmm0);
__ cvtlsi2sd(xmm1, smi_result);
__ movq(kScratchRegister, xmm1);
- __ cmpq(scratch2, kScratchRegister);
+ __k cmpq(scratch2, kScratchRegister);
__ j(not_equal, on_not_smis);
+#ifdef V8_TARGET_ARCH_X32
+ __ cmpl(smi_result, Immediate(0xc0000000));
+ __ j(negative, on_not_smis);
+#endif
__ Integer32ToSmi(second, smi_result);
if (on_success != NULL) {
__ jmp(on_success);
@@ -2003,8 +2195,8 @@
// The exponent and base are supplied as arguments on the stack.
// This can only happen if the stub is called from non-optimized code.
// Load input parameters from stack.
- __ movq(base, Operand(rsp, 2 * kPointerSize));
- __ movq(exponent, Operand(rsp, 1 * kPointerSize));
+ __a movq(base, Operand(rsp, 2 * kPointerSize));
+ __a movq(exponent, Operand(rsp, 1 * kPointerSize));
__ JumpIfSmi(base, &base_is_smi, Label::kNear);
__ CompareRoot(FieldOperand(base, HeapObject::kMapOffset),
Heap::kHeapNumberMapRootIndex);
@@ -2347,10 +2539,10 @@
__ JumpIfNotSmi(value, &miss);
// Prepare tail call to StoreIC_ArrayLength.
- __ pop(scratch);
+ __k pop(scratch);
__ push(receiver);
__ push(value);
- __ push(scratch); // return address
+ __k push(scratch); // return address
ExternalReference ref =
ExternalReference(IC_Utility(IC::kStoreIC_ArrayLength), masm->isolate());
@@ -2368,7 +2560,11 @@
// The displacement is used for skipping the frame pointer on the
// stack. It is the offset of the last parameter (if any) relative
// to the frame pointer.
+#ifndef V8_TARGET_ARCH_X32
static const int kDisplacement = 1 * kPointerSize;
+#else
+ static const int kDisplacement = 2 * kHWRegSize - 1 * kPointerSize;
+#endif
// Check that the key is a smi.
Label slow;
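
One reading of the X32 kDisplacement: the saved frame pointer and the return address each occupy a full 8-byte hardware register slot (kHWRegSize) even though tagged pointers are only 4 bytes, so the last parameter sits 2*8 - 4 = 12 bytes up, while on x64 the same formula collapses to 2*8 - 8 = 8, matching the original 1 * kPointerSize. A sketch under those size assumptions (the constants below are illustrative, not V8's):

// Hypothetical sizes for illustration; the real kHWRegSize/kPointerSize
// come from V8's platform headers.
static const int kHWRegSizeX32 = 8;    // return address / saved rbp slot
static const int kPointerSizeX32 = 4;  // tagged pointer
static const int kDisplacementX32 =
    2 * kHWRegSizeX32 - 1 * kPointerSizeX32;  // == 12, as in the patch
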
@@ -2415,9 +2611,9 @@
// Slow-case: Handle non-smi or out-of-bounds access to arguments
// by calling the runtime system.
__ bind(&slow);
- __ pop(rbx); // Return address.
+ __k pop(rbx); // Return address.
__ push(rdx);
- __ push(rbx);
+ __k push(rbx);
__ TailCallRuntime(Runtime::kGetArgumentsProperty, 1, 1);
}
@@ -2434,7 +2630,7 @@
Factory* factory = masm->isolate()->factory();
- __ SmiToInteger64(rbx, Operand(rsp, 1 * kPointerSize));
+ __a SmiToInteger64(rbx, Operand(rsp, 1 * kPointerSize));
// rbx = parameter count (untagged)
// Check if the calling frame is an arguments adaptor frame.
@@ -2456,7 +2652,7 @@
ArgumentsAdaptorFrameConstants::kLengthOffset));
__ lea(rdx, Operand(rdx, rcx, times_pointer_size,
StandardFrameConstants::kCallerSPOffset));
- __ movq(Operand(rsp, 2 * kPointerSize), rdx);
+ __a movq(Operand(rsp, 2 * kPointerSize), rdx);
// rbx = parameter count (untagged)
// rcx = argument count (untagged)
@@ -2517,7 +2713,7 @@
// Set up the callee in-object property.
STATIC_ASSERT(Heap::kArgumentsCalleeIndex == 1);
- __ movq(rdx, Operand(rsp, 3 * kPointerSize));
+ __a movq(rdx, Operand(rsp, 3 * kPointerSize));
__ movq(FieldOperand(rax, JSObject::kHeaderSize +
Heap::kArgumentsCalleeIndex * kPointerSize),
rdx);
@@ -2568,7 +2764,7 @@
// Load tagged parameter count into r9.
__ Integer32ToSmi(r9, rbx);
__ Move(r8, Smi::FromInt(Context::MIN_CONTEXT_SLOTS));
- __ addq(r8, Operand(rsp, 1 * kPointerSize));
+ __a addq(r8, Operand(rsp, 1 * kPointerSize));
__ subq(r8, r9);
__ Move(r11, factory->the_hole_value());
__ movq(rdx, rdi);
@@ -2607,7 +2803,7 @@
Label arguments_loop, arguments_test;
__ movq(r8, rbx);
- __ movq(rdx, Operand(rsp, 2 * kPointerSize));
+ __a movq(rdx, Operand(rsp, 2 * kPointerSize));
// Untag rcx for the loop below.
__ SmiToInteger64(rcx, rcx);
__ lea(kScratchRegister, Operand(r8, times_pointer_size, 0));
@@ -2634,7 +2830,7 @@
// rcx = argument count (untagged)
__ bind(&runtime);
__ Integer32ToSmi(rcx, rcx);
- __ movq(Operand(rsp, 1 * kPointerSize), rcx); // Patch argument count.
+ __a movq(Operand(rsp, 1 * kPointerSize), rcx); // Patch argument count.
__ TailCallRuntime(Runtime::kNewArgumentsFast, 3, 1);
}
@@ -2654,11 +2850,11 @@
// Patch the arguments.length and the parameters pointer.
__ movq(rcx, Operand(rdx, ArgumentsAdaptorFrameConstants::kLengthOffset));
- __ movq(Operand(rsp, 1 * kPointerSize), rcx);
+ __a movq(Operand(rsp, 1 * kPointerSize), rcx);
__ SmiToInteger64(rcx, rcx);
__ lea(rdx, Operand(rdx, rcx, times_pointer_size,
StandardFrameConstants::kCallerSPOffset));
- __ movq(Operand(rsp, 2 * kPointerSize), rdx);
+ __a movq(Operand(rsp, 2 * kPointerSize), rdx);
__ bind(&runtime);
__ TailCallRuntime(Runtime::kNewArgumentsFast, 3, 1);
@@ -2679,18 +2875,18 @@
__ j(equal, &adaptor_frame);
// Get the length from the frame.
- __ movq(rcx, Operand(rsp, 1 * kPointerSize));
+ __a movq(rcx, Operand(rsp, 1 * kPointerSize));
__ SmiToInteger64(rcx, rcx);
__ jmp(&try_allocate);
// Patch the arguments.length and the parameters pointer.
__ bind(&adaptor_frame);
__ movq(rcx, Operand(rdx, ArgumentsAdaptorFrameConstants::kLengthOffset));
- __ movq(Operand(rsp, 1 * kPointerSize), rcx);
+ __a movq(Operand(rsp, 1 * kPointerSize), rcx);
__ SmiToInteger64(rcx, rcx);
__ lea(rdx, Operand(rdx, rcx, times_pointer_size,
StandardFrameConstants::kCallerSPOffset));
- __ movq(Operand(rsp, 2 * kPointerSize), rdx);
+ __a movq(Operand(rsp, 2 * kPointerSize), rdx);
// Try the new space allocation. Start out with computing the size of
// the arguments object and the elements array.
@@ -2720,7 +2916,7 @@
// Get the length (smi tagged) and set that as an in-object property too.
STATIC_ASSERT(Heap::kArgumentsLengthIndex == 0);
- __ movq(rcx, Operand(rsp, 1 * kPointerSize));
+ __a movq(rcx, Operand(rsp, 1 * kPointerSize));
__ movq(FieldOperand(rax, JSObject::kHeaderSize +
Heap::kArgumentsLengthIndex * kPointerSize),
rcx);
@@ -2731,7 +2927,7 @@
__ j(zero, &done);
// Get the parameters pointer from the stack.
- __ movq(rdx, Operand(rsp, 2 * kPointerSize));
+ __a movq(rdx, Operand(rsp, 2 * kPointerSize));
// Set up the elements pointer in the allocated arguments object and
// initialize the header in the elements fixed array.
@@ -2780,10 +2976,17 @@
// rsp[24] : subject string
// rsp[32] : JSRegExp object
+#ifndef V8_TARGET_ARCH_X32
static const int kLastMatchInfoOffset = 1 * kPointerSize;
static const int kPreviousIndexOffset = 2 * kPointerSize;
static const int kSubjectOffset = 3 * kPointerSize;
static const int kJSRegExpOffset = 4 * kPointerSize;
+#else
+ static const int kLastMatchInfoOffset = 1 * kHWRegSize;
+ static const int kPreviousIndexOffset = 1 * kHWRegSize + 1 * kPointerSize;
+ static const int kSubjectOffset = 1 * kHWRegSize + 2 * kPointerSize;
+ static const int kJSRegExpOffset = 1 * kHWRegSize + 3 * kPointerSize;
+#endif
Label runtime;
// Ensure that a RegExp stack is allocated.
@@ -2955,11 +3158,11 @@
// Argument 9: Pass current isolate address.
__ LoadAddress(kScratchRegister,
ExternalReference::isolate_address(masm->isolate()));
- __ movq(Operand(rsp, (argument_slots_on_stack - 1) * kPointerSize),
+ __s movq(Operand(rsp, (argument_slots_on_stack - 1) * kPointerSize),
kScratchRegister);
// Argument 8: Indicate that this is a direct call from JavaScript.
- __ movq(Operand(rsp, (argument_slots_on_stack - 2) * kPointerSize),
+ __s movq(Operand(rsp, (argument_slots_on_stack - 2) * kPointerSize),
Immediate(1));
// Argument 7: Start (high end) of backtracking stack memory area.
@@ -2967,13 +3170,13 @@
__ movq(r9, Operand(kScratchRegister, 0));
__ movq(kScratchRegister, address_of_regexp_stack_memory_size);
__ addq(r9, Operand(kScratchRegister, 0));
- __ movq(Operand(rsp, (argument_slots_on_stack - 3) * kPointerSize), r9);
+ __s movq(Operand(rsp, (argument_slots_on_stack - 3) * kPointerSize), r9);
// Argument 6: Set the number of capture registers to zero to force global
// regexps to behave as non-global. This does not affect non-global regexps.
// Argument 6 is passed in r9 on Linux and on the stack on Windows.
#ifdef _WIN64
- __ movq(Operand(rsp, (argument_slots_on_stack - 4) * kPointerSize),
+ __s movq(Operand(rsp, (argument_slots_on_stack - 4) * kPointerSize),
Immediate(0));
#else
__ Set(r9, 0);
@@ -2984,7 +3187,7 @@
ExternalReference::address_of_static_offsets_vector(isolate));
// Argument 5 passed in r8 on Linux and on the stack on Windows.
#ifdef _WIN64
- __ movq(Operand(rsp, (argument_slots_on_stack - 5) * kPointerSize), r8);
+ __s movq(Operand(rsp, (argument_slots_on_stack - 5) * kPointerSize), r8);
#endif
// rdi: subject string
@@ -3214,7 +3417,7 @@
const int kMaxInlineLength = 100;
Label slowcase;
Label done;
- __ movq(r8, Operand(rsp, kPointerSize * 3));
+ __a movq(r8, Operand(rsp, 3 * kPointerSize));
__ JumpIfNotSmi(r8, &slowcase);
__ SmiToInteger32(rbx, r8);
__ cmpl(rbx, Immediate(kMaxInlineLength));
@@ -3252,11 +3455,11 @@
__ movq(FieldOperand(rax, JSObject::kElementsOffset), rcx);
// Set input, index and length fields from arguments.
- __ movq(r8, Operand(rsp, kPointerSize * 1));
+ __a movq(r8, Operand(rsp, 1 * kPointerSize));
__ movq(FieldOperand(rax, JSRegExpResult::kInputOffset), r8);
- __ movq(r8, Operand(rsp, kPointerSize * 2));
+ __a movq(r8, Operand(rsp, 2 * kPointerSize));
__ movq(FieldOperand(rax, JSRegExpResult::kIndexOffset), r8);
- __ movq(r8, Operand(rsp, kPointerSize * 3));
+ __a movq(r8, Operand(rsp, 3 * kPointerSize));
__ movq(FieldOperand(rax, JSArray::kLengthOffset), r8);
// Fill out the elements FixedArray.
@@ -3391,7 +3594,7 @@
void NumberToStringStub::Generate(MacroAssembler* masm) {
Label runtime;
- __ movq(rbx, Operand(rsp, kPointerSize));
+ __a movq(rbx, Operand(rsp, 1 * kPointerSize));
// Generate code to lookup number in the number string cache.
GenerateLookupNumberStringCache(masm, rbx, rax, r8, r9, false, &runtime);
@@ -3682,7 +3885,7 @@
}
// Push arguments below the return address to prepare jump to builtin.
- __ pop(rcx);
+ __k pop(rcx);
__ push(rdx);
__ push(rax);
@@ -3696,7 +3899,7 @@
}
// Restore return address on the stack.
- __ push(rcx);
+ __k push(rcx);
// Call the native; it returns -1 (less), 0 (equal), or 1 (greater)
// tagged as a small integer.
@@ -3806,14 +4009,14 @@
Label call;
// Get the receiver from the stack.
// +1 ~ return address
- __ movq(rax, Operand(rsp, (argc_ + 1) * kPointerSize));
+ __a movq(rax, Operand(rsp, (argc_ + 1) * kPointerSize));
// Call as function is indicated with the hole.
__ CompareRoot(rax, Heap::kTheHoleValueRootIndex);
__ j(not_equal, &call, Label::kNear);
// Patch the receiver on the stack with the global receiver object.
__ movq(rcx, GlobalObjectOperand());
__ movq(rcx, FieldOperand(rcx, GlobalObject::kGlobalReceiverOffset));
- __ movq(Operand(rsp, (argc_ + 1) * kPointerSize), rcx);
+ __a movq(Operand(rsp, (argc_ + 1) * kPointerSize), rcx);
__ bind(&call);
}
@@ -3859,9 +4062,9 @@
// Check for function proxy.
__ CmpInstanceType(rcx, JS_FUNCTION_PROXY_TYPE);
__ j(not_equal, &non_function);
- __ pop(rcx);
+ __k pop(rcx);
__ push(rdi); // put proxy as additional argument under return address
- __ push(rcx);
+ __k push(rcx);
__ Set(rax, argc_ + 1);
__ Set(rbx, 0);
__ SetCallKind(rcx, CALL_AS_METHOD);
@@ -3875,7 +4078,7 @@
// CALL_NON_FUNCTION expects the non-function callee as receiver (instead
// of the original receiver from the call site).
__ bind(&non_function);
- __ movq(Operand(rsp, (argc_ + 1) * kPointerSize), rdi);
+ __a movq(Operand(rsp, (argc_ + 1) * kPointerSize), rdi);
__ Set(rax, argc_);
__ Set(rbx, 0);
__ SetCallKind(rcx, CALL_AS_METHOD);
@@ -4067,8 +4270,8 @@
// Read result values stored on stack. Result is stored
// above the four argument mirror slots and the two
// Arguments object slots.
- __ movq(rax, Operand(rsp, 6 * kPointerSize));
- __ movq(rdx, Operand(rsp, 7 * kPointerSize));
+ __s movq(rax, Operand(rsp, 6 * kPointerSize));
+ __s movq(rdx, Operand(rsp, 7 * kPointerSize));
}
#endif
__ lea(rcx, Operand(rax, 1));
@@ -4177,7 +4380,7 @@
// Do full GC and retry runtime call one final time.
Failure* failure = Failure::InternalError();
- __ movq(rax, failure, RelocInfo::NONE64);
+ __n movq(rax, failure, RelocInfo::NONE64);
GenerateCore(masm,
&throw_normal_exception,
&throw_termination_exception,
@@ -4198,7 +4401,7 @@
isolate);
Label already_have_failure;
JumpIfOOM(masm, rax, kScratchRegister, &already_have_failure);
- __ movq(rax, Failure::OutOfMemoryException(0x1), RelocInfo::NONE64);
+ __n movq(rax, Failure::OutOfMemoryException(0x1), RelocInfo::NONE64);
__ bind(&already_have_failure);
__ Store(pending_exception, rax);
// Fall through to the next label.
@@ -4228,21 +4431,27 @@
// Scratch register is neither callee-save, nor an argument register on any
// platform. It's free to use at this point.
// Cannot use smi-register for loading yet.
+#ifndef V8_TARGET_ARCH_X32
__ movq(kScratchRegister,
reinterpret_cast<uint64_t>(Smi::FromInt(marker)),
RelocInfo::NONE64);
+#else
+ __ movl(kScratchRegister,
+ reinterpret_cast<uint32_t>(Smi::FromInt(marker)),
+ RelocInfo::NONE32);
+#endif
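
The narrower movl works because an X32 smi is just the 31-bit payload shifted left past a single tag bit, so every tagged smi fits in 32 bits and needs only a 32-bit relocation, whereas x64 keeps its 32-bit payload in the upper word and needs the full 64-bit movq. A sketch of the tagging, not part of the patch:

#include <cstdint>

// 31-bit smi tagging; valid for -2^30 <= value <= 2^30 - 1 (tag bit is 0).
static inline uint32_t TagSmi31(int32_t value) {
  return static_cast<uint32_t>(value) << 1;
}
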
__ push(kScratchRegister); // context slot
__ push(kScratchRegister); // function slot
// Save callee-saved registers (X64/Win64 calling conventions).
- __ push(r12);
- __ push(r13);
- __ push(r14);
- __ push(r15);
+ __k push(r12);
+ __k push(r13);
+ __k push(r14);
+ __k push(r15);
#ifdef _WIN64
- __ push(rdi); // Only callee save in Win64 ABI, argument in AMD64 ABI.
- __ push(rsi); // Only callee save in Win64 ABI, argument in AMD64 ABI.
+ __k push(rdi); // Only callee save in Win64 ABI, argument in AMD64 ABI.
+ __k push(rsi); // Only callee save in Win64 ABI, argument in AMD64 ABI.
#endif
- __ push(rbx);
+ __k push(rbx);
#ifdef _WIN64
// On Win64 XMM6-XMM15 are callee-save
@@ -4298,7 +4507,7 @@
ExternalReference pending_exception(Isolate::kPendingExceptionAddress,
isolate);
__ Store(pending_exception, rax);
- __ movq(rax, Failure::Exception(), RelocInfo::NONE64);
+ __n movq(rax, Failure::Exception(), RelocInfo::NONE64);
__ jmp(&exit);
// Invoke: Link this frame into the handler chain. There's only one
@@ -4362,16 +4571,16 @@
__ addq(rsp, Immediate(EntryFrameConstants::kXMMRegistersBlockSize));
#endif
- __ pop(rbx);
+ __k pop(rbx);
#ifdef _WIN64
// Callee-save in Win64 ABI, argument/volatile in AMD64 ABI.
- __ pop(rsi);
- __ pop(rdi);
+ __k pop(rsi);
+ __k pop(rdi);
#endif
- __ pop(r15);
- __ pop(r14);
- __ pop(r13);
- __ pop(r12);
+ __k pop(r15);
+ __k pop(r14);
+ __k pop(r13);
+ __k pop(r12);
__ addq(rsp, Immediate(2 * kPointerSize)); // remove markers
// Restore frame pointer and return.
@@ -4396,6 +4605,7 @@
// indicate that the value is not an instance.
static const int kOffsetToMapCheckValue = 2;
+#ifndef V8_TARGET_ARCH_X32
static const int kOffsetToResultValue = 18;
// The last 4 bytes of the instruction sequence
// movq(rdi, FieldOperand(rax, HeapObject::kMapOffset))
@@ -4407,6 +4617,19 @@
// __ LoadRoot(ToRegister(instr->result()), Heap::kTheHoleValueRootIndex);
// before the offset of the hole value in the root array.
static const unsigned int kWordBeforeResultValue = 0x458B4909;
+#else
+ static const int kOffsetToResultValue = 14;
+ // The last 4 bytes of the instruction sequence
+ // movl(rdi, FieldOperand(rax, HeapObject::kMapOffset))
+ // Move(kScratchRegister, Factory::the_hole_value())
+ // in front of the hole value address.
+ static const unsigned int kWordBeforeMapCheckValue = 0xBA41FF78;
+ // The last 4 bytes of the instruction sequence
+ // __ j(not_equal, &cache_miss);
+ // __ LoadRoot(ToRegister(instr->result()), Heap::kTheHoleValueRootIndex);
+ // before the offset of the hole value in the root array.
+ static const unsigned int kWordBeforeResultValue = 0x458B4109;
+#endif
// Only the inline check flag is supported on X64.
ASSERT(flags_ == kNoFlags || HasCallSiteInlineCheck());
int extra_stack_space = HasCallSiteInlineCheck() ? kPointerSize : 0;
@@ -4414,7 +4637,12 @@
// Get the object - go slow case if it's a smi.
Label slow;
+#ifndef V8_TARGET_ARCH_X32
__ movq(rax, Operand(rsp, 2 * kPointerSize + extra_stack_space));
+#else
+ __ movl(rax, Operand(rsp, 1 * kHWRegSize + 1 * kPointerSize +
+ extra_stack_space));
+#endif
__ JumpIfSmi(rax, &slow);
// Check that the left hand is a JS object. Leave its map in rax.
@@ -4424,7 +4652,7 @@
__ j(above, &slow);
// Get the prototype of the function.
- __ movq(rdx, Operand(rsp, 1 * kPointerSize + extra_stack_space));
+ __a movq(rdx, Operand(rsp, 1 * kPointerSize + extra_stack_space));
// rdx is function, rax is map.
// If there is a call site cache don't look in the global cache, but do the
@@ -4459,8 +4687,8 @@
__ StoreRoot(rax, Heap::kInstanceofCacheMapRootIndex);
} else {
// Get return address and delta to inlined map check.
- __ movq(kScratchRegister, Operand(rsp, 0 * kPointerSize));
- __ subq(kScratchRegister, Operand(rsp, 1 * kPointerSize));
+ __q movq(kScratchRegister, Operand(rsp, 0 * kPointerSize));
+ __a subq(kScratchRegister, Operand(rsp, 1 * kPointerSize));
if (FLAG_debug_code) {
__ movl(rdi, Immediate(kWordBeforeMapCheckValue));
__ cmpl(Operand(kScratchRegister, kOffsetToMapCheckValue - 4), rdi);
@@ -4500,8 +4728,8 @@
// Assert it is a 1-byte signed value.
ASSERT(true_offset >= 0 && true_offset < 0x100);
__ movl(rax, Immediate(true_offset));
- __ movq(kScratchRegister, Operand(rsp, 0 * kPointerSize));
- __ subq(kScratchRegister, Operand(rsp, 1 * kPointerSize));
+ __q movq(kScratchRegister, Operand(rsp, 0 * kPointerSize));
+ __a subq(kScratchRegister, Operand(rsp, 1 * kPointerSize));
__ movb(Operand(kScratchRegister, kOffsetToResultValue), rax);
if (FLAG_debug_code) {
__ movl(rax, Immediate(kWordBeforeResultValue));
@@ -4523,8 +4751,8 @@
// Assert it is a 1-byte signed value.
ASSERT(false_offset >= 0 && false_offset < 0x100);
__ movl(rax, Immediate(false_offset));
- __ movq(kScratchRegister, Operand(rsp, 0 * kPointerSize));
- __ subq(kScratchRegister, Operand(rsp, 1 * kPointerSize));
+ __q movq(kScratchRegister, Operand(rsp, 0 * kPointerSize));
+ __a subq(kScratchRegister, Operand(rsp, 1 * kPointerSize));
__ movb(Operand(kScratchRegister, kOffsetToResultValue), rax);
if (FLAG_debug_code) {
__ movl(rax, Immediate(kWordBeforeResultValue));
@@ -4538,9 +4766,9 @@
__ bind(&slow);
if (HasCallSiteInlineCheck()) {
// Remove extra value from the stack.
- __ pop(rcx);
+ __k pop(rcx);
__ pop(rax);
- __ push(rcx);
+ __k push(rcx);
}
__ InvokeBuiltin(Builtins::INSTANCE_OF, JUMP_FUNCTION);
}
@@ -4690,8 +4918,10 @@
Builtins::JavaScript builtin_id = Builtins::ADD;
// Load the two arguments.
- __ movq(rax, Operand(rsp, 2 * kPointerSize)); // First argument (left).
- __ movq(rdx, Operand(rsp, 1 * kPointerSize)); // Second argument (right).
+ // First argument (left).
+ __a movq(rax, Operand(rsp, 2 * kPointerSize));
+ // Second argument (right).
+ __a movq(rdx, Operand(rsp, 1 * kPointerSize));
// Make sure that both arguments are strings if not known in advance.
if ((flags_ & NO_STRING_ADD_FLAGS) != 0) {
@@ -4708,13 +4938,23 @@
// We convert the one that is not known to be a string.
if ((flags_ & NO_STRING_CHECK_LEFT_IN_STUB) == 0) {
ASSERT((flags_ & NO_STRING_CHECK_RIGHT_IN_STUB) != 0);
+#ifndef V8_TARGET_ARCH_X32
GenerateConvertArgument(masm, 2 * kPointerSize, rax, rbx, rcx, rdi,
&call_builtin);
+#else
+ GenerateConvertArgument(masm, 1 * kHWRegSize + 1 * kPointerSize, rax,
+ rbx, rcx, rdi, &call_builtin);
+#endif
builtin_id = Builtins::STRING_ADD_RIGHT;
} else if ((flags_ & NO_STRING_CHECK_RIGHT_IN_STUB) == 0) {
ASSERT((flags_ & NO_STRING_CHECK_LEFT_IN_STUB) != 0);
+#ifndef V8_TARGET_ARCH_X32
GenerateConvertArgument(masm, 1 * kPointerSize, rdx, rbx, rcx, rdi,
&call_builtin);
+#else
+ GenerateConvertArgument(masm, 1 * kHWRegSize, rdx, rbx, rcx, rdi,
+ &call_builtin);
+#endif
builtin_id = Builtins::STRING_ADD_LEFT;
}
}
@@ -4761,8 +5001,12 @@
__ movzxbl(r9, FieldOperand(r9, Map::kInstanceTypeOffset));
// Look at the length of the result of adding the two strings.
+#ifndef V8_TARGET_ARCH_X32
STATIC_ASSERT(String::kMaxLength <= Smi::kMaxValue / 2);
__ SmiAdd(rbx, rbx, rcx);
+#else
+ __ SmiAdd(rbx, rbx, rcx, &call_runtime);
+#endif
// Use the string table when adding two one character strings, as it
// helps later optimizations to return an internalized string here.
__ SmiCompare(rbx, Smi::FromInt(2));
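
The checked SmiAdd is needed because the x64 reasoning directly above no longer holds: with 31-bit smis, Smi::kMaxValue is 2^30 - 1, so String::kMaxLength is not guaranteed to be at most half of it, and the sum of two string lengths can leave the smi range; overflow now bails out to &call_runtime. The overflow condition as a sketch, not part of the patch:

#include <cstdint>

// True iff a + b leaves the 31-bit smi payload range.
static inline bool SmiAddOverflows31(int32_t a, int32_t b) {
  int64_t sum = static_cast<int64_t>(a) + b;
  return sum < -(int64_t{1} << 30) || sum > (int64_t{1} << 30) - 1;
}
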
@@ -5011,10 +5255,10 @@
void StringAddStub::GenerateRegisterArgsPop(MacroAssembler* masm,
Register temp) {
- __ pop(temp);
+ __k pop(temp);
__ pop(rdx);
__ pop(rax);
- __ push(temp);
+ __k push(temp);
}
@@ -5117,7 +5361,8 @@
// Copy from edi to esi using rep movs instruction.
__ movl(kScratchRegister, count);
- __ shr(count, Immediate(kPointerSizeLog2)); // Number of doublewords to copy.
+ // Number of doublewords to copy.
+ __ shr(count, Immediate(kPointerSizeLog2));
__ repmovsq();
// Find number of bytes left.
@@ -5337,7 +5582,11 @@
// rsp[16] : from
// rsp[24] : string
+#ifndef V8_TARGET_ARCH_X32
const int kToOffset = 1 * kPointerSize;
+#else
+ const int kToOffset = 1 * kHWRegSize;
+#endif
const int kFromOffset = kToOffset + kPointerSize;
const int kStringOffset = kFromOffset + kPointerSize;
const int kArgumentsSize = (kStringOffset + kPointerSize) - kToOffset;
@@ -5676,7 +5925,7 @@
FieldOperand(left, length, times_1, SeqOneByteString::kHeaderSize));
__ lea(right,
FieldOperand(right, length, times_1, SeqOneByteString::kHeaderSize));
- __ neg(length);
+ __k neg(length);
Register index = length; // index = -length;
// Compare loop.
@@ -5685,7 +5934,7 @@
__ movb(scratch, Operand(left, index, times_1, 0));
__ cmpb(scratch, Operand(right, index, times_1, 0));
__ j(not_equal, chars_not_equal, near_jump);
- __ incq(index);
+ __k incq(index);
__ j(not_zero, &loop);
}
@@ -5698,8 +5947,8 @@
// rsp[8] : right string
// rsp[16] : left string
- __ movq(rdx, Operand(rsp, 2 * kPointerSize)); // left
- __ movq(rax, Operand(rsp, 1 * kPointerSize)); // right
+ __a movq(rdx, Operand(rsp, 2 * kPointerSize)); // left
+ __a movq(rax, Operand(rsp, 1 * kPointerSize)); // right
// Check for identity.
Label not_same;
@@ -5718,9 +5967,9 @@
// Inline comparison of ASCII strings.
__ IncrementCounter(counters->string_compare_native(), 1);
// Drop arguments from the stack
- __ pop(rcx);
+ __k pop(rcx);
__ addq(rsp, Immediate(2 * kPointerSize));
- __ push(rcx);
+ __k push(rcx);
GenerateCompareFlatAsciiStrings(masm, rdx, rax, rcx, rbx, rdi, r8);
// Call the runtime; it returns -1 (less), 0 (equal), or 1 (greater)
@@ -5994,10 +6243,10 @@
// Handle more complex cases in runtime.
__ bind(&runtime);
- __ pop(tmp1); // Return address.
+ __k pop(tmp1); // Return address.
__ push(left);
__ push(right);
- __ push(tmp1);
+ __k push(tmp1);
if (equality) {
__ TailCallRuntime(Runtime::kStringEquals, 2, 1);
} else {
@@ -6219,7 +6468,7 @@
// (their names are the null value).
for (int i = kInlinedProbes; i < kTotalProbes; i++) {
// Compute the masked index: (hash + i + i * i) & mask.
- __ movq(scratch, Operand(rsp, 2 * kPointerSize));
+ __a movq(scratch, Operand(rsp, 2 * kPointerSize));
if (i > 0) {
__ addl(scratch, Immediate(NameDictionary::GetProbeOffset(i)));
}
@@ -6239,7 +6488,7 @@
__ j(equal, &not_in_dictionary);
// Stop if found the property.
- __ cmpq(scratch, Operand(rsp, 3 * kPointerSize));
+ __a cmpq(scratch, Operand(rsp, 3 * kPointerSize));
__ j(equal, &in_dictionary);
if (i != kTotalProbes - 1 && mode_ == NEGATIVE_LOOKUP) {
@@ -6591,8 +6840,8 @@
Label fast_elements;
// Get array literal index, array literal and its map.
- __ movq(rdx, Operand(rsp, 1 * kPointerSize));
- __ movq(rbx, Operand(rsp, 2 * kPointerSize));
+ __a movq(rdx, Operand(rsp, 1 * kPointerSize));
+ __a movq(rbx, Operand(rsp, 2 * kPointerSize));
__ movq(rdi, FieldOperand(rbx, JSObject::kMapOffset));
__ CheckFastElements(rdi, &double_elements);
@@ -6605,7 +6854,7 @@
// the runtime.
__ bind(&slow_elements);
- __ pop(rdi); // Pop return address and remember to put back later for tail
+ __k pop(rdi); // Pop return address and remember to put back later for tail
// call.
__ push(rbx);
__ push(rcx);
@@ -6613,7 +6862,7 @@
__ movq(rbx, Operand(rbp, JavaScriptFrameConstants::kFunctionOffset));
__ push(FieldOperand(rbx, JSFunction::kLiteralsOffset));
__ push(rdx);
- __ push(rdi); // Return return address so that tail call returns to right
+ __k push(rdi); // Return return address so that tail call returns to right
// place.
__ TailCallRuntime(Runtime::kStoreArrayLiteralElement, 5, 1);
@@ -6661,7 +6910,7 @@
StubFailureTrampolineFrame::kCallerStackParameterCountFrameOffset;
__ movq(rbx, MemOperand(rbp, parameter_count_offset));
masm->LeaveFrame(StackFrame::STUB_FAILURE_TRAMPOLINE);
- __ pop(rcx);
+ __k pop(rcx);
int additional_offset = function_mode_ == JS_FUNCTION_STUB_MODE
? kPointerSize
: 0;
@@ -6686,14 +6935,14 @@
// This stub can be called from essentially anywhere, so it needs to save
// all volatile and callee-save registers.
const size_t kNumSavedRegisters = 2;
- __ push(arg_reg_1);
- __ push(arg_reg_2);
+ __k push(arg_reg_1);
+ __k push(arg_reg_2);
// Calculate the original stack pointer and store it in the second arg.
- __ lea(arg_reg_2, Operand(rsp, (kNumSavedRegisters + 1) * kPointerSize));
+ __q lea(arg_reg_2, Operand(rsp, (kNumSavedRegisters + 1) * kPointerSize));
// Calculate the function address to the first arg.
- __ movq(arg_reg_1, Operand(rsp, kNumSavedRegisters * kPointerSize));
+ __s movq(arg_reg_1, Operand(rsp, kNumSavedRegisters * kPointerSize));
__ subq(arg_reg_1, Immediate(Assembler::kShortCallInstructionLength));
// Save the remainder of the volatile registers.
@@ -6701,7 +6950,11 @@
// Call the entry hook function.
__ movq(rax, FUNCTION_ADDR(masm->isolate()->function_entry_hook()),
+#ifndef V8_TARGET_ARCH_X32
RelocInfo::NONE64);
+#else
+ RelocInfo::NONE32);
+#endif
AllowExternalCallThatCantCauseGC scope(masm);
@@ -6711,9 +6964,8 @@
// Restore volatile regs.
masm->PopCallerSaved(kSaveFPRegs, arg_reg_1, arg_reg_2);
- __ pop(arg_reg_2);
- __ pop(arg_reg_1);
-
+ __k pop(arg_reg_2);
+ __k pop(arg_reg_1);
__ Ret();
}
@@ -6761,7 +7013,7 @@
__ j(not_zero, &normal_sequence);
// look at the first argument
- __ movq(rcx, Operand(rsp, kPointerSize));
+ __a movq(rcx, Operand(rsp, 1 * kPointerSize));
__ testq(rcx, rcx);
__ j(zero, &normal_sequence);
@@ -6928,7 +7180,7 @@
if (IsFastPackedElementsKind(kind)) {
// We might need to create a holey array
// look at the first argument
- __ movq(rcx, Operand(rsp, kPointerSize));
+ __a movq(rcx, Operand(rsp, 1 * kPointerSize));
__ testq(rcx, rcx);
__ j(zero, &normal_sequence);
@@ -6999,7 +7251,11 @@
GenerateCase(masm, FAST_ELEMENTS);
}
-
+#undef __n
+#undef __s
+#undef __q
+#undef __a
+#undef __k
#undef __
} } // namespace v8::internal