| Index: src/sh4/code-stubs-sh4.cc
|
| diff --git a/src/arm/code-stubs-arm.cc b/src/sh4/code-stubs-sh4.cc
|
| similarity index 81%
|
| copy from src/arm/code-stubs-arm.cc
|
| copy to src/sh4/code-stubs-sh4.cc
|
| index ceb108ffae69854ede01a5a33526815d07a54983..5468b904f6f1c353ccb92afc66a3c161accd7153 100644
|
| --- a/src/arm/code-stubs-arm.cc
|
| +++ b/src/sh4/code-stubs-sh4.cc
|
| @@ -1,4 +1,4 @@
|
| -// Copyright 2012 the V8 project authors. All rights reserved.
|
| +// Copyright 2011-2012 the V8 project authors. All rights reserved.
|
| // Redistribution and use in source and binary forms, with or without
|
| // modification, are permitted provided that the following conditions are
|
| // met:
|
| @@ -27,7 +27,7 @@
|
|
|
| #include "v8.h"
|
|
|
| -#if defined(V8_TARGET_ARCH_ARM)
|
| +#if defined(V8_TARGET_ARCH_SH4)
|
|
|
| #include "bootstrapper.h"
|
| #include "code-stubs.h"
|
| @@ -53,8 +53,22 @@ static void EmitTwoNonNanDoubleComparison(MacroAssembler* masm, Condition cond);
|
| static void EmitStrictTwoHeapObjectCompare(MacroAssembler* masm,
|
| Register lhs,
|
| Register rhs);
|
| +static void EmitCheckForTwoHeapNumbers(MacroAssembler* masm,
|
| + Register lhs,
|
| + Register rhs,
|
| + Label* both_loaded_as_doubles,
|
| + Label* not_heap_numbers,
|
| + Label* slow);
|
| +static void EmitCheckForSymbolsOrObjects(MacroAssembler* masm,
|
| + Register lhs,
|
| + Register rhs,
|
| + Label* possible_strings,
|
| + Label* not_both_strings);
|
| +void EmitNanCheck(MacroAssembler* masm, Label* lhs_not_nan, Condition cond);
|
|
|
|
|
| +// Copy from ARM
|
| +#include "map-sh4.h" // Define register map
|
| // Check if the operand is a heap number.
|
| static void EmitCheckForHeapNumber(MacroAssembler* masm, Register operand,
|
| Register scratch1, Register scratch2,
|
| @@ -68,8 +82,15 @@ static void EmitCheckForHeapNumber(MacroAssembler* masm, Register operand,
|
|
|
| void ToNumberStub::Generate(MacroAssembler* masm) {
|
| // The ToNumber stub takes one argument in eax.
|
| + // Entry argument: r0
|
| + // Exit in: r0
|
| +#ifdef DEBUG
|
| + // Clobber other parameter registers on entry.
|
| + __ Dead(r1, r2, r3);
|
| + __ Dead(r4, r5, r6, r7);
|
| +#endif
|
| Label check_heap_number, call_builtin;
|
| - __ JumpIfNotSmi(r0, &check_heap_number);
|
| + __ JumpIfNotSmi(r0, &check_heap_number, Label::kNear);
|
| __ Ret();
|
|
|
| __ bind(&check_heap_number);
|
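Editor's note: the fast path above only needs the smi tag test. V8 keeps small integers with a clear low bit and heap object pointers with the bit set, so JumpIfNotSmi reduces to a single bit test. A minimal C sketch of that predicate (the helper name is illustrative, not from the patch):

    #include <stdbool.h>
    #include <stdint.h>

    /* V8 value tagging on 32-bit targets: smis have tag bit 0 == 0,
       heap object pointers have tag bit 0 == 1. */
    static bool IsSmi(intptr_t tagged_value) {
      return (tagged_value & 1) == 0;
    }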
| @@ -170,13 +191,15 @@ void FastNewClosureStub::Generate(MacroAssembler* masm) {
|
| __ sub(r4, r4, Operand(
|
| Smi::FromInt(SharedFunctionInfo::kEntryLength))); // Skip an entry.
|
| __ add(r5, r1, Operand(FixedArray::kHeaderSize - kHeapObjectTag));
|
| - __ add(r5, r5, Operand(r4, LSL, kPointerSizeLog2 - kSmiTagSize));
|
| + __ lsl(ip, r4, Operand(kPointerSizeLog2 - kSmiTagSize));
|
| + __ add(r5, r5, ip);
|
| __ ldr(r5, MemOperand(r5));
|
| __ cmp(r2, r5);
|
| __ b(ne, &loop);
|
| // Hit: fetch the optimized code.
|
| __ add(r5, r1, Operand(FixedArray::kHeaderSize - kHeapObjectTag));
|
| - __ add(r5, r5, Operand(r4, LSL, kPointerSizeLog2 - kSmiTagSize));
|
| + __ lsl(r4, r4, Operand(kPointerSizeLog2 - kSmiTagSize));
|
| + __ add(r5, r5, r4);
|
| __ add(r5, r5, Operand(kPointerSize));
|
| __ ldr(r4, MemOperand(r5));
|
|
|
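Editor's note: SH4 has no shifted-operand form, so each ARM `Operand(rX, LSL, n)` above becomes an explicit `lsl` into a scratch register followed by an `add`; the byte offset computed is unchanged. A hedged C sketch of that offset, assuming a 32-bit target with one-bit smi tags:

    #include <stdint.h>

    /* A smi index already carries a factor of two from its tag bit, so a shift
       by (kPointerSizeLog2 - kSmiTagSize) turns it into a byte offset. */
    static uintptr_t SmiIndexToByteOffset(intptr_t smi_index) {
      const int kPointerSizeLog2 = 2;   /* 4-byte pointers */
      const int kSmiTagSize = 1;
      return (uintptr_t)smi_index << (kPointerSizeLog2 - kSmiTagSize);
    }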
| @@ -392,7 +415,8 @@ void FastCloneShallowArrayStub::Generate(MacroAssembler* masm) {
|
| __ ldr(r3, MemOperand(sp, 2 * kPointerSize));
|
| __ ldr(r0, MemOperand(sp, 1 * kPointerSize));
|
| __ add(r3, r3, Operand(FixedArray::kHeaderSize - kHeapObjectTag));
|
| - __ ldr(r3, MemOperand(r3, r0, LSL, kPointerSizeLog2 - kSmiTagSize));
|
| + __ lsl(ip, r0, Operand(kPointerSizeLog2 - kSmiTagSize));
|
| + __ ldr(r3, MemOperand(r3, ip));
|
| __ CompareRoot(r3, Heap::kUndefinedValueRootIndex);
|
| __ b(eq, &slow_case);
|
|
|
| @@ -470,7 +494,8 @@ void FastCloneShallowObjectStub::Generate(MacroAssembler* masm) {
|
| __ ldr(r3, MemOperand(sp, 3 * kPointerSize));
|
| __ ldr(r0, MemOperand(sp, 2 * kPointerSize));
|
| __ add(r3, r3, Operand(FixedArray::kHeaderSize - kHeapObjectTag));
|
| - __ ldr(r3, MemOperand(r3, r0, LSL, kPointerSizeLog2 - kSmiTagSize));
|
| + __ lsl(r1, r0, Operand(kPointerSizeLog2 - kSmiTagSize));
|
| + __ ldr(r3, MemOperand(r3, r1));
|
| __ CompareRoot(r3, Heap::kUndefinedValueRootIndex);
|
| __ b(eq, &slow_case);
|
|
|
| @@ -539,30 +564,33 @@ class ConvertToDoubleStub : public CodeStub {
|
|
|
|
|
| void ConvertToDoubleStub::Generate(MacroAssembler* masm) {
|
| + ASSERT(!result1_.is(ip) && !result2_.is(ip) && !zeros_.is(ip));
|
| Register exponent = result1_;
|
| Register mantissa = result2_;
|
|
|
| Label not_special;
|
| // Convert from Smi to integer.
|
| - __ mov(source_, Operand(source_, ASR, kSmiTagSize));
|
| + __ asr(source_, source_, Operand(kSmiTagSize));
|
| // Move sign bit from source to destination. This works because the sign bit
|
| // in the exponent word of the double has the same position and polarity as
|
| // the 2's complement sign bit in a Smi.
|
| STATIC_ASSERT(HeapNumber::kSignMask == 0x80000000u);
|
| - __ and_(exponent, source_, Operand(HeapNumber::kSignMask), SetCC);
|
| + __ land(exponent, source_, Operand(HeapNumber::kSignMask));
|
| + __ tst(exponent, exponent);
|
| // Subtract from 0 if source was negative.
|
| - __ rsb(source_, source_, Operand(0, RelocInfo::NONE), LeaveCC, ne);
|
| + __ rsb(source_, source_, Operand(0), ne);
|
|
|
| // We have -1, 0 or 1, which we treat specially. Register source_ contains
|
| // absolute value: it is either equal to 1 (special case of -1 and 1),
|
| // greater than 1 (not a special case) or less than 1 (special case of 0).
|
| - __ cmp(source_, Operand(1));
|
| - __ b(gt, ¬_special);
|
| + __ cmpgt(source_, Operand(1));
|
| + __ bt_near(¬_special);
|
|
|
| // For 1 or -1 we need to or in the 0 exponent (biased to 1023).
|
| const uint32_t exponent_word_for_1 =
|
| HeapNumber::kExponentBias << HeapNumber::kExponentShift;
|
| - __ orr(exponent, exponent, Operand(exponent_word_for_1), LeaveCC, eq);
|
| + __ cmpeq(source_, Operand(1));
|
| + __ orr(exponent, exponent, Operand(exponent_word_for_1), eq);
|
| // 1, 0 and -1 all have 0 for the second word.
|
| __ mov(mantissa, Operand(0, RelocInfo::NONE));
|
| __ Ret();
|
| @@ -578,19 +606,17 @@ void ConvertToDoubleStub::Generate(MacroAssembler* masm) {
|
| int fudge = 0x400;
|
| __ rsb(mantissa, zeros_, Operand(31 + HeapNumber::kExponentBias - fudge));
|
| __ add(mantissa, mantissa, Operand(fudge));
|
| - __ orr(exponent,
|
| - exponent,
|
| - Operand(mantissa, LSL, HeapNumber::kExponentShift));
|
| + __ lsl(ip, mantissa, Operand(HeapNumber::kExponentShift));
|
| + __ orr(exponent, exponent, ip);
|
| // Shift up the source chopping the top bit off.
|
| __ add(zeros_, zeros_, Operand(1));
|
| // This wouldn't work for 1.0 or -1.0 as the shift would be 32 which means 0.
|
| - __ mov(source_, Operand(source_, LSL, zeros_));
|
| + __ lsl(source_, source_, zeros_);
|
| // Compute lower part of fraction (last 12 bits).
|
| - __ mov(mantissa, Operand(source_, LSL, HeapNumber::kMantissaBitsInTopWord));
|
| + __ lsl(mantissa, source_, Operand(HeapNumber::kMantissaBitsInTopWord));
|
| // And the top (top 20 bits).
|
| - __ orr(exponent,
|
| - exponent,
|
| - Operand(source_, LSR, 32 - HeapNumber::kMantissaBitsInTopWord));
|
| + __ lsr(ip, source_, Operand(32 - HeapNumber::kMantissaBitsInTopWord));
|
| + __ orr(exponent, exponent, ip);
|
| __ Ret();
|
| }
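Editor's note: ConvertToDoubleStub builds the IEEE-754 encoding by hand. It extracts the sign, special-cases -1/0/1, derives the biased exponent from a leading-zero count, then shifts the magnitude so the bits below the implicit leading 1 land in the two mantissa words. A rough C equivalent of that computation (a sketch only; `__builtin_clz` stands in for CountLeadingZeros and assumes a GCC-compatible compiler):

    #include <stdint.h>

    /* Encode an int32 as the high/low 32-bit words of an IEEE-754 double. */
    static void Int32ToDoubleWords(int32_t v, uint32_t* hi, uint32_t* lo) {
      uint32_t sign = v < 0 ? 0x80000000u : 0u;
      uint32_t a = v < 0 ? (uint32_t)(-(int64_t)v) : (uint32_t)v;
      if (a <= 1) {                        /* -1, 0 and 1: the special cases */
        *hi = sign | (a ? 1023u << 20 : 0u);
        *lo = 0u;
        return;
      }
      int zeros = __builtin_clz(a);                      /* leading zero count */
      uint32_t exponent = 1023u + 31u - (uint32_t)zeros; /* biased exponent */
      uint32_t frac = a << (zeros + 1);    /* drop the implicit leading 1 */
      *hi = sign | (exponent << 20) | (frac >> 12);      /* mantissa[51:32] */
      *lo = frac << 20;                                  /* mantissa[31:0] */
    }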
|
|
|
| @@ -599,27 +625,24 @@ void FloatingPointHelper::LoadSmis(MacroAssembler* masm,
|
| FloatingPointHelper::Destination destination,
|
| Register scratch1,
|
| Register scratch2) {
|
| - if (CpuFeatures::IsSupported(VFP2)) {
|
| - CpuFeatures::Scope scope(VFP2);
|
| - __ mov(scratch1, Operand(r0, ASR, kSmiTagSize));
|
| - __ vmov(d7.high(), scratch1);
|
| - __ vcvt_f64_s32(d7, d7.high());
|
| - __ mov(scratch1, Operand(r1, ASR, kSmiTagSize));
|
| - __ vmov(d6.high(), scratch1);
|
| - __ vcvt_f64_s32(d6, d6.high());
|
| + if (CpuFeatures::IsSupported(FPU)) {
|
| + __ asr(scratch1, r0, Operand(kSmiTagSize));
|
| + __ dfloat(dr2, scratch1);
|
| + __ asr(scratch1, r1, Operand(kSmiTagSize));
|
| + __ dfloat(dr0, scratch1);
|
| if (destination == kCoreRegisters) {
|
| - __ vmov(r2, r3, d7);
|
| - __ vmov(r0, r1, d6);
|
| + __ movd(r2, r3, dr2);
|
| + __ movd(r0, r1, dr0);
|
| }
|
| } else {
|
| ASSERT(destination == kCoreRegisters);
|
| // Write Smi from r0 to r3 and r2 in double format.
|
| - __ mov(scratch1, Operand(r0));
|
| + __ mov(scratch1, r0);
|
| ConvertToDoubleStub stub1(r3, r2, scratch1, scratch2);
|
| __ push(lr);
|
| __ Call(stub1.GetCode());
|
| // Write Smi from r1 to r1 and r0 in double format.
|
| - __ mov(scratch1, Operand(r1));
|
| + __ mov(scratch1, r1);
|
| ConvertToDoubleStub stub2(r1, r0, scratch1, scratch2);
|
| __ Call(stub2.GetCode());
|
| __ pop(lr);
|
| @@ -635,13 +658,15 @@ void FloatingPointHelper::LoadOperands(
|
| Register scratch2,
|
| Label* slow) {
|
|
|
| - // Load right operand (r0) to d6 or r2/r3.
|
| + // Load right operand (r0) to d7 or r2/r3.
|
| + ASSERT(!heap_number_map.is(r0) && !heap_number_map.is(r1) &&
|
| + !heap_number_map.is(r2) && !heap_number_map.is(r3));
|
| LoadNumber(masm, destination,
|
| - r0, d7, r2, r3, heap_number_map, scratch1, scratch2, slow);
|
| + r0, dr2, r2, r3, heap_number_map, scratch1, scratch2, slow);
|
|
|
| - // Load left operand (r1) to d7 or r0/r1.
|
| + // Load left operand (r1) to d6 or r0/r1.
|
| LoadNumber(masm, destination,
|
| - r1, d6, r0, r1, heap_number_map, scratch1, scratch2, slow);
|
| + r1, dr0, r0, r1, heap_number_map, scratch1, scratch2, slow);
|
| }
|
|
|
|
|
| @@ -655,9 +680,11 @@ void FloatingPointHelper::LoadNumber(MacroAssembler* masm,
|
| Register scratch1,
|
| Register scratch2,
|
| Label* not_number) {
|
| - __ AssertRootValue(heap_number_map,
|
| - Heap::kHeapNumberMapRootIndex,
|
| - "HeapNumberMap register clobbered.");
|
| + if (FLAG_debug_code) {
|
| + __ AbortIfNotRootValue(heap_number_map,
|
| + Heap::kHeapNumberMapRootIndex,
|
| + "HeapNumberMap register clobbered.");
|
| + }
|
|
|
| Label is_smi, done;
|
|
|
| @@ -667,34 +694,34 @@ void FloatingPointHelper::LoadNumber(MacroAssembler* masm,
|
| __ JumpIfNotHeapNumber(object, heap_number_map, scratch1, not_number);
|
|
|
| // Handle loading a double from a heap number.
|
| - if (CpuFeatures::IsSupported(VFP2) &&
|
| + if (CpuFeatures::IsSupported(FPU) &&
|
| destination == kVFPRegisters) {
|
| - CpuFeatures::Scope scope(VFP2);
|
| // Load the double from tagged HeapNumber to double register.
|
| - __ sub(scratch1, object, Operand(kHeapObjectTag));
|
| - __ vldr(dst, scratch1, HeapNumber::kValueOffset);
|
| + ASSERT(Operand(kHeapObjectTag - HeapNumber::kValueOffset).is_int8());
|
| + __ sub(scratch1, object, Operand(kHeapObjectTag -
|
| + HeapNumber::kValueOffset));
|
| + __ dldr(dst, MemOperand(scratch1, 0), scratch1);
|
| } else {
|
| ASSERT(destination == kCoreRegisters);
|
| // Load the double from heap number to dst1 and dst2 in double format.
|
| __ Ldrd(dst1, dst2, FieldMemOperand(object, HeapNumber::kValueOffset));
|
| }
|
| - __ jmp(&done);
|
| + __ jmp_near(&done);
|
|
|
| // Handle loading a double from a smi.
|
| __ bind(&is_smi);
|
| - if (CpuFeatures::IsSupported(VFP2)) {
|
| - CpuFeatures::Scope scope(VFP2);
|
| - // Convert smi to double using VFP instructions.
|
| - __ vmov(dst.high(), scratch1);
|
| - __ vcvt_f64_s32(dst, dst.high());
|
| + if (CpuFeatures::IsSupported(FPU)) {
|
| + // Convert smi to double using FPU instructions.
|
| + __ SmiUntag(scratch1, object);
|
| + __ dfloat(dst, scratch1);
|
| if (destination == kCoreRegisters) {
|
| // Load the converted smi to dst1 and dst2 in double format.
|
| - __ vmov(dst1, dst2, dst);
|
| + __ movd(dst1, dst2, dst);
|
| }
|
| } else {
|
| ASSERT(destination == kCoreRegisters);
|
| // Write smi to dst1 and dst2 double format.
|
| - __ mov(scratch1, Operand(object));
|
| + __ mov(scratch1, object);
|
| ConvertToDoubleStub stub(dst2, dst1, scratch1, scratch2);
|
| __ push(lr);
|
| __ Call(stub.GetCode());
|
| @@ -714,9 +741,11 @@ void FloatingPointHelper::ConvertNumberToInt32(MacroAssembler* masm,
|
| Register scratch3,
|
| DwVfpRegister double_scratch,
|
| Label* not_number) {
|
| - __ AssertRootValue(heap_number_map,
|
| - Heap::kHeapNumberMapRootIndex,
|
| - "HeapNumberMap register clobbered.");
|
| + if (FLAG_debug_code) {
|
| + __ AbortIfNotRootValue(heap_number_map,
|
| + Heap::kHeapNumberMapRootIndex,
|
| + "HeapNumberMap register clobbered.");
|
| + }
|
| Label done;
|
| Label not_in_int32_range;
|
|
|
| @@ -758,12 +787,10 @@ void FloatingPointHelper::ConvertIntToDouble(MacroAssembler* masm,
|
|
|
| Label done;
|
|
|
| - if (CpuFeatures::IsSupported(VFP2)) {
|
| - CpuFeatures::Scope scope(VFP2);
|
| - __ vmov(single_scratch, int_scratch);
|
| - __ vcvt_f64_s32(double_dst, single_scratch);
|
| + if (CpuFeatures::IsSupported(FPU)) {
|
| + __ dfloat(double_dst, int_scratch);
|
| if (destination == kCoreRegisters) {
|
| - __ vmov(dst1, dst2, double_dst);
|
| + __ movd(dst1, dst2, double_dst);
|
| }
|
| } else {
|
| Label fewer_than_20_useful_bits;
|
| @@ -772,17 +799,19 @@ void FloatingPointHelper::ConvertIntToDouble(MacroAssembler* masm,
|
| // | s | exp | mantissa |
|
|
|
| // Check for zero.
|
| - __ cmp(int_scratch, Operand::Zero());
|
| + __ cmp(int_scratch, Operand(0));
|
| __ mov(dst2, int_scratch);
|
| __ mov(dst1, int_scratch);
|
| __ b(eq, &done);
|
|
|
| // Preload the sign of the value.
|
| - __ and_(dst2, int_scratch, Operand(HeapNumber::kSignMask), SetCC);
|
| + __ land(dst2, int_scratch, Operand(HeapNumber::kSignMask));
|
| // Get the absolute value of the object (as an unsigned integer).
|
| - __ rsb(int_scratch, int_scratch, Operand::Zero(), SetCC, mi);
|
| + __ cmpge(dst2, Operand(0));
|
| + __ rsb(ip, int_scratch, Operand(0));
|
| + __ mov(int_scratch, ip, f);
|
|
|
| - // Get mantissa[51:20].
|
| +  // Get mantissa[51:20].
|
|
|
| // Get the position of the first set bit.
|
| __ CountLeadingZeros(dst1, int_scratch, scratch2);
|
| @@ -795,24 +824,28 @@ void FloatingPointHelper::ConvertIntToDouble(MacroAssembler* masm,
|
|
|
| // Clear the first non null bit.
|
| __ mov(scratch2, Operand(1));
|
| - __ bic(int_scratch, int_scratch, Operand(scratch2, LSL, dst1));
|
| + __ lsl(scratch2, scratch2, dst1);
|
| + __ bic(int_scratch, int_scratch, scratch2);
|
|
|
| - __ cmp(dst1, Operand(HeapNumber::kMantissaBitsInTopWord));
|
| + // Present on ARM, but dead code.
|
| + // __ cmp(dst1, Operand(HeapNumber::kMantissaBitsInTopWord));
|
| // Get the number of bits to set in the lower part of the mantissa.
|
| - __ sub(scratch2, dst1, Operand(HeapNumber::kMantissaBitsInTopWord), SetCC);
|
| - __ b(mi, &fewer_than_20_useful_bits);
|
| + __ sub(scratch2, dst1, Operand(HeapNumber::kMantissaBitsInTopWord));
|
| + __ cmpge(scratch2, Operand(0));
|
| + __ b(f, &fewer_than_20_useful_bits, Label::kNear);
|
| // Set the higher 20 bits of the mantissa.
|
| - __ orr(dst2, dst2, Operand(int_scratch, LSR, scratch2));
|
| + __ lsr(ip, int_scratch, scratch2);
|
| + __ orr(dst2, dst2, ip);
|
| __ rsb(scratch2, scratch2, Operand(32));
|
| - __ mov(dst1, Operand(int_scratch, LSL, scratch2));
|
| + __ lsl(dst1, int_scratch, scratch2);
|
| __ b(&done);
|
|
|
| __ bind(&fewer_than_20_useful_bits);
|
| __ rsb(scratch2, dst1, Operand(HeapNumber::kMantissaBitsInTopWord));
|
| - __ mov(scratch2, Operand(int_scratch, LSL, scratch2));
|
| + __ lsl(scratch2, int_scratch, scratch2);
|
| __ orr(dst2, dst2, scratch2);
|
| // Set dst1 to 0.
|
| - __ mov(dst1, Operand::Zero());
|
| + __ mov(dst1, Operand(0));
|
| }
|
| __ bind(&done);
|
| }
|
| @@ -822,7 +855,6 @@ void FloatingPointHelper::LoadNumberAsInt32Double(MacroAssembler* masm,
|
| Register object,
|
| Destination destination,
|
| DwVfpRegister double_dst,
|
| - DwVfpRegister double_scratch,
|
| Register dst1,
|
| Register dst2,
|
| Register heap_number_map,
|
| @@ -845,30 +877,30 @@ void FloatingPointHelper::LoadNumberAsInt32Double(MacroAssembler* masm,
|
| __ b(&done);
|
|
|
| __ bind(&obj_is_not_smi);
|
| - __ AssertRootValue(heap_number_map,
|
| - Heap::kHeapNumberMapRootIndex,
|
| - "HeapNumberMap register clobbered.");
|
| + if (FLAG_debug_code) {
|
| + __ AbortIfNotRootValue(heap_number_map,
|
| + Heap::kHeapNumberMapRootIndex,
|
| + "HeapNumberMap register clobbered.");
|
| + }
|
| __ JumpIfNotHeapNumber(object, heap_number_map, scratch1, not_int32);
|
|
|
| // Load the number.
|
| - if (CpuFeatures::IsSupported(VFP2)) {
|
| - CpuFeatures::Scope scope(VFP2);
|
| + if (CpuFeatures::IsSupported(FPU)) {
|
| // Load the double value.
|
| __ sub(scratch1, object, Operand(kHeapObjectTag));
|
| - __ vldr(double_dst, scratch1, HeapNumber::kValueOffset);
|
| + __ dldr(double_dst, MemOperand(scratch1, HeapNumber::kValueOffset));
|
|
|
| - __ EmitVFPTruncate(kRoundToZero,
|
| + __ EmitFPUTruncate(kRoundToZero,
|
| scratch1,
|
| double_dst,
|
| scratch2,
|
| - double_scratch,
|
| kCheckForInexactConversion);
|
|
|
| // Jump to not_int32 if the operation did not succeed.
|
| __ b(ne, not_int32);
|
|
|
| if (destination == kCoreRegisters) {
|
| - __ vmov(dst1, dst2, double_dst);
|
| + __ movd(dst1, dst2, double_dst);
|
| }
|
|
|
| } else {
|
| @@ -878,8 +910,8 @@ void FloatingPointHelper::LoadNumberAsInt32Double(MacroAssembler* masm,
|
|
|
| // Check for 0 and -0.
|
| __ bic(scratch1, dst1, Operand(HeapNumber::kSignMask));
|
| - __ orr(scratch1, scratch1, Operand(dst2));
|
| - __ cmp(scratch1, Operand::Zero());
|
| + __ orr(scratch1, scratch1, dst2);
|
| + __ cmp(scratch1, Operand(0));
|
| __ b(eq, &done);
|
|
|
| // Check that the value can be exactly represented by a 32-bit integer.
|
| @@ -901,8 +933,7 @@ void FloatingPointHelper::LoadNumberAsInt32(MacroAssembler* masm,
|
| Register scratch1,
|
| Register scratch2,
|
| Register scratch3,
|
| - DwVfpRegister double_scratch0,
|
| - DwVfpRegister double_scratch1,
|
| + DwVfpRegister double_scratch,
|
| Label* not_int32) {
|
| ASSERT(!dst.is(object));
|
| ASSERT(!scratch1.is(object) && !scratch2.is(object) && !scratch3.is(object));
|
| @@ -914,29 +945,31 @@ void FloatingPointHelper::LoadNumberAsInt32(MacroAssembler* masm,
|
|
|
| __ UntagAndJumpIfSmi(dst, object, &done);
|
|
|
| - __ AssertRootValue(heap_number_map,
|
| - Heap::kHeapNumberMapRootIndex,
|
| - "HeapNumberMap register clobbered.");
|
| + if (FLAG_debug_code) {
|
| + __ AbortIfNotRootValue(heap_number_map,
|
| + Heap::kHeapNumberMapRootIndex,
|
| + "HeapNumberMap register clobbered.");
|
| + }
|
| __ JumpIfNotHeapNumber(object, heap_number_map, scratch1, not_int32);
|
|
|
| // Object is a heap number.
|
| // Convert the floating point value to a 32-bit integer.
|
| - if (CpuFeatures::IsSupported(VFP2)) {
|
| - CpuFeatures::Scope scope(VFP2);
|
| -
|
| + if (CpuFeatures::IsSupported(FPU)) {
|
| // Load the double value.
|
| __ sub(scratch1, object, Operand(kHeapObjectTag));
|
| - __ vldr(double_scratch0, scratch1, HeapNumber::kValueOffset);
|
| + __ dldr(double_scratch, MemOperand(scratch1, HeapNumber::kValueOffset));
|
|
|
| - __ EmitVFPTruncate(kRoundToZero,
|
| - dst,
|
| - double_scratch0,
|
| + __ EmitFPUTruncate(kRoundToZero,
|
| + scratch2,
|
| + double_scratch,
|
| scratch1,
|
| - double_scratch1,
|
| kCheckForInexactConversion);
|
|
|
| // Jump to not_int32 if the operation did not succeed.
|
| __ b(ne, not_int32);
|
| + // Get the result in the destination register.
|
| + __ mov(dst, scratch2);
|
| +
|
| } else {
|
| // Load the double value in the destination registers.
|
| __ ldr(scratch1, FieldMemOperand(object, HeapNumber::kExponentOffset));
|
| @@ -944,8 +977,8 @@ void FloatingPointHelper::LoadNumberAsInt32(MacroAssembler* masm,
|
|
|
| // Check for 0 and -0.
|
| __ bic(dst, scratch1, Operand(HeapNumber::kSignMask));
|
| - __ orr(dst, scratch2, Operand(dst));
|
| - __ cmp(dst, Operand::Zero());
|
| + __ orr(dst, scratch2, dst);
|
| + __ cmp(dst, Operand(0));
|
| __ b(eq, &done);
|
|
|
| DoubleIs32BitInteger(masm, scratch1, scratch2, dst, scratch3, not_int32);
|
| @@ -955,14 +988,16 @@ void FloatingPointHelper::LoadNumberAsInt32(MacroAssembler* masm,
|
| // scratch2: 1
|
|
|
| // Shift back the higher bits of the mantissa.
|
| - __ mov(dst, Operand(dst, LSR, scratch3));
|
| + __ lsr(dst, dst, scratch3);
|
| // Set the implicit first bit.
|
| __ rsb(scratch3, scratch3, Operand(32));
|
| - __ orr(dst, dst, Operand(scratch2, LSL, scratch3));
|
| + __ lsl(scratch2, scratch2, scratch3);
|
| + __ orr(dst, dst, scratch2);
|
| // Set the sign.
|
| __ ldr(scratch1, FieldMemOperand(object, HeapNumber::kExponentOffset));
|
| __ tst(scratch1, Operand(HeapNumber::kSignMask));
|
| - __ rsb(dst, dst, Operand::Zero(), LeaveCC, mi);
|
| + __ rsb(ip, dst, Operand(0));
|
| + __ mov(dst, ip, ne); // FIXME(stm): strange case !!
|
| }
|
|
|
| __ bind(&done);
|
| @@ -982,7 +1017,7 @@ void FloatingPointHelper::DoubleIs32BitInteger(MacroAssembler* masm,
|
| HeapNumber::kExponentBits);
|
|
|
| // Substract the bias from the exponent.
|
| - __ sub(scratch, scratch, Operand(HeapNumber::kExponentBias), SetCC);
|
| + __ sub(scratch, scratch, Operand(HeapNumber::kExponentBias));
|
|
|
| // src1: higher (exponent) part of the double value.
|
| // src2: lower (mantissa) part of the double value.
|
| @@ -990,16 +1025,18 @@ void FloatingPointHelper::DoubleIs32BitInteger(MacroAssembler* masm,
|
|
|
| // Fast cases. Check for obvious non 32-bit integer values.
|
| // Negative exponent cannot yield 32-bit integers.
|
| - __ b(mi, not_int32);
|
| + __ cmpge(scratch, Operand(0));
|
| + __ b(f, not_int32);
|
| // Exponent greater than 31 cannot yield 32-bit integers.
|
| // Also, a positive value with an exponent equal to 31 is outside of the
|
| // signed 32-bit integer range.
|
| // Another way to put it is that if (exponent - signbit) > 30 then the
|
| // number cannot be represented as an int32.
|
| Register tmp = dst;
|
| - __ sub(tmp, scratch, Operand(src1, LSR, 31));
|
| - __ cmp(tmp, Operand(30));
|
| - __ b(gt, not_int32);
|
| + __ lsr(tmp, src1, Operand(31));
|
| + __ sub(tmp, scratch, tmp);
|
| + __ cmpgt(tmp, Operand(30));
|
| + __ b(t, not_int32);
|
| // - Bits [21:0] in the mantissa are not null.
|
| __ tst(src2, Operand(0x3fffff));
|
| __ b(ne, not_int32);
|
| @@ -1008,21 +1045,22 @@ void FloatingPointHelper::DoubleIs32BitInteger(MacroAssembler* masm,
|
| // non zero bits left. So we need the (30 - exponent) last bits of the
|
| // 31 higher bits of the mantissa to be null.
|
| // Because bits [21:0] are null, we can check instead that the
|
| - // (32 - exponent) last bits of the 32 higher bits of the mantissa are null.
|
| +  // (32 - exponent) last bits of the 32 higher bits of the mantissa are null.
|
|
|
| // Get the 32 higher bits of the mantissa in dst.
|
| __ Ubfx(dst,
|
| src2,
|
| HeapNumber::kMantissaBitsInTopWord,
|
| 32 - HeapNumber::kMantissaBitsInTopWord);
|
| + __ lsl(ip, src1, Operand(HeapNumber::kNonMantissaBitsInTopWord));
|
| __ orr(dst,
|
| dst,
|
| - Operand(src1, LSL, HeapNumber::kNonMantissaBitsInTopWord));
|
| + ip);
|
|
|
| // Create the mask and test the lower bits (of the higher bits).
|
| __ rsb(scratch, scratch, Operand(32));
|
| __ mov(src2, Operand(1));
|
| - __ mov(src1, Operand(src2, LSL, scratch));
|
| + __ lsl(src1, src2, scratch);
|
| __ sub(src1, src1, Operand(1));
|
| __ tst(dst, src1);
|
| __ b(ne, not_int32);
|
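Editor's note: DoubleIs32BitInteger works directly on the two raw words of the double. The value fits an int32 only if the unbiased exponent is non-negative, (exponent - signbit) does not exceed 30, and every mantissa bit that would fall below the binary point is zero. The same predicate restated in C (a sketch; the +0/-0 case is handled by the callers before this check, as in the assembly above):

    #include <stdbool.h>
    #include <stdint.h>
    #include <string.h>

    static bool DoubleIsInt32(double d) {
      uint64_t bits;
      memcpy(&bits, &d, sizeof bits);
      if (d == 0.0) return true;                    /* +0/-0 checked separately */
      int sign = (int)(bits >> 63);
      int exponent = (int)((bits >> 52) & 0x7FF) - 1023;
      if (exponent < 0) return false;               /* |d| < 1 but d != 0 */
      if (exponent - sign > 30) return false;       /* outside the int32 range */
      uint64_t mantissa = bits & ((1ULL << 52) - 1);
      /* The low (52 - exponent) mantissa bits sit below the binary point. */
      return (mantissa & ((1ULL << (52 - exponent)) - 1)) == 0;
    }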
| @@ -1042,35 +1080,34 @@ void FloatingPointHelper::CallCCodeForDoubleOperation(
|
|
|
| // Assert that heap_number_result is callee-saved.
|
| // We currently always use r5 to pass it.
|
| + // Note: as r5 is not callee-saved on SH4, we push/pop it below
|
| ASSERT(heap_number_result.is(r5));
|
|
|
| + // Calling C function (using double registers): move r0..r3 to fr4..fr7
|
| + __ movd(dr4, r0, r1);
|
| + __ movd(dr6, r2, r3);
|
| +
|
| // Push the current return address before the C call. Return will be
|
| // through pop(pc) below.
|
| __ push(lr);
|
| - __ PrepareCallCFunction(0, 2, scratch);
|
| - if (masm->use_eabi_hardfloat()) {
|
| - CpuFeatures::Scope scope(VFP2);
|
| - __ vmov(d0, r0, r1);
|
| - __ vmov(d1, r2, r3);
|
| - }
|
| + __ push(heap_number_result); // sh4 specific
|
| +  /* Use r0 as scratch: PrepareCallCFunction() disallows use of r4-r7 on sh4. */
|
| + __ PrepareCallCFunction(0, 2, r0);
|
| {
|
| AllowExternalCallThatCantCauseGC scope(masm);
|
| __ CallCFunction(
|
| ExternalReference::double_fp_operation(op, masm->isolate()), 0, 2);
|
| }
|
| + __ movd(r0, r1, dr0);
|
| // Store answer in the overwritable heap number. Double returned in
|
| // registers r0 and r1 or in d0.
|
| - if (masm->use_eabi_hardfloat()) {
|
| - CpuFeatures::Scope scope(VFP2);
|
| - __ vstr(d0,
|
| - FieldMemOperand(heap_number_result, HeapNumber::kValueOffset));
|
| - } else {
|
| - __ Strd(r0, r1, FieldMemOperand(heap_number_result,
|
| - HeapNumber::kValueOffset));
|
| - }
|
| + __ pop(heap_number_result); // sh4 specific
|
| + __ Strd(r0, r1, FieldMemOperand(heap_number_result,
|
| + HeapNumber::kValueOffset));
|
| // Place heap_number_result in r0 and return to the pushed return address.
|
| - __ mov(r0, Operand(heap_number_result));
|
| - __ pop(pc);
|
| + __ mov(r0, heap_number_result);
|
| + __ pop(lr);
|
| + __ rts();
|
| }
|
|
|
|
|
| @@ -1099,32 +1136,38 @@ void WriteInt32ToHeapNumberStub::GenerateFixedRegStubsAheadOfTime() {
|
|
|
| // See comment for class.
|
| void WriteInt32ToHeapNumberStub::Generate(MacroAssembler* masm) {
|
| + ASSERT(!scratch_.is(ip) && !the_int_.is(ip));
|
| Label max_negative_int;
|
| // the_int_ has the answer which is a signed int32 but not a Smi.
|
| // We test for the special value that has a different exponent. This test
|
| // has the neat side effect of setting the flags according to the sign.
|
| STATIC_ASSERT(HeapNumber::kSignMask == 0x80000000u);
|
| __ cmp(the_int_, Operand(0x80000000u));
|
| - __ b(eq, &max_negative_int);
|
| + __ b(eq, &max_negative_int, Label::kNear);
|
| // Set up the correct exponent in scratch_. All non-Smi int32s have the same.
|
| // A non-Smi integer is 1.xxx * 2^30 so the exponent is 30 (biased).
|
| uint32_t non_smi_exponent =
|
| (HeapNumber::kExponentBias + 30) << HeapNumber::kExponentShift;
|
| __ mov(scratch_, Operand(non_smi_exponent));
|
| + __ cmpge(the_int_, Operand(0));
|
| + Label skip;
|
| + __ bt_near(&skip);
|
| // Set the sign bit in scratch_ if the value was negative.
|
| - __ orr(scratch_, scratch_, Operand(HeapNumber::kSignMask), LeaveCC, cs);
|
| + __ lor(scratch_, scratch_, Operand(HeapNumber::kSignMask));
|
| // Subtract from 0 if the value was negative.
|
| - __ rsb(the_int_, the_int_, Operand(0, RelocInfo::NONE), LeaveCC, cs);
|
| + __ rsb(the_int_, the_int_, Operand(0));
|
| + __ bind(&skip);
|
| // We should be masking the implict first digit of the mantissa away here,
|
| // but it just ends up combining harmlessly with the last digit of the
|
| // exponent that happens to be 1. The sign bit is 0 so we shift 10 to get
|
| // the most significant 1 to hit the last bit of the 12 bit sign and exponent.
|
| ASSERT(((1 << HeapNumber::kExponentShift) & non_smi_exponent) != 0);
|
| const int shift_distance = HeapNumber::kNonMantissaBitsInTopWord - 2;
|
| - __ orr(scratch_, scratch_, Operand(the_int_, LSR, shift_distance));
|
| + __ lsr(ip, the_int_, Operand(shift_distance));
|
| + __ lor(scratch_, scratch_, ip);
|
| __ str(scratch_, FieldMemOperand(the_heap_number_,
|
| HeapNumber::kExponentOffset));
|
| - __ mov(scratch_, Operand(the_int_, LSL, 32 - shift_distance));
|
| + __ lsl(scratch_, the_int_, Operand(32 - shift_distance));
|
| __ str(scratch_, FieldMemOperand(the_heap_number_,
|
| HeapNumber::kMantissaOffset));
|
| __ Ret();
|
| @@ -1163,24 +1206,24 @@ static void EmitIdenticalObjectComparison(MacroAssembler* masm,
|
| // They are both equal and they are not both Smis so both of them are not
|
| // Smis. If it's not a heap number, then return equal.
|
| if (cond == lt || cond == gt) {
|
| - __ CompareObjectType(r0, r4, r4, FIRST_SPEC_OBJECT_TYPE);
|
| - __ b(ge, slow);
|
| + __ CompareObjectType(r0, r4, r4, FIRST_SPEC_OBJECT_TYPE, ge);
|
| + __ bt(slow);
|
| } else {
|
| - __ CompareObjectType(r0, r4, r4, HEAP_NUMBER_TYPE);
|
| - __ b(eq, &heap_number);
|
| + __ CompareObjectType(r0, r4, r4, HEAP_NUMBER_TYPE, eq);
|
| + __ b(eq, &heap_number, Label::kNear);
|
| // Comparing JS objects with <=, >= is complicated.
|
| if (cond != eq) {
|
| - __ cmp(r4, Operand(FIRST_SPEC_OBJECT_TYPE));
|
| - __ b(ge, slow);
|
| + __ cmpge(r4, Operand(FIRST_SPEC_OBJECT_TYPE));
|
| + __ bt(slow);
|
| // Normally here we fall through to return_equal, but undefined is
|
| // special: (undefined == undefined) == true, but
|
| // (undefined <= undefined) == false! See ECMAScript 11.8.5.
|
| if (cond == le || cond == ge) {
|
| __ cmp(r4, Operand(ODDBALL_TYPE));
|
| - __ b(ne, &return_equal);
|
| + __ b(ne, &return_equal, Label::kNear);
|
| __ LoadRoot(r2, Heap::kUndefinedValueRootIndex);
|
| __ cmp(r0, r2);
|
| - __ b(ne, &return_equal);
|
| + __ b(ne, &return_equal, Label::kNear);
|
| if (cond == le) {
|
| // undefined <= undefined should fail.
|
| __ mov(r0, Operand(GREATER));
|
| @@ -1224,10 +1267,11 @@ static void EmitIdenticalObjectComparison(MacroAssembler* masm,
|
| __ b(ne, &return_equal);
|
|
|
| // Shift out flag and all exponent bits, retaining only mantissa.
|
| - __ mov(r2, Operand(r2, LSL, HeapNumber::kNonMantissaBitsInTopWord));
|
| + __ lsl(r2, r2, Operand(HeapNumber::kNonMantissaBitsInTopWord));
|
| // Or with all low-bits of mantissa.
|
| __ ldr(r3, FieldMemOperand(r0, HeapNumber::kMantissaOffset));
|
| - __ orr(r0, r3, Operand(r2), SetCC);
|
| + __ orr(r0, r3, r2);
|
| + __ tst(r0, r0);
|
| // For equal we already have the right value in r0: Return zero (equal)
|
| // if all bits in mantissa are zero (it's an Infinity) and non-zero if
|
| // not (it's a NaN). For <= and >= we need to load r0 with the failing
|
| @@ -1261,16 +1305,16 @@ static void EmitSmiNonsmiComparison(MacroAssembler* masm,
|
| (lhs.is(r1) && rhs.is(r0)));
|
|
|
| Label rhs_is_smi;
|
| - __ JumpIfSmi(rhs, &rhs_is_smi);
|
| + __ JumpIfSmi(rhs, &rhs_is_smi, Label::kNear);
|
|
|
| // Lhs is a Smi. Check whether the rhs is a heap number.
|
| - __ CompareObjectType(rhs, r4, r4, HEAP_NUMBER_TYPE);
|
| + __ CompareObjectType(rhs, r4, r4, HEAP_NUMBER_TYPE, eq);
|
| if (strict) {
|
| // If rhs is not a number and lhs is a Smi then strict equality cannot
|
| // succeed. Return non-equal
|
| // If rhs is r0 then there is already a non zero value in it.
|
| if (!rhs.is(r0)) {
|
| - __ mov(r0, Operand(NOT_EQUAL), LeaveCC, ne);
|
| + __ mov(r0, Operand(NOT_EQUAL), ne);
|
| }
|
| __ Ret(ne);
|
| } else {
|
| @@ -1280,17 +1324,16 @@ static void EmitSmiNonsmiComparison(MacroAssembler* masm,
|
| }
|
|
|
| // Lhs is a smi, rhs is a number.
|
| - if (CpuFeatures::IsSupported(VFP2)) {
|
| - // Convert lhs to a double in d7.
|
| - CpuFeatures::Scope scope(VFP2);
|
| - __ SmiToDoubleVFPRegister(lhs, d7, r7, s15);
|
| - // Load the double from rhs, tagged HeapNumber r0, to d6.
|
| - __ sub(r7, rhs, Operand(kHeapObjectTag));
|
| - __ vldr(d6, r7, HeapNumber::kValueOffset);
|
| + if (CpuFeatures::IsSupported(FPU)) {
|
| + // Convert lhs to a double in dr2.
|
| + __ SmiToDoubleFPURegister(lhs, dr2, r7);
|
| + // Load the double from rhs, tagged HeapNumber r0, to dr0.
|
| + __ sub(r7, rhs, Operand(kHeapObjectTag - HeapNumber::kValueOffset));
|
| + __ dldr(dr0, MemOperand(r7, 0), r7);
|
| } else {
|
| __ push(lr);
|
| // Convert lhs to a double in r2, r3.
|
| - __ mov(r7, Operand(lhs));
|
| + __ mov(r7, lhs);
|
| ConvertToDoubleStub stub1(r3, r2, r7, r6);
|
| __ Call(stub1.GetCode());
|
| // Load rhs to a double in r0, r1.
|
| @@ -1304,13 +1347,13 @@ static void EmitSmiNonsmiComparison(MacroAssembler* masm,
|
|
|
| __ bind(&rhs_is_smi);
|
| // Rhs is a smi. Check whether the non-smi lhs is a heap number.
|
| - __ CompareObjectType(lhs, r4, r4, HEAP_NUMBER_TYPE);
|
| + __ CompareObjectType(lhs, r4, r4, HEAP_NUMBER_TYPE, eq);
|
| if (strict) {
|
| // If lhs is not a number and rhs is a smi then strict equality cannot
|
| // succeed. Return non-equal.
|
| // If lhs is r0 then there is already a non zero value in it.
|
| if (!lhs.is(r0)) {
|
| - __ mov(r0, Operand(NOT_EQUAL), LeaveCC, ne);
|
| + __ mov(r0, Operand(NOT_EQUAL), ne);
|
| }
|
| __ Ret(ne);
|
| } else {
|
| @@ -1320,19 +1363,18 @@ static void EmitSmiNonsmiComparison(MacroAssembler* masm,
|
| }
|
|
|
| // Rhs is a smi, lhs is a heap number.
|
| - if (CpuFeatures::IsSupported(VFP2)) {
|
| - CpuFeatures::Scope scope(VFP2);
|
| - // Load the double from lhs, tagged HeapNumber r1, to d7.
|
| - __ sub(r7, lhs, Operand(kHeapObjectTag));
|
| - __ vldr(d7, r7, HeapNumber::kValueOffset);
|
| - // Convert rhs to a double in d6 .
|
| - __ SmiToDoubleVFPRegister(rhs, d6, r7, s13);
|
| + if (CpuFeatures::IsSupported(FPU)) {
|
| + // Load the double from lhs, tagged HeapNumber r1, to dr2.
|
| + __ sub(r7, lhs, Operand(kHeapObjectTag - HeapNumber::kValueOffset));
|
| + __ dldr(dr2, MemOperand(r7, 0), r7);
|
| + // Convert rhs to a double in dr0.
|
| + __ SmiToDoubleFPURegister(rhs, dr0, r7);
|
| } else {
|
| __ push(lr);
|
| // Load lhs to a double in r2, r3.
|
| __ Ldrd(r2, r3, FieldMemOperand(lhs, HeapNumber::kValueOffset));
|
| // Convert rhs to a double in r0, r1.
|
| - __ mov(r7, Operand(rhs));
|
| + __ mov(r7, rhs);
|
| ConvertToDoubleStub stub2(r1, r0, r7, r6);
|
| __ Call(stub2.GetCode());
|
| __ pop(lr);
|
| @@ -1356,12 +1398,11 @@ void EmitNanCheck(MacroAssembler* masm, Label* lhs_not_nan, Condition cond) {
|
| // NaNs have all-one exponents so they sign extend to -1.
|
| __ cmp(r4, Operand(-1));
|
| __ b(ne, lhs_not_nan);
|
| - __ mov(r4,
|
| - Operand(lhs_exponent, LSL, HeapNumber::kNonMantissaBitsInTopWord),
|
| - SetCC);
|
| - __ b(ne, &one_is_nan);
|
| + __ lsl(r4, lhs_exponent, Operand(HeapNumber::kNonMantissaBitsInTopWord));
|
| + __ cmpeq(r4, Operand(0));
|
| + __ b(ne, &one_is_nan, Label::kNear);
|
| __ cmp(lhs_mantissa, Operand(0, RelocInfo::NONE));
|
| - __ b(ne, &one_is_nan);
|
| + __ b(ne, &one_is_nan, Label::kNear);
|
|
|
| __ bind(lhs_not_nan);
|
| __ Sbfx(r4,
|
| @@ -1370,13 +1411,12 @@ void EmitNanCheck(MacroAssembler* masm, Label* lhs_not_nan, Condition cond) {
|
| HeapNumber::kExponentBits);
|
| // NaNs have all-one exponents so they sign extend to -1.
|
| __ cmp(r4, Operand(-1));
|
| - __ b(ne, &neither_is_nan);
|
| - __ mov(r4,
|
| - Operand(rhs_exponent, LSL, HeapNumber::kNonMantissaBitsInTopWord),
|
| - SetCC);
|
| - __ b(ne, &one_is_nan);
|
| - __ cmp(rhs_mantissa, Operand(0, RelocInfo::NONE));
|
| - __ b(eq, &neither_is_nan);
|
| + __ b(ne, &neither_is_nan, Label::kNear);
|
| + __ lsl(r4, rhs_exponent, Operand(HeapNumber::kNonMantissaBitsInTopWord));
|
| + __ cmpeq(r4, Operand(0, RelocInfo::NONE));
|
| + __ b(ne, &one_is_nan, Label::kNear);
|
| + __ cmp(rhs_mantissa, Operand(0));
|
| + __ b(eq, &neither_is_nan, Label::kNear);
|
|
|
| __ bind(&one_is_nan);
|
| // NaN comparisons always fail.
|
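Editor's note: the core-register path above detects NaN without the FPU. A double is NaN exactly when its eleven exponent bits are all ones and its 52-bit mantissa is non-zero, which is what the Sbfx/cmp and shift/compare pairs test for each operand in turn. For reference, the same predicate in C:

    #include <stdbool.h>
    #include <stdint.h>
    #include <string.h>

    static bool IsNaNBits(double d) {
      uint64_t bits;
      memcpy(&bits, &d, sizeof bits);                  /* raw IEEE-754 encoding */
      uint32_t exponent = (uint32_t)((bits >> 52) & 0x7FF);
      uint64_t mantissa = bits & ((1ULL << 52) - 1);
      return exponent == 0x7FF && mantissa != 0;
    }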
| @@ -1405,12 +1445,13 @@ static void EmitTwoNonNanDoubleComparison(MacroAssembler* masm,
|
| if (cond == eq) {
|
| // Doubles are not equal unless they have the same bit pattern.
|
| // Exception: 0 and -0.
|
| - __ cmp(rhs_mantissa, Operand(lhs_mantissa));
|
| - __ orr(r0, rhs_mantissa, Operand(lhs_mantissa), LeaveCC, ne);
|
| + __ cmp(rhs_mantissa, lhs_mantissa);
|
| + __ orr(r0, rhs_mantissa, lhs_mantissa, ne);
|
| // Return non-zero if the numbers are unequal.
|
| __ Ret(ne);
|
|
|
| - __ sub(r0, rhs_exponent, Operand(lhs_exponent), SetCC);
|
| + __ sub(r0, rhs_exponent, lhs_exponent);
|
| + __ tst(r0, r0);
|
| // If exponents are equal then return 0.
|
| __ Ret(eq);
|
|
|
| @@ -1420,28 +1461,30 @@ static void EmitTwoNonNanDoubleComparison(MacroAssembler* masm,
|
| // We start by seeing if the mantissas (that are equal) or the bottom
|
| // 31 bits of the rhs exponent are non-zero. If so we return not
|
| // equal.
|
| - __ orr(r4, lhs_mantissa, Operand(lhs_exponent, LSL, kSmiTagSize), SetCC);
|
| - __ mov(r0, Operand(r4), LeaveCC, ne);
|
| + __ lsl(r4, lhs_exponent, Operand(kSmiTagSize));
|
| + __ orr(r4, lhs_mantissa, r4);
|
| + __ tst(r4, r4);
|
| + __ mov(r0, r4, ne);
|
| __ Ret(ne);
|
| // Now they are equal if and only if the lhs exponent is zero in its
|
| // low 31 bits.
|
| - __ mov(r0, Operand(rhs_exponent, LSL, kSmiTagSize));
|
| + __ lsl(r0, rhs_exponent, Operand(kSmiTagSize));
|
| __ Ret();
|
| } else {
|
| + __ Push(r4, r5, r6, r7);
|
| + // Calling C function: move r0..r3 to fr4..fr7
|
| + __ movd(dr4, r0, r1);
|
| + __ movd(dr6, r2, r3);
|
| +
|
| // Call a native function to do a comparison between two non-NaNs.
|
| // Call C routine that may not cause GC or other trouble.
|
| __ push(lr);
|
| - __ PrepareCallCFunction(0, 2, r5);
|
| - if (masm->use_eabi_hardfloat()) {
|
| - CpuFeatures::Scope scope(VFP2);
|
| - __ vmov(d0, r0, r1);
|
| - __ vmov(d1, r2, r3);
|
| - }
|
| -
|
| - AllowExternalCallThatCantCauseGC scope(masm);
|
| + __ PrepareCallCFunction(0, 2, r0);
|
| __ CallCFunction(ExternalReference::compare_doubles(masm->isolate()),
|
| 0, 2);
|
| - __ pop(pc); // Return.
|
| + __ pop(lr);
|
| + __ Pop(r4, r5, r6, r7);
|
| + __ Ret();
|
| }
|
| }
|
|
|
| @@ -1460,8 +1503,8 @@ static void EmitStrictTwoHeapObjectCompare(MacroAssembler* masm,
|
| Label first_non_object;
|
| // Get the type of the first operand into r2 and compare it with
|
| // FIRST_SPEC_OBJECT_TYPE.
|
| - __ CompareObjectType(rhs, r2, r2, FIRST_SPEC_OBJECT_TYPE);
|
| - __ b(lt, &first_non_object);
|
| + __ CompareObjectType(rhs, r2, r2, FIRST_SPEC_OBJECT_TYPE, ge);
|
| + __ bf_near(&first_non_object);
|
|
|
| // Return non-zero (r0 is not zero)
|
| Label return_not_equal;
|
| @@ -1473,8 +1516,8 @@ static void EmitStrictTwoHeapObjectCompare(MacroAssembler* masm,
|
| __ cmp(r2, Operand(ODDBALL_TYPE));
|
| __ b(eq, &return_not_equal);
|
|
|
| - __ CompareObjectType(lhs, r3, r3, FIRST_SPEC_OBJECT_TYPE);
|
| - __ b(ge, &return_not_equal);
|
| + __ CompareObjectType(lhs, r3, r3, FIRST_SPEC_OBJECT_TYPE, ge);
|
| + __ bt(&return_not_equal);
|
|
|
| // Check for oddballs: true, false, null, undefined.
|
| __ cmp(r3, Operand(ODDBALL_TYPE));
|
| @@ -1484,7 +1527,7 @@ static void EmitStrictTwoHeapObjectCompare(MacroAssembler* masm,
|
| // Ensure that no non-strings have the symbol bit set.
|
| STATIC_ASSERT(LAST_TYPE < kNotStringTag + kIsSymbolMask);
|
| STATIC_ASSERT(kSymbolTag != 0);
|
| - __ and_(r2, r2, Operand(r3));
|
| + __ land(r2, r2, r3);
|
| __ tst(r2, Operand(kIsSymbolMask));
|
| __ b(ne, &return_not_equal);
|
| }
|
| @@ -1500,7 +1543,7 @@ static void EmitCheckForTwoHeapNumbers(MacroAssembler* masm,
|
| ASSERT((lhs.is(r0) && rhs.is(r1)) ||
|
| (lhs.is(r1) && rhs.is(r0)));
|
|
|
| - __ CompareObjectType(rhs, r3, r2, HEAP_NUMBER_TYPE);
|
| + __ CompareObjectType(rhs, r3, r2, HEAP_NUMBER_TYPE, eq);
|
| __ b(ne, not_heap_numbers);
|
| __ ldr(r2, FieldMemOperand(lhs, HeapObject::kMapOffset));
|
| __ cmp(r2, r3);
|
| @@ -1508,12 +1551,11 @@ static void EmitCheckForTwoHeapNumbers(MacroAssembler* masm,
|
|
|
| // Both are heap numbers. Load them up then jump to the code we have
|
| // for that.
|
| - if (CpuFeatures::IsSupported(VFP2)) {
|
| - CpuFeatures::Scope scope(VFP2);
|
| - __ sub(r7, rhs, Operand(kHeapObjectTag));
|
| - __ vldr(d6, r7, HeapNumber::kValueOffset);
|
| - __ sub(r7, lhs, Operand(kHeapObjectTag));
|
| - __ vldr(d7, r7, HeapNumber::kValueOffset);
|
| + if (CpuFeatures::IsSupported(FPU)) {
|
| + __ sub(r7, rhs, Operand(kHeapObjectTag - HeapNumber::kValueOffset));
|
| + __ dldr(dr0, MemOperand(r7, 0), r7);
|
| + __ sub(r7, lhs, Operand(kHeapObjectTag - HeapNumber::kValueOffset));
|
| + __ dldr(dr2, MemOperand(r7, 0), r7);
|
| } else {
|
| __ Ldrd(r2, r3, FieldMemOperand(lhs, HeapNumber::kValueOffset));
|
| __ Ldrd(r0, r1, FieldMemOperand(rhs, HeapNumber::kValueOffset));
|
| @@ -1536,11 +1578,11 @@ static void EmitCheckForSymbolsOrObjects(MacroAssembler* masm,
|
| Label object_test;
|
| STATIC_ASSERT(kSymbolTag != 0);
|
| __ tst(r2, Operand(kIsNotStringMask));
|
| - __ b(ne, &object_test);
|
| + __ b(ne, &object_test, Label::kNear);
|
| __ tst(r2, Operand(kIsSymbolMask));
|
| __ b(eq, possible_strings);
|
| - __ CompareObjectType(lhs, r3, r3, FIRST_NONSTRING_TYPE);
|
| - __ b(ge, not_both_strings);
|
| + __ CompareObjectType(lhs, r3, r3, FIRST_NONSTRING_TYPE, ge);
|
| + __ bt(not_both_strings);
|
| __ tst(r3, Operand(kIsSymbolMask));
|
| __ b(eq, possible_strings);
|
|
|
| @@ -1550,18 +1592,18 @@ static void EmitCheckForSymbolsOrObjects(MacroAssembler* masm,
|
| __ Ret();
|
|
|
| __ bind(&object_test);
|
| - __ cmp(r2, Operand(FIRST_SPEC_OBJECT_TYPE));
|
| - __ b(lt, not_both_strings);
|
| - __ CompareObjectType(lhs, r2, r3, FIRST_SPEC_OBJECT_TYPE);
|
| - __ b(lt, not_both_strings);
|
| + __ cmpge(r2, Operand(FIRST_SPEC_OBJECT_TYPE));
|
| + __ bf(not_both_strings);
|
| + __ CompareObjectType(lhs, r2, r3, FIRST_SPEC_OBJECT_TYPE, ge);
|
| + __ bf(not_both_strings);
|
| // If both objects are undetectable, they are equal. Otherwise, they
|
| // are not equal, since they are different objects and an object is not
|
| // equal to undefined.
|
| __ ldr(r3, FieldMemOperand(rhs, HeapObject::kMapOffset));
|
| __ ldrb(r2, FieldMemOperand(r2, Map::kBitFieldOffset));
|
| __ ldrb(r3, FieldMemOperand(r3, Map::kBitFieldOffset));
|
| - __ and_(r0, r2, Operand(r3));
|
| - __ and_(r0, r0, Operand(1 << Map::kIsUndetectable));
|
| + __ land(r0, r2, r3);
|
| + __ land(r0, r0, Operand(1 << Map::kIsUndetectable));
|
| __ eor(r0, r0, Operand(1 << Map::kIsUndetectable));
|
| __ Ret();
|
| }
|
| @@ -1586,7 +1628,7 @@ void NumberToStringStub::GenerateLookupNumberStringCache(MacroAssembler* masm,
|
| // contains two elements (number and string) for each cache entry.
|
| __ ldr(mask, FieldMemOperand(number_string_cache, FixedArray::kLengthOffset));
|
| // Divide length by two (length is a smi).
|
| - __ mov(mask, Operand(mask, ASR, kSmiTagSize + 1));
|
| + __ asr(mask, mask, Operand(kSmiTagSize + 1));
|
| __ sub(mask, mask, Operand(1)); // Make mask.
|
|
|
| // Calculate the entry in the number string cache. The hash value in the
|
| @@ -1597,9 +1639,9 @@ void NumberToStringStub::GenerateLookupNumberStringCache(MacroAssembler* masm,
|
| Label is_smi;
|
| Label load_result_from_cache;
|
| if (!object_is_smi) {
|
| - __ JumpIfSmi(object, &is_smi);
|
| - if (CpuFeatures::IsSupported(VFP2)) {
|
| - CpuFeatures::Scope scope(VFP2);
|
| + __ JumpIfSmi(object, &is_smi, Label::kNear);
|
| +
|
| + if (CpuFeatures::IsSupported(FPU)) {
|
| __ CheckMap(object,
|
| scratch1,
|
| Heap::kHeapNumberMapRootIndex,
|
| @@ -1610,25 +1652,27 @@ void NumberToStringStub::GenerateLookupNumberStringCache(MacroAssembler* masm,
|
| __ add(scratch1,
|
| object,
|
| Operand(HeapNumber::kValueOffset - kHeapObjectTag));
|
| - __ ldm(ia, scratch1, scratch1.bit() | scratch2.bit());
|
| - __ eor(scratch1, scratch1, Operand(scratch2));
|
| - __ and_(scratch1, scratch1, Operand(mask));
|
| + __ ldr(scratch2, MemOperand(scratch1, 4));
|
| + __ ldr(scratch1, MemOperand(scratch1, 0));
|
| +
|
| + __ eor(scratch1, scratch1, scratch2);
|
| + __ land(scratch1, scratch1, mask);
|
|
|
| // Calculate address of entry in string cache: each entry consists
|
| // of two pointer sized fields.
|
| - __ add(scratch1,
|
| - number_string_cache,
|
| - Operand(scratch1, LSL, kPointerSizeLog2 + 1));
|
| + __ lsl(scratch1, scratch1, Operand(kPointerSizeLog2 + 1));
|
| + __ add(scratch1, number_string_cache, scratch1);
|
|
|
| Register probe = mask;
|
| __ ldr(probe,
|
| FieldMemOperand(scratch1, FixedArray::kHeaderSize));
|
| __ JumpIfSmi(probe, not_found);
|
| - __ sub(scratch2, object, Operand(kHeapObjectTag));
|
| - __ vldr(d0, scratch2, HeapNumber::kValueOffset);
|
| + __ sub(scratch2, object, Operand(kHeapObjectTag -
|
| + HeapNumber::kValueOffset));
|
| + __ dldr(dr0, MemOperand(scratch2, 0), scratch2);
|
| __ sub(probe, probe, Operand(kHeapObjectTag));
|
| - __ vldr(d1, probe, HeapNumber::kValueOffset);
|
| - __ VFPCompareAndSetFlags(d0, d1);
|
| + __ dldr(dr2, MemOperand(probe, HeapNumber::kValueOffset));
|
| + __ dcmpeq(dr0, dr2);
|
| __ b(ne, not_found); // The cache did not contain this value.
|
| __ b(&load_result_from_cache);
|
| } else {
|
| @@ -1638,12 +1682,12 @@ void NumberToStringStub::GenerateLookupNumberStringCache(MacroAssembler* masm,
|
|
|
| __ bind(&is_smi);
|
| Register scratch = scratch1;
|
| - __ and_(scratch, mask, Operand(object, ASR, 1));
|
| + __ asr(scratch, object, Operand(1));
|
| + __ land(scratch, mask, scratch);
|
| // Calculate address of entry in string cache: each entry consists
|
| // of two pointer sized fields.
|
| - __ add(scratch,
|
| - number_string_cache,
|
| - Operand(scratch, LSL, kPointerSizeLog2 + 1));
|
| + __ lsl(scratch, scratch, Operand(kPointerSizeLog2 + 1));
|
| + __ add(scratch, number_string_cache, scratch);
|
|
|
| // Check if the entry is the smi we are looking for.
|
| Register probe = mask;
|
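Editor's note: the lookup above hashes a heap number by XOR-ing the two 32-bit halves of its value and masking with the entry count minus one, and hashes a smi by untagging it; each cache entry is a (number, string) pair, hence the extra shift by one when forming the byte offset. A small C sketch of the index computation under those assumptions:

    #include <stdint.h>

    /* mask == (number of cache entries) - 1, a power of two minus one. */
    static uint32_t HeapNumberCacheIndex(uint32_t value_hi, uint32_t value_lo,
                                         uint32_t mask) {
      return (value_hi ^ value_lo) & mask;
    }

    static uint32_t SmiCacheIndex(int32_t tagged_smi, uint32_t mask) {
      return (uint32_t)(tagged_smi >> 1) & mask;   /* untag, then mask */
    }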
| @@ -1663,6 +1707,9 @@ void NumberToStringStub::GenerateLookupNumberStringCache(MacroAssembler* masm,
|
|
|
|
|
| void NumberToStringStub::Generate(MacroAssembler* masm) {
|
| + // Entry argument: on stack
|
| + // Exit in: r0
|
| +
|
| Label runtime;
|
|
|
| __ ldr(r1, MemOperand(sp, 0));
|
| @@ -1691,9 +1738,10 @@ void CompareStub::Generate(MacroAssembler* masm) {
|
| if (include_smi_compare_) {
|
| Label not_two_smis, smi_done;
|
| __ orr(r2, r1, r0);
|
| - __ JumpIfNotSmi(r2, ¬_two_smis);
|
| - __ mov(r1, Operand(r1, ASR, 1));
|
| - __ sub(r0, r1, Operand(r0, ASR, 1));
|
| + __ JumpIfNotSmi(r2, ¬_two_smis, Label::kNear);
|
| + __ asr(r1, r1, Operand(1));
|
| + __ asr(r0, r0, Operand(1));
|
| + __ sub(r0, r1, r0);
|
| __ Ret();
|
| __ bind(¬_two_smis);
|
| } else if (FLAG_debug_code) {
|
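Editor's note: with both operands known to be smis, the comparison collapses to plain integer arithmetic. One arithmetic shift untags each value and the sign of the difference encodes less/equal/greater. A C sketch, assuming the usual arithmetic right shift on signed integers; the subtraction cannot overflow because untagged smis span only 31 bits:

    #include <stdint.h>

    static int32_t CompareSmis(int32_t lhs_tagged, int32_t rhs_tagged) {
      int32_t lhs = lhs_tagged >> 1;   /* untag (tag bit is 0) */
      int32_t rhs = rhs_tagged >> 1;
      return lhs - rhs;                /* < 0, 0 or > 0 */
    }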
| @@ -1713,7 +1761,7 @@ void CompareStub::Generate(MacroAssembler* masm) {
|
| // be strictly equal if the other is a HeapNumber.
|
| STATIC_ASSERT(kSmiTag == 0);
|
| ASSERT_EQ(0, Smi::FromInt(0));
|
| - __ and_(r2, lhs_, Operand(rhs_));
|
| + __ land(r2, lhs_, rhs_);
|
| __ JumpIfNotSmi(r2, ¬_smis);
|
| // One operand is a smi. EmitSmiNonsmiComparison generates code that can:
|
| // 1) Return the answer.
|
| @@ -1721,32 +1769,46 @@ void CompareStub::Generate(MacroAssembler* masm) {
|
| // 3) Fall through to both_loaded_as_doubles.
|
| // 4) Jump to lhs_not_nan.
|
| // In cases 3 and 4 we have found out we were dealing with a number-number
|
| - // comparison. If VFP3 is supported the double values of the numbers have
|
| - // been loaded into d7 and d6. Otherwise, the double values have been loaded
|
| - // into r0, r1, r2, and r3.
|
| + // comparison. If FPU is supported the double values of the numbers have
|
| + // been loaded into dr2 and dr0. Otherwise, the double values have been
|
| + // loaded into r0, r1, r2, and r3.
|
| EmitSmiNonsmiComparison(masm, lhs_, rhs_, &lhs_not_nan, &slow, strict_);
|
|
|
| __ bind(&both_loaded_as_doubles);
|
| - // The arguments have been converted to doubles and stored in d6 and d7, if
|
| - // VFP3 is supported, or in r0, r1, r2, and r3.
|
| + // The arguments have been converted to doubles and stored in dr0 and dr2, if
|
| + // FPU is supported, or in r0, r1, r2, and r3.
|
| Isolate* isolate = masm->isolate();
|
| - if (CpuFeatures::IsSupported(VFP2)) {
|
| + if (CpuFeatures::IsSupported(FPU)) {
|
| __ bind(&lhs_not_nan);
|
| - CpuFeatures::Scope scope(VFP2);
|
| - Label no_nan;
|
| - // ARMv7 VFP3 instructions to implement double precision comparison.
|
| - __ VFPCompareAndSetFlags(d7, d6);
|
| +
|
| + // Test for NaN
|
| Label nan;
|
| - __ b(vs, &nan);
|
| - __ mov(r0, Operand(EQUAL), LeaveCC, eq);
|
| - __ mov(r0, Operand(LESS), LeaveCC, lt);
|
| - __ mov(r0, Operand(GREATER), LeaveCC, gt);
|
| - __ Ret();
|
| + __ dcmpeq(dr0, dr0);
|
| + __ bf_near(&nan);
|
| + __ dcmpeq(dr2, dr2);
|
| + __ bf_near(&nan);
|
| +
|
| + // Test for eq, lt and gt
|
| + Label equal, greater;
|
| + __ dcmpeq(dr2, dr0);
|
| + __ bt_near(&equal);
|
| + __ dcmpgt(dr2, dr0);
|
| + __ bt_near(&greater);
|
| +
|
| + __ mov(r0, Operand(LESS));
|
| + __ rts();
|
| +
|
| + __ bind(&equal);
|
| + __ mov(r0, Operand(EQUAL));
|
| + __ rts();
|
| +
|
| + __ bind(&greater);
|
| + __ mov(r0, Operand(GREATER));
|
| + __ rts();
|
|
|
| __ bind(&nan);
|
| - // If one of the sides was a NaN then the v flag is set. Load r0 with
|
| - // whatever it takes to make the comparison fail, since comparisons with NaN
|
| - // always fail.
|
| +  // One of the sides was a NaN. Load r0 with whatever it takes to make the
|
| + // comparison fail, since comparisons with NaN always fail.
|
| if (cc_ == lt || cc_ == le) {
|
| __ mov(r0, Operand(GREATER));
|
| } else {
|
| @@ -1881,80 +1943,51 @@ void ToBooleanStub::Generate(MacroAssembler* masm) {
|
| __ ldrb(ip, FieldMemOperand(map, Map::kBitFieldOffset));
|
| __ tst(ip, Operand(1 << Map::kIsUndetectable));
|
| // Undetectable -> false.
|
| - __ mov(tos_, Operand(0, RelocInfo::NONE), LeaveCC, ne);
|
| - __ Ret(ne);
|
| + Label skip;
|
| + __ bt_near(&skip);
|
| + __ mov(tos_, Operand(0, RelocInfo::NONE));
|
| + __ rts();
|
| + __ bind(&skip);
|
| }
|
| }
|
|
|
| if (types_.Contains(SPEC_OBJECT)) {
|
| // Spec object -> true.
|
| - __ CompareInstanceType(map, ip, FIRST_SPEC_OBJECT_TYPE);
|
| + __ CompareInstanceType(map, ip, FIRST_SPEC_OBJECT_TYPE, ge);
|
| // tos_ contains the correct non-zero return value already.
|
| - __ Ret(ge);
|
| + __ Ret(eq);
|
| }
|
|
|
| if (types_.Contains(STRING)) {
|
| // String value -> false iff empty.
|
| - __ CompareInstanceType(map, ip, FIRST_NONSTRING_TYPE);
|
| - __ ldr(tos_, FieldMemOperand(tos_, String::kLengthOffset), lt);
|
| - __ Ret(lt); // the string length is OK as the return value
|
| + __ CompareInstanceType(map, ip, FIRST_NONSTRING_TYPE, ge);
|
| + Label skip;
|
| + __ bt_near(&skip);
|
| + __ ldr(tos_, FieldMemOperand(tos_, String::kLengthOffset));
|
| + __ rts(); // the string length is OK as the return value
|
| + __ bind(&skip);
|
| }
|
|
|
| if (types_.Contains(HEAP_NUMBER)) {
|
| // Heap number -> false iff +0, -0, or NaN.
|
| Label not_heap_number;
|
| __ CompareRoot(map, Heap::kHeapNumberMapRootIndex);
|
| - __ b(ne, ¬_heap_number);
|
| -
|
| - if (CpuFeatures::IsSupported(VFP2)) {
|
| - CpuFeatures::Scope scope(VFP2);
|
| -
|
| - __ vldr(d1, FieldMemOperand(tos_, HeapNumber::kValueOffset));
|
| - __ VFPCompareAndSetFlags(d1, 0.0);
|
| - // "tos_" is a register, and contains a non zero value by default.
|
| - // Hence we only need to overwrite "tos_" with zero to return false for
|
| - // FP_ZERO or FP_NAN cases. Otherwise, by default it returns true.
|
| - __ mov(tos_, Operand(0, RelocInfo::NONE), LeaveCC, eq); // for FP_ZERO
|
| - __ mov(tos_, Operand(0, RelocInfo::NONE), LeaveCC, vs); // for FP_NAN
|
| - } else {
|
| - Label done, not_nan, not_zero;
|
| - __ ldr(temp, FieldMemOperand(tos_, HeapNumber::kExponentOffset));
|
| - // -0 maps to false:
|
| - __ bic(
|
| - temp, temp, Operand(HeapNumber::kSignMask, RelocInfo::NONE), SetCC);
|
| - __ b(ne, ¬_zero);
|
| - // If exponent word is zero then the answer depends on the mantissa word.
|
| - __ ldr(tos_, FieldMemOperand(tos_, HeapNumber::kMantissaOffset));
|
| - __ jmp(&done);
|
| -
|
| - // Check for NaN.
|
| - __ bind(¬_zero);
|
| - // We already zeroed the sign bit, now shift out the mantissa so we only
|
| - // have the exponent left.
|
| - __ mov(temp, Operand(temp, LSR, HeapNumber::kMantissaBitsInTopWord));
|
| - unsigned int shifted_exponent_mask =
|
| - HeapNumber::kExponentMask >> HeapNumber::kMantissaBitsInTopWord;
|
| - __ cmp(temp, Operand(shifted_exponent_mask, RelocInfo::NONE));
|
| - __ b(ne, ¬_nan); // If exponent is not 0x7ff then it can't be a NaN.
|
| -
|
| - // Reload exponent word.
|
| - __ ldr(temp, FieldMemOperand(tos_, HeapNumber::kExponentOffset));
|
| - __ tst(temp, Operand(HeapNumber::kMantissaMask, RelocInfo::NONE));
|
| - // If mantissa is not zero then we have a NaN, so return 0.
|
| - __ mov(tos_, Operand(0, RelocInfo::NONE), LeaveCC, ne);
|
| - __ b(ne, &done);
|
| -
|
| - // Load mantissa word.
|
| - __ ldr(temp, FieldMemOperand(tos_, HeapNumber::kMantissaOffset));
|
| - __ cmp(temp, Operand(0, RelocInfo::NONE));
|
| - // If mantissa is not zero then we have a NaN, so return 0.
|
| - __ mov(tos_, Operand(0, RelocInfo::NONE), LeaveCC, ne);
|
| - __ b(ne, &done);
|
| -
|
| - __ bind(¬_nan);
|
| - __ mov(tos_, Operand(1, RelocInfo::NONE));
|
| - __ bind(&done);
|
| - }
|
| + __ bf(¬_heap_number);
|
| +
|
| + if (CpuFeatures::IsSupported(FPU)) {
|
| + __ dldr(dr0, FieldMemOperand(tos_, HeapNumber::kValueOffset));
|
| + // "tos_" is a register, and contains a non zero value by default.
|
| + // Hence we only need to overwrite "tos_" with zero to return false for
|
| + // FP_ZERO or FP_NAN cases. Otherwise, by default it returns true.
|
| + __ dfloat(dr2, Operand(0));
|
| + __ dcmpeq(dr0, dr2);
|
| + __ mov(tos_, Operand(0, RelocInfo::NONE), eq); // for FP_ZERO
|
| + __ dcmpeq(dr0, dr0);
|
| + // for FP_NAN (dr0 != dr0 iff isnan(dr0))
|
| + __ mov(tos_, Operand(0, RelocInfo::NONE), ne);
|
| + } else {
|
| + UNIMPLEMENTED();
|
| + }
|
| __ Ret();
|
| __ bind(¬_heap_number);
|
| }
|
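Editor's note: the FPU branch above replaces the removed VFP sequence with two self-comparisons: `d == 0.0` holds for both +0 and -0, and `d != d` holds only for NaN, so the two "falsy" heap-number cases are caught without inspecting the raw bits. The same idea in C:

    #include <stdbool.h>

    /* ToBoolean for a heap number: false for +0, -0 and NaN, true otherwise. */
    static bool HeapNumberToBoolean(double d) {
      if (d == 0.0) return false;   /* catches +0 and -0 */
      if (d != d) return false;     /* only NaN compares unequal to itself */
      return true;
    }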
| @@ -1975,7 +2008,7 @@ void ToBooleanStub::CheckOddball(MacroAssembler* masm,
|
| // The value of a root is never NULL, so we can avoid loading a non-null
|
| // value into tos_ when we want to return 'true'.
|
| if (!result) {
|
| - __ mov(tos_, Operand(0, RelocInfo::NONE), LeaveCC, eq);
|
| + __ mov(tos_, Operand(0, RelocInfo::NONE), eq);
|
| }
|
| __ Ret(eq);
|
| }
|
| @@ -1984,7 +2017,7 @@ void ToBooleanStub::CheckOddball(MacroAssembler* masm,
|
|
|
| void ToBooleanStub::GenerateTypeTransition(MacroAssembler* masm) {
|
| if (!tos_.is(r3)) {
|
| - __ mov(r3, Operand(tos_));
|
| + __ mov(r3, tos_);
|
| }
|
| __ mov(r2, Operand(Smi::FromInt(tos_.code())));
|
| __ mov(r1, Operand(Smi::FromInt(types_.ToByte())));
|
| @@ -2002,14 +2035,10 @@ void StoreBufferOverflowStub::Generate(MacroAssembler* masm) {
|
| // We don't allow a GC during a store buffer overflow so there is no need to
|
| // store the registers in any particular way, but we do have to store and
|
| // restore them.
|
| - __ stm(db_w, sp, kCallerSaved | lr.bit());
|
| + __ pushm(kJSCallerSaved);
|
| + __ push(pr);
|
| if (save_doubles_ == kSaveFPRegs) {
|
| - CpuFeatures::Scope scope(VFP2);
|
| - __ sub(sp, sp, Operand(kDoubleSize * DwVfpRegister::kNumRegisters));
|
| - for (int i = 0; i < DwVfpRegister::kNumRegisters; i++) {
|
| - DwVfpRegister reg = DwVfpRegister::from_code(i);
|
| - __ vstr(reg, MemOperand(sp, i * kDoubleSize));
|
| - }
|
| + UNIMPLEMENTED();
|
| }
|
| const int argument_count = 1;
|
| const int fp_argument_count = 0;
|
| @@ -2022,14 +2051,11 @@ void StoreBufferOverflowStub::Generate(MacroAssembler* masm) {
|
| ExternalReference::store_buffer_overflow_function(masm->isolate()),
|
| argument_count);
|
| if (save_doubles_ == kSaveFPRegs) {
|
| - CpuFeatures::Scope scope(VFP2);
|
| - for (int i = 0; i < DwVfpRegister::kNumRegisters; i++) {
|
| - DwVfpRegister reg = DwVfpRegister::from_code(i);
|
| - __ vldr(reg, MemOperand(sp, i * kDoubleSize));
|
| - }
|
| - __ add(sp, sp, Operand(kDoubleSize * DwVfpRegister::kNumRegisters));
|
| + UNIMPLEMENTED();
|
| }
|
| - __ ldm(ia_w, sp, kCallerSaved | pc.bit()); // Also pop pc to get Ret(0).
|
| + __ pop(pr);
|
| + __ popm(kJSCallerSaved);
|
| + __ rts();
|
| }
|
|
|
|
|
| @@ -2067,7 +2093,7 @@ void UnaryOpStub::Generate(MacroAssembler* masm) {
|
|
|
|
|
| void UnaryOpStub::GenerateTypeTransition(MacroAssembler* masm) {
|
| - __ mov(r3, Operand(r0)); // the operand
|
| + __ mov(r3, r0); // the operand
|
| __ mov(r2, Operand(Smi::FromInt(op_)));
|
| __ mov(r1, Operand(Smi::FromInt(mode_)));
|
| __ mov(r0, Operand(Smi::FromInt(operand_type_)));
|
| @@ -2116,7 +2142,8 @@ void UnaryOpStub::GenerateSmiCodeSub(MacroAssembler* masm,
|
| __ JumpIfNotSmi(r0, non_smi);
|
|
|
| // The result of negating zero or the smallest negative smi is not a smi.
|
| - __ bic(ip, r0, Operand(0x80000000), SetCC);
|
| + __ bic(ip, r0, Operand(0x80000000));
|
| + __ tst(ip, ip);
|
| __ b(eq, slow);
|
|
|
| // Return '0 - value'.
|
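Editor's note: the bic/tst pair above implements the guard mentioned in the comment. Clearing bit 31 of the tagged operand yields zero only for 0 (whose negation is -0, not representable as a smi) and for 0x80000000, the most negative smi, whose negation overflows the smi range. A C restatement of that guard (sketch only):

    #include <stdbool.h>
    #include <stdint.h>

    /* True when negating the tagged smi produces another valid smi. */
    static bool NegationStaysSmi(int32_t tagged_smi) {
      return ((uint32_t)tagged_smi & 0x7FFFFFFFu) != 0;
    }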
| @@ -2130,7 +2157,7 @@ void UnaryOpStub::GenerateSmiCodeBitNot(MacroAssembler* masm,
|
| __ JumpIfNotSmi(r0, non_smi);
|
|
|
| // Flip bits and revert inverted smi-tag.
|
| - __ mvn(r0, Operand(r0));
|
| + __ mvn(r0, r0);
|
| __ bic(r0, r0, Operand(kSmiTagMask));
|
| __ Ret();
|
| }
|
| @@ -2183,14 +2210,14 @@ void UnaryOpStub::GenerateHeapNumberCodeSub(MacroAssembler* masm,
|
| } else {
|
| Label slow_allocate_heapnumber, heapnumber_allocated;
|
| __ AllocateHeapNumber(r1, r2, r3, r6, &slow_allocate_heapnumber);
|
| - __ jmp(&heapnumber_allocated);
|
| + __ jmp_near(&heapnumber_allocated);
|
|
|
| __ bind(&slow_allocate_heapnumber);
|
| {
|
| FrameScope scope(masm, StackFrame::INTERNAL);
|
| __ push(r0);
|
| __ CallRuntime(Runtime::kNumberAlloc, 0);
|
| - __ mov(r1, Operand(r0));
|
| + __ mov(r1, r0);
|
| __ pop(r0);
|
| }
|
|
|
| @@ -2200,7 +2227,7 @@ void UnaryOpStub::GenerateHeapNumberCodeSub(MacroAssembler* masm,
|
| __ str(r3, FieldMemOperand(r1, HeapNumber::kMantissaOffset));
|
| __ eor(r2, r2, Operand(HeapNumber::kSignMask)); // Flip sign.
|
| __ str(r2, FieldMemOperand(r1, HeapNumber::kExponentOffset));
|
| - __ mov(r0, Operand(r1));
|
| + __ mov(r0, r1);
|
| }
|
| __ Ret();
|
| }
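|
| Side note: a minimal C++ sketch (not from the patch) of what the eor with
| HeapNumber::kSignMask above does. An IEEE-754 double is negated by toggling
| the sign bit in its high word, so only the exponent/sign word of the heap
| number has to be rewritten.
|
|   #include <cstdint>
|   #include <cstring>
|
|   static double NegateViaSignBit(double x) {
|     uint64_t bits;
|     std::memcpy(&bits, &x, sizeof(bits));
|     bits ^= 0x8000000000000000ull;  // the bit selected by HeapNumber::kSignMask
|     std::memcpy(&x, &bits, sizeof(x));
|     return x;
|   }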
|
| @@ -2212,16 +2239,17 @@ void UnaryOpStub::GenerateHeapNumberCodeBitNot(
|
|
|
| EmitCheckForHeapNumber(masm, r0, r1, r6, slow);
|
| // Convert the heap number is r0 to an untagged integer in r1.
|
| - __ ConvertToInt32(r0, r1, r2, r3, d0, slow);
|
| + __ ConvertToInt32(r0, r1, r2, r3, dr0, slow);
|
|
|
| // Do the bitwise operation and check if the result fits in a smi.
|
| Label try_float;
|
| - __ mvn(r1, Operand(r1));
|
| - __ add(r2, r1, Operand(0x40000000), SetCC);
|
| - __ b(mi, &try_float);
|
| + __ mvn(r1, r1);
|
| + __ add(r2, r1, Operand(0x40000000));
|
| + __ cmpge(r2, Operand(0));
|
| + __ bf(&try_float);
|
|
|
| // Tag the result as a smi and we're done.
|
| - __ mov(r0, Operand(r1, LSL, kSmiTagSize));
|
| + __ lsl(r0, r1, Operand(kSmiTagSize));
|
| __ Ret();
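|
| Side note: a minimal C++ sketch (not from the patch) of the smi-range test
| used above and in several stubs below, assuming 31-bit smis: adding
| 0x40000000 to the untagged value keeps the sign bit clear exactly when the
| value lies in [-2^30, 2^30 - 1], i.e. when it can be tagged as a smi.
|
|   #include <cstdint>
|
|   static bool FitsInSmi(int32_t v) {
|     // add(r2, r1, 0x40000000); cmpge(r2, 0) above computes this predicate.
|     return (static_cast<uint32_t>(v) + 0x40000000u) < 0x80000000u;
|   }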
|
|
|
| // Try to store the result in a heap number.
|
| @@ -2245,21 +2273,19 @@ void UnaryOpStub::GenerateHeapNumberCodeBitNot(
|
| // Convert the heap number in r0 to an untagged integer in r1.
|
| // This can't go slow-case because it's the same number we already
|
| // converted once again.
|
| - __ ConvertToInt32(r0, r1, r3, r4, d0, &impossible);
|
| - __ mvn(r1, Operand(r1));
|
| + __ ConvertToInt32(r0, r1, r3, r4, dr0, &impossible);
|
| + __ mvn(r1, r1);
|
|
|
| __ bind(&heapnumber_allocated);
|
| __ mov(r0, r2); // Move newly allocated heap number to r0.
|
| }
|
|
|
| - if (CpuFeatures::IsSupported(VFP2)) {
|
| + if (CpuFeatures::IsSupported(FPU)) {
|
| // Convert the int32 in r1 to the heap number in r0. r2 is corrupted.
|
| - CpuFeatures::Scope scope(VFP2);
|
| - __ vmov(s0, r1);
|
| - __ vcvt_f64_s32(d0, s0);
|
| - __ sub(r2, r0, Operand(kHeapObjectTag));
|
| - __ vstr(d0, r2, HeapNumber::kValueOffset);
|
| - __ Ret();
|
| + __ dfloat(dr0, r1);
|
| + __ sub(r2, r0, Operand(kHeapObjectTag - HeapNumber::kValueOffset));
|
| + __ dstr(dr0, MemOperand(r2, 0), r2);
|
| + __ rts();
|
| } else {
|
| // WriteInt32ToHeapNumberStub does not trigger GC, so we do not
|
| // have to set up a frame.
|
| @@ -2345,7 +2371,7 @@ void BinaryOpStub::GenerateTypeTransition(MacroAssembler* masm) {
|
|
|
| void BinaryOpStub::GenerateTypeTransitionWithSavedArgs(
|
| MacroAssembler* masm) {
|
| - UNIMPLEMENTED();
|
| + __ UNIMPLEMENTED_BREAK();
|
| }
|
|
|
|
|
| @@ -2410,40 +2436,46 @@ void BinaryOpStub::GenerateSmiSmiOperation(MacroAssembler* masm) {
|
| ASSERT(right.is(r0));
|
| STATIC_ASSERT(kSmiTag == 0);
|
|
|
| - Label not_smi_result;
|
| + Label not_smi_result, skip_if_true, skip_if_false;
|
| switch (op_) {
|
| case Token::ADD:
|
| - __ add(right, left, Operand(right), SetCC); // Add optimistically.
|
| - __ Ret(vc);
|
| - __ sub(right, right, Operand(left)); // Revert optimistic add.
|
| + __ addv(right, left, right); // Add optimistically.
|
| + __ Ret(f); // Return if no overflow.
|
| + __ sub(right, right, left); // Revert optimistic add.
|
| break;
|
| case Token::SUB:
|
| - __ sub(right, left, Operand(right), SetCC); // Subtract optimistically.
|
| - __ Ret(vc);
|
| - __ sub(right, left, Operand(right)); // Revert optimistic subtract.
|
| + __ subv(right, left, right); // Subtract optimistically.
|
| + __ Ret(f); // Return if no overflow.
|
| + __ sub(right, left, right); // Revert optimistic subtract.
|
| break;
|
| case Token::MUL:
|
| + // TODO(stm): implement optimized multiply with overflow check for SH4
|
| // Remove tag from one of the operands. This way the multiplication result
|
| // will be a smi if it fits the smi range.
|
| __ SmiUntag(ip, right);
|
| // Do multiplication
|
| // scratch1 = lower 32 bits of ip * left.
|
| // scratch2 = higher 32 bits of ip * left.
|
| - __ smull(scratch1, scratch2, left, ip);
|
| + __ dmuls(scratch1, scratch2, left, ip);
|
| // Check for overflowing the smi range - no overflow if higher 33 bits of
|
| // the result are identical.
|
| - __ mov(ip, Operand(scratch1, ASR, 31));
|
| - __ cmp(ip, Operand(scratch2));
|
| + __ asr(ip, scratch1, Operand(31));
|
| + __ cmp(ip, scratch2);
|
| __ b(ne, ¬_smi_result);
|
| // Go slow on zero result to handle -0.
|
| - __ cmp(scratch1, Operand(0));
|
| - __ mov(right, Operand(scratch1), LeaveCC, ne);
|
| - __ Ret(ne);
|
| + __ tst(scratch1, scratch1);
|
| + __ bt_near(&skip_if_true);
|
| + __ mov(right, scratch1);
|
| + __ rts();
|
| + __ bind(&skip_if_true);
|
| // We need -0 if we were multiplying a negative number with 0 to get 0.
|
| // We know one of them was zero.
|
| - __ add(scratch2, right, Operand(left), SetCC);
|
| - __ mov(right, Operand(Smi::FromInt(0)), LeaveCC, pl);
|
| - __ Ret(pl); // Return smi 0 if the non-zero one was positive.
|
| + __ add(scratch2, right, left);
|
| + __ cmpge(scratch2, Operand(0));
|
| + __ bf_near(&skip_if_false);
|
| + __ mov(right, Operand(Smi::FromInt(0)));
|
| + __ rts(); // Return smi 0 if the non-zero one was positive.
|
| + __ bind(&skip_if_false);
|
| // We fall through here if we multiplied a negative number with 0, because
|
| // that would mean we should produce -0.
|
| break;
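|
| Side note: a minimal C++ sketch (not from the patch) of the overflow test
| used above, assuming dmuls produces the full signed 64-bit product: the
| product fits in 32 bits exactly when the high word equals the sign extension
| of the low word.
|
|   #include <cstdint>
|
|   static bool ProductFitsInt32(int32_t a, int32_t b) {
|     int64_t product = static_cast<int64_t>(a) * b;
|     int32_t lo = static_cast<int32_t>(product);
|     int32_t hi = static_cast<int32_t>(product >> 32);
|     // Same test as: asr(ip, lo, 31); cmp(ip, hi) (arithmetic shift assumed).
|     return hi == (lo >> 31);
|   }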
|
| @@ -2458,12 +2490,12 @@ void BinaryOpStub::GenerateSmiSmiOperation(MacroAssembler* masm) {
|
| // Perform division by shifting.
|
| __ CountLeadingZeros(scratch1, scratch1, scratch2);
|
| __ rsb(scratch1, scratch1, Operand(31));
|
| - __ mov(right, Operand(left, LSR, scratch1));
|
| + __ lsr(right, left, scratch1);
|
| __ Ret();
|
| break;
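|
| Side note: a minimal C++ sketch (not from the patch) of the shift-count
| computation above; 31 - clz(x) is the index of the highest set bit of x,
| which serves as the shift amount for the power-of-two division performed
| here. __builtin_clz is only a stand-in for the CountLeadingZeros macro.
|
|   #include <cstdint>
|
|   static int HighestSetBit(uint32_t x) {  // precondition: x != 0
|     return 31 - __builtin_clz(x);         // GCC/Clang intrinsic
|   }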
|
| case Token::MOD:
|
| // Check for two positive smis.
|
| - __ orr(scratch1, left, Operand(right));
|
| + __ orr(scratch1, left, right);
|
| __ tst(scratch1, Operand(0x80000000u | kSmiTagMask));
|
| __ b(ne, ¬_smi_result);
|
|
|
| @@ -2471,25 +2503,25 @@ void BinaryOpStub::GenerateSmiSmiOperation(MacroAssembler* masm) {
|
| __ JumpIfNotPowerOfTwoOrZero(right, scratch1, ¬_smi_result);
|
|
|
| // Perform modulus by masking.
|
| - __ and_(right, left, Operand(scratch1));
|
| + __ land(right, left, scratch1);
|
| __ Ret();
|
| break;
|
| case Token::BIT_OR:
|
| - __ orr(right, left, Operand(right));
|
| + __ orr(right, left, right);
|
| __ Ret();
|
| break;
|
| case Token::BIT_AND:
|
| - __ and_(right, left, Operand(right));
|
| + __ land(right, left, right);
|
| __ Ret();
|
| break;
|
| case Token::BIT_XOR:
|
| - __ eor(right, left, Operand(right));
|
| + __ eor(right, left, right);
|
| __ Ret();
|
| break;
|
| case Token::SAR:
|
| // Remove tags from right operand.
|
| __ GetLeastBitsFromSmi(scratch1, right, 5);
|
| - __ mov(right, Operand(left, ASR, scratch1));
|
| + __ asr(right, left, scratch1);
|
| // Smi tag result.
|
| __ bic(right, right, Operand(kSmiTagMask));
|
| __ Ret();
|
| @@ -2499,7 +2531,7 @@ void BinaryOpStub::GenerateSmiSmiOperation(MacroAssembler* masm) {
|
| // because then the 0s get shifted into bit 30 instead of bit 31.
|
| __ SmiUntag(scratch1, left);
|
| __ GetLeastBitsFromSmi(scratch2, right, 5);
|
| - __ mov(scratch1, Operand(scratch1, LSR, scratch2));
|
| + __ lsr(scratch1, scratch1, scratch2);
|
| // Unsigned shift is not allowed to produce a negative number, so
|
| // check the sign bit and the sign bit after Smi tagging.
|
| __ tst(scratch1, Operand(0xc0000000));
|
| @@ -2512,10 +2544,11 @@ void BinaryOpStub::GenerateSmiSmiOperation(MacroAssembler* masm) {
|
| // Remove tags from operands.
|
| __ SmiUntag(scratch1, left);
|
| __ GetLeastBitsFromSmi(scratch2, right, 5);
|
| - __ mov(scratch1, Operand(scratch1, LSL, scratch2));
|
| + __ lsl(scratch1, scratch1, scratch2);
|
| // Check that the signed result fits in a Smi.
|
| - __ add(scratch2, scratch1, Operand(0x40000000), SetCC);
|
| - __ b(mi, ¬_smi_result);
|
| + __ add(scratch2, scratch1, Operand(0x40000000));
|
| + __ cmpge(scratch2, Operand(0));
|
| + __ bf(¬_smi_result);
|
| __ SmiTag(right, scratch1);
|
| __ Ret();
|
| break;
|
| @@ -2537,9 +2570,9 @@ void BinaryOpStub::GenerateFPOperation(MacroAssembler* masm,
|
| Register scratch3 = r4;
|
|
|
| ASSERT(smi_operands || (not_numbers != NULL));
|
| - if (smi_operands) {
|
| - __ AssertSmi(left);
|
| - __ AssertSmi(right);
|
| + if (smi_operands && FLAG_debug_code) {
|
| + __ AbortIfNotSmi(left);
|
| + __ AbortIfNotSmi(right);
|
| }
|
|
|
| Register heap_number_map = r6;
|
| @@ -2554,7 +2587,7 @@ void BinaryOpStub::GenerateFPOperation(MacroAssembler* masm,
|
| // Load left and right operands into d6 and d7 or r0/r1 and r2/r3
|
| // depending on whether VFP3 is available or not.
|
| FloatingPointHelper::Destination destination =
|
| - CpuFeatures::IsSupported(VFP2) &&
|
| + CpuFeatures::IsSupported(FPU) &&
|
| op_ != Token::MOD ?
|
| FloatingPointHelper::kVFPRegisters :
|
| FloatingPointHelper::kCoreRegisters;
|
| @@ -2578,30 +2611,29 @@ void BinaryOpStub::GenerateFPOperation(MacroAssembler* masm,
|
|
|
| // Calculate the result.
|
| if (destination == FloatingPointHelper::kVFPRegisters) {
|
| - // Using VFP registers:
|
| - // d6: Left value
|
| - // d7: Right value
|
| - CpuFeatures::Scope scope(VFP2);
|
| + // Using FPU registers:
|
| + // dr0: Left value
|
| + // dr2: Right value
|
| switch (op_) {
|
| case Token::ADD:
|
| - __ vadd(d5, d6, d7);
|
| + __ fadd(dr0, dr2);
|
| break;
|
| case Token::SUB:
|
| - __ vsub(d5, d6, d7);
|
| + __ fsub(dr0, dr2);
|
| break;
|
| case Token::MUL:
|
| - __ vmul(d5, d6, d7);
|
| + __ fmul(dr0, dr2);
|
| break;
|
| case Token::DIV:
|
| - __ vdiv(d5, d6, d7);
|
| + __ fdiv(dr0, dr2);
|
| break;
|
| default:
|
| UNREACHABLE();
|
| }
|
|
|
| - __ sub(r0, result, Operand(kHeapObjectTag));
|
| - __ vstr(d5, r0, HeapNumber::kValueOffset);
|
| - __ add(r0, r0, Operand(kHeapObjectTag));
|
| + __ sub(r0, result, Operand(kHeapObjectTag - HeapNumber::kValueOffset));
|
| + __ dstr(dr0, MemOperand(r0, 0));
|
| + __ add(r0, r0, Operand(kHeapObjectTag - HeapNumber::kValueOffset));
|
| __ Ret();
|
| } else {
|
| // Call the C function to handle the double operation.
|
| @@ -2633,7 +2665,7 @@ void BinaryOpStub::GenerateFPOperation(MacroAssembler* masm,
|
| scratch1,
|
| scratch2,
|
| scratch3,
|
| - d0,
|
| + dr0,
|
| not_numbers);
|
| FloatingPointHelper::ConvertNumberToInt32(masm,
|
| right,
|
| @@ -2642,52 +2674,54 @@ void BinaryOpStub::GenerateFPOperation(MacroAssembler* masm,
|
| scratch1,
|
| scratch2,
|
| scratch3,
|
| - d0,
|
| + dr0,
|
| not_numbers);
|
| }
|
|
|
| Label result_not_a_smi;
|
| switch (op_) {
|
| case Token::BIT_OR:
|
| - __ orr(r2, r3, Operand(r2));
|
| + __ orr(r2, r3, r2);
|
| break;
|
| case Token::BIT_XOR:
|
| - __ eor(r2, r3, Operand(r2));
|
| + __ eor(r2, r3, r2);
|
| break;
|
| case Token::BIT_AND:
|
| - __ and_(r2, r3, Operand(r2));
|
| + __ land(r2, r3, r2);
|
| break;
|
| case Token::SAR:
|
| // Use only the 5 least significant bits of the shift count.
|
| __ GetLeastBitsFromInt32(r2, r2, 5);
|
| - __ mov(r2, Operand(r3, ASR, r2));
|
| + __ asr(r2, r3, r2);
|
| break;
|
| case Token::SHR:
|
| // Use only the 5 least significant bits of the shift count.
|
| __ GetLeastBitsFromInt32(r2, r2, 5);
|
| - __ mov(r2, Operand(r3, LSR, r2), SetCC);
|
| + __ lsr(r2, r3, r2);
|
| + __ cmpge(r2, Operand(0)); // Check non-negative (see comment below).
|
| // SHR is special because it is required to produce a positive answer.
|
| // The code below for writing into heap numbers isn't capable of
|
| // writing the register as an unsigned int so we go to slow case if we
|
| // hit this case.
|
| - if (CpuFeatures::IsSupported(VFP2)) {
|
| - __ b(mi, &result_not_a_smi);
|
| + if (CpuFeatures::IsSupported(FPU)) {
|
| + __ bf(&result_not_a_smi);
|
| } else {
|
| - __ b(mi, not_numbers);
|
| + __ bf(not_numbers);
|
| }
|
| break;
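|
| Side note: a minimal C++ sketch (not from the patch) of why SHR needs the
| extra non-negative check above: a logical shift by 0 leaves the operand
| unchanged, so an input with the top bit set yields a result that is a valid
| unsigned value but cannot be represented as a signed int32/smi, and the
| heap-number (or slow) path must be taken instead.
|
|   #include <cstdint>
|
|   static bool ShrNeedsHeapNumber(int32_t value, uint32_t shift_count) {
|     uint32_t result = static_cast<uint32_t>(value) >> (shift_count & 0x1f);
|     return static_cast<int32_t>(result) < 0;  // only possible when the shift is 0
|   }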
|
| case Token::SHL:
|
| // Use only the 5 least significant bits of the shift count.
|
| __ GetLeastBitsFromInt32(r2, r2, 5);
|
| - __ mov(r2, Operand(r3, LSL, r2));
|
| + __ lsl(r2, r3, r2);
|
| break;
|
| default:
|
| UNREACHABLE();
|
| }
|
|
|
| // Check that the *signed* result fits in a smi.
|
| - __ add(r3, r2, Operand(0x40000000), SetCC);
|
| - __ b(mi, &result_not_a_smi);
|
| + __ add(r3, r2, Operand(0x40000000));
|
| + __ cmpge(r3, Operand(0));
|
| + __ bf(&result_not_a_smi);
|
| __ SmiTag(r0, r2);
|
| __ Ret();
|
|
|
| @@ -2707,20 +2741,16 @@ void BinaryOpStub::GenerateFPOperation(MacroAssembler* masm,
|
|
|
| // Nothing can go wrong now, so move the heap number to r0, which is the
|
| // result.
|
| - __ mov(r0, Operand(r5));
|
| + __ mov(r0, r5); // TODO(stm): revisit this; it can probably be done better.
|
|
|
| - if (CpuFeatures::IsSupported(VFP2)) {
|
| - // Convert the int32 in r2 to the heap number in r0. r3 is corrupted. As
|
| - // mentioned above SHR needs to always produce a positive result.
|
| - CpuFeatures::Scope scope(VFP2);
|
| - __ vmov(s0, r2);
|
| + if (CpuFeatures::IsSupported(FPU)) {
|
| if (op_ == Token::SHR) {
|
| - __ vcvt_f64_u32(d0, s0);
|
| + __ dufloat(dr0, r2, dr2, sh4_rtmp);
|
| } else {
|
| - __ vcvt_f64_s32(d0, s0);
|
| + __ dfloat(dr0, r2);
|
| }
|
| - __ sub(r3, r0, Operand(kHeapObjectTag));
|
| - __ vstr(d0, r3, HeapNumber::kValueOffset);
|
| + __ sub(r3, r0, Operand(kHeapObjectTag - HeapNumber::kValueOffset));
|
| + __ dstr(dr0, MemOperand(r3, 0), r3);
|
| __ Ret();
|
| } else {
|
| // Tail call that writes the int32 in r2 to the heap number in r0, using
|
| @@ -2752,7 +2782,7 @@ void BinaryOpStub::GenerateSmiCode(
|
| Register scratch1 = r7;
|
|
|
| // Perform combined smi check on both operands.
|
| - __ orr(scratch1, left, Operand(right));
|
| + __ orr(scratch1, left, right);
|
| STATIC_ASSERT(kSmiTag == 0);
|
| __ JumpIfNotSmi(scratch1, ¬_smis);
|
|
|
| @@ -2816,13 +2846,13 @@ void BinaryOpStub::GenerateBothStringStub(MacroAssembler* masm) {
|
|
|
| // Test if left operand is a string.
|
| __ JumpIfSmi(left, &call_runtime);
|
| - __ CompareObjectType(left, r2, r2, FIRST_NONSTRING_TYPE);
|
| - __ b(ge, &call_runtime);
|
| + __ CompareObjectType(left, r2, r2, FIRST_NONSTRING_TYPE, ge);
|
| + __ bt(&call_runtime);
|
|
|
| // Test if right operand is a string.
|
| __ JumpIfSmi(right, &call_runtime);
|
| - __ CompareObjectType(right, r2, r2, FIRST_NONSTRING_TYPE);
|
| - __ b(ge, &call_runtime);
|
| + __ CompareObjectType(right, r2, r2, FIRST_NONSTRING_TYPE, ge);
|
| + __ bt(&call_runtime);
|
|
|
| StringAddStub string_add_stub(NO_STRING_CHECK_IN_STUB);
|
| GenerateRegisterArgsPush(masm);
|
| @@ -2840,7 +2870,7 @@ void BinaryOpStub::GenerateInt32Stub(MacroAssembler* masm) {
|
| Register right = r0;
|
| Register scratch1 = r7;
|
| Register scratch2 = r9;
|
| - DwVfpRegister double_scratch = d0;
|
| + DwVfpRegister double_scratch = dr0;
|
|
|
| Register heap_number_result = no_reg;
|
| Register heap_number_map = r6;
|
| @@ -2870,50 +2900,47 @@ void BinaryOpStub::GenerateInt32Stub(MacroAssembler* masm) {
|
| // Jump to type transition if they are not. The registers r0 and r1 (right
|
| // and left) are preserved for the runtime call.
|
| FloatingPointHelper::Destination destination =
|
| - (CpuFeatures::IsSupported(VFP2) && op_ != Token::MOD)
|
| + (CpuFeatures::IsSupported(FPU) && op_ != Token::MOD)
|
| ? FloatingPointHelper::kVFPRegisters
|
| : FloatingPointHelper::kCoreRegisters;
|
|
|
| FloatingPointHelper::LoadNumberAsInt32Double(masm,
|
| right,
|
| destination,
|
| - d7,
|
| - d8,
|
| + dr2,
|
| r2,
|
| r3,
|
| heap_number_map,
|
| scratch1,
|
| scratch2,
|
| - s0,
|
| + fr4,
|
| &transition);
|
| FloatingPointHelper::LoadNumberAsInt32Double(masm,
|
| left,
|
| destination,
|
| - d6,
|
| - d8,
|
| + dr0,
|
| r4,
|
| r5,
|
| heap_number_map,
|
| scratch1,
|
| scratch2,
|
| - s0,
|
| + fr4,
|
| &transition);
|
|
|
| - if (destination == FloatingPointHelper::kVFPRegisters) {
|
| - CpuFeatures::Scope scope(VFP2);
|
| + if (destination == FloatingPointHelper::kVFPRegisters) {
|
| Label return_heap_number;
|
| switch (op_) {
|
| case Token::ADD:
|
| - __ vadd(d5, d6, d7);
|
| + __ fadd(dr0, dr2);
|
| break;
|
| case Token::SUB:
|
| - __ vsub(d5, d6, d7);
|
| + __ fsub(dr0, dr2);
|
| break;
|
| case Token::MUL:
|
| - __ vmul(d5, d6, d7);
|
| + __ fmul(dr0, dr2);
|
| break;
|
| case Token::DIV:
|
| - __ vdiv(d5, d6, d7);
|
| + __ fdiv(dr0, dr2);
|
| break;
|
| default:
|
| UNREACHABLE();
|
| @@ -2925,11 +2952,10 @@ void BinaryOpStub::GenerateInt32Stub(MacroAssembler* masm) {
|
| // Otherwise return a heap number if allowed, or jump to type
|
| // transition.
|
|
|
| - __ EmitVFPTruncate(kRoundToZero,
|
| - scratch1,
|
| - d5,
|
| + __ EmitFPUTruncate(kRoundToZero,
|
| scratch2,
|
| - d8);
|
| + dr0,
|
| + scratch1);
|
|
|
| if (result_type_ <= BinaryOpIC::INT32) {
|
| // If the ne condition is set, result does
|
| @@ -2938,14 +2964,15 @@ void BinaryOpStub::GenerateInt32Stub(MacroAssembler* masm) {
|
| }
|
|
|
| // Check if the result fits in a smi.
|
| - __ add(scratch2, scratch1, Operand(0x40000000), SetCC);
|
| + __ add(scratch2, scratch1, Operand(0x40000000));
|
| + __ cmpge(scratch2, Operand(0));
|
| // If not try to return a heap number.
|
| - __ b(mi, &return_heap_number);
|
| + __ bf(&return_heap_number);
|
| // Check for minus zero. Return heap number for minus zero.
|
| Label not_zero;
|
| - __ cmp(scratch1, Operand::Zero());
|
| + __ cmp(scratch1, Operand(0));
|
| __ b(ne, ¬_zero);
|
| - __ vmov(scratch2, d5.high());
|
| + __ isingle(scratch2, dr0.high());
|
| __ tst(scratch2, Operand(HeapNumber::kSignMask));
|
| __ b(ne, &return_heap_number);
|
| __ bind(¬_zero);
|
| @@ -2971,7 +2998,7 @@ void BinaryOpStub::GenerateInt32Stub(MacroAssembler* masm) {
|
| scratch2,
|
| &call_runtime);
|
| __ sub(r0, heap_number_result, Operand(kHeapObjectTag));
|
| - __ vstr(d5, r0, HeapNumber::kValueOffset);
|
| + __ dstr(dr0, MemOperand(r0, HeapNumber::kValueOffset));
|
| __ mov(r0, heap_number_result);
|
| __ Ret();
|
| }
|
| @@ -3031,8 +3058,7 @@ void BinaryOpStub::GenerateInt32Stub(MacroAssembler* masm) {
|
| scratch1,
|
| scratch2,
|
| scratch3,
|
| - d0,
|
| - d1,
|
| + dr0,
|
| &transition);
|
| FloatingPointHelper::LoadNumberAsInt32(masm,
|
| right,
|
| @@ -3041,57 +3067,58 @@ void BinaryOpStub::GenerateInt32Stub(MacroAssembler* masm) {
|
| scratch1,
|
| scratch2,
|
| scratch3,
|
| - d0,
|
| - d1,
|
| + dr0,
|
| &transition);
|
|
|
| // The ECMA-262 standard specifies that, for shift operations, only the
|
| // 5 least significant bits of the shift value should be used.
|
| switch (op_) {
|
| case Token::BIT_OR:
|
| - __ orr(r2, r3, Operand(r2));
|
| + __ orr(r2, r3, r2);
|
| break;
|
| case Token::BIT_XOR:
|
| - __ eor(r2, r3, Operand(r2));
|
| + __ eor(r2, r3, r2);
|
| break;
|
| case Token::BIT_AND:
|
| - __ and_(r2, r3, Operand(r2));
|
| + __ land(r2, r3, r2);
|
| break;
|
| case Token::SAR:
|
| - __ and_(r2, r2, Operand(0x1f));
|
| - __ mov(r2, Operand(r3, ASR, r2));
|
| + __ land(r2, r2, Operand(0x1f));
|
| + __ asr(r2, r3, r2);
|
| break;
|
| case Token::SHR:
|
| - __ and_(r2, r2, Operand(0x1f));
|
| - __ mov(r2, Operand(r3, LSR, r2), SetCC);
|
| + __ land(r2, r2, Operand(0x1f));
|
| + __ lsr(r2, r3, r2);
|
| // SHR is special because it is required to produce a positive answer.
|
| // We only get a negative result if the shift value (r2) is 0.
|
| // This result cannot be respresented as a signed 32-bit integer, try
|
| // to return a heap number if we can.
|
| - // The non vfp2 code does not support this special case, so jump to
|
| + // The non-FPU code does not support this special case, so jump to
|
| // runtime if we don't support it.
|
| - if (CpuFeatures::IsSupported(VFP2)) {
|
| - __ b(mi, (result_type_ <= BinaryOpIC::INT32)
|
| + __ cmpge(r2, Operand(0));
|
| + if (CpuFeatures::IsSupported(FPU)) {
|
| + __ b(f, (result_type_ <= BinaryOpIC::INT32)
|
| ? &transition
|
| : &return_heap_number);
|
| } else {
|
| - __ b(mi, (result_type_ <= BinaryOpIC::INT32)
|
| + __ b(f, (result_type_ <= BinaryOpIC::INT32)
|
| ? &transition
|
| : &call_runtime);
|
| }
|
| break;
|
| case Token::SHL:
|
| - __ and_(r2, r2, Operand(0x1f));
|
| - __ mov(r2, Operand(r3, LSL, r2));
|
| + __ land(r2, r2, Operand(0x1f));
|
| + __ lsl(r2, r3, r2);
|
| break;
|
| default:
|
| UNREACHABLE();
|
| }
|
|
|
| // Check if the result fits in a smi.
|
| - __ add(scratch1, r2, Operand(0x40000000), SetCC);
|
| + __ add(scratch1, r2, Operand(0x40000000));
|
| // If not try to return a heap number. (We know the result is an int32.)
|
| - __ b(mi, &return_heap_number);
|
| + __ cmpge(scratch1, Operand(0));
|
| + __ b(f, &return_heap_number);
|
| // Tag the result and return.
|
| __ SmiTag(r0, r2);
|
| __ Ret();
|
| @@ -3105,21 +3132,17 @@ void BinaryOpStub::GenerateInt32Stub(MacroAssembler* masm) {
|
| scratch2,
|
| &call_runtime);
|
|
|
| - if (CpuFeatures::IsSupported(VFP2)) {
|
| - CpuFeatures::Scope scope(VFP2);
|
| - if (op_ != Token::SHR) {
|
| + if (CpuFeatures::IsSupported(FPU)) {
|
| + if (op_ != Token::SHR) {
|
| // Convert the result to a floating point value.
|
| - __ vmov(double_scratch.low(), r2);
|
| - __ vcvt_f64_s32(double_scratch, double_scratch.low());
|
| + __ dfloat(double_scratch, r2);
|
| } else {
|
| - // The result must be interpreted as an unsigned 32-bit integer.
|
| - __ vmov(double_scratch.low(), r2);
|
| - __ vcvt_f64_u32(double_scratch, double_scratch.low());
|
| + __ dufloat(double_scratch, r2, dr2, sh4_rtmp);
|
| }
|
|
|
| // Store the result.
|
| __ sub(r0, heap_number_result, Operand(kHeapObjectTag));
|
| - __ vstr(double_scratch, r0, HeapNumber::kValueOffset);
|
| + __ dstr(double_scratch, MemOperand(r0, HeapNumber::kValueOffset));
|
| __ mov(r0, heap_number_result);
|
| __ Ret();
|
| } else {
|
| @@ -3169,10 +3192,10 @@ void BinaryOpStub::GenerateOddballStub(MacroAssembler* masm) {
|
| } else {
|
| __ LoadRoot(r1, Heap::kNanValueRootIndex);
|
| }
|
| - __ jmp(&done);
|
| + __ jmp_near(&done);
|
| __ bind(&check);
|
| __ CompareRoot(r0, Heap::kUndefinedValueRootIndex);
|
| - __ b(ne, &done);
|
| + __ b(ne, &done, Label::kNear);
|
| if (Token::IsBitOp(op_)) {
|
| __ mov(r0, Operand(Smi::FromInt(0)));
|
| } else {
|
| @@ -3219,8 +3242,8 @@ void BinaryOpStub::GenerateAddStrings(MacroAssembler* masm) {
|
|
|
| // Check if left argument is a string.
|
| __ JumpIfSmi(left, &left_not_string);
|
| - __ CompareObjectType(left, r2, r2, FIRST_NONSTRING_TYPE);
|
| - __ b(ge, &left_not_string);
|
| + __ CompareObjectType(left, r2, r2, FIRST_NONSTRING_TYPE, ge);
|
| + __ bt(&left_not_string);
|
|
|
| StringAddStub string_add_left_stub(NO_STRING_CHECK_LEFT_IN_STUB);
|
| GenerateRegisterArgsPush(masm);
|
| @@ -3229,8 +3252,8 @@ void BinaryOpStub::GenerateAddStrings(MacroAssembler* masm) {
|
| // Left operand is not a string, test right.
|
| __ bind(&left_not_string);
|
| __ JumpIfSmi(right, &call_runtime);
|
| - __ CompareObjectType(right, r2, r2, FIRST_NONSTRING_TYPE);
|
| - __ b(ge, &call_runtime);
|
| + __ CompareObjectType(right, r2, r2, FIRST_NONSTRING_TYPE, ge);
|
| + __ bt(&call_runtime);
|
|
|
| StringAddStub string_add_right_stub(NO_STRING_CHECK_RIGHT_IN_STUB);
|
| GenerateRegisterArgsPush(masm);
|
| @@ -3305,7 +3328,7 @@ void BinaryOpStub::GenerateHeapResultAllocation(MacroAssembler* masm,
|
| __ b(&allocated);
|
| __ bind(&skip_allocation);
|
| // Use object holding the overwritable operand for result.
|
| - __ mov(result, Operand(overwritable_operand));
|
| + __ mov(result, overwritable_operand);
|
| __ bind(&allocated);
|
| } else {
|
| ASSERT(mode_ == NO_OVERWRITE);
|
| @@ -3321,8 +3344,8 @@ void BinaryOpStub::GenerateRegisterArgsPush(MacroAssembler* masm) {
|
|
|
|
|
| void TranscendentalCacheStub::Generate(MacroAssembler* masm) {
|
| - // Untagged case: double input in d2, double result goes
|
| - // into d2.
|
| + // Untagged case: double input in dr2, double result goes
|
| + // into dr2.
|
| // Tagged case: tagged input on top of stack and in r0,
|
| // tagged result (heap number) goes into r0.
|
|
|
| @@ -3335,8 +3358,7 @@ void TranscendentalCacheStub::Generate(MacroAssembler* masm) {
|
| const Register cache_entry = r0;
|
| const bool tagged = (argument_type_ == TAGGED);
|
|
|
| - if (CpuFeatures::IsSupported(VFP2)) {
|
| - CpuFeatures::Scope scope(VFP2);
|
| + if (CpuFeatures::IsSupported(FPU)) {
|
| if (tagged) {
|
| // Argument is a number and is on stack and in r0.
|
| // Load argument and check if it is a smi.
|
| @@ -3344,7 +3366,9 @@ void TranscendentalCacheStub::Generate(MacroAssembler* masm) {
|
|
|
| // Input is a smi. Convert to double and load the low and high words
|
| // of the double into r2, r3.
|
| - __ IntegerToDoubleConversionWithVFP3(r0, r3, r2);
|
| + __ asr(scratch0, r0, Operand(kSmiTagSize));
|
| + __ dfloat(dr0, scratch0);
|
| + __ movd(r2, r3, dr0);
|
| __ b(&loaded);
|
|
|
| __ bind(&input_not_smi);
|
| @@ -3356,22 +3380,24 @@ void TranscendentalCacheStub::Generate(MacroAssembler* masm) {
|
| DONT_DO_SMI_CHECK);
|
| // Input is a HeapNumber. Load it to a double register and store the
|
| // low and high words into r2, r3.
|
| - __ vldr(d0, FieldMemOperand(r0, HeapNumber::kValueOffset));
|
| - __ vmov(r2, r3, d0);
|
| + __ dldr(dr0, FieldMemOperand(r0, HeapNumber::kValueOffset));
|
| + __ movd(r2, r3, dr0);
|
| } else {
|
| - // Input is untagged double in d2. Output goes to d2.
|
| - __ vmov(r2, r3, d2);
|
| + UNIMPLEMENTED();
|
| }
|
| __ bind(&loaded);
|
| // r2 = low 32 bits of double value
|
| // r3 = high 32 bits of double value
|
| // Compute hash (the shifts are arithmetic):
|
| // h = (low ^ high); h ^= h >> 16; h ^= h >> 8; h = h & (cacheSize - 1);
|
| - __ eor(r1, r2, Operand(r3));
|
| - __ eor(r1, r1, Operand(r1, ASR, 16));
|
| - __ eor(r1, r1, Operand(r1, ASR, 8));
|
| + __ eor(r1, r2, r3);
|
| + __ asr(scratch0, r1, Operand(16));
|
| + __ eor(r1, r1, scratch0);
|
| + __ asr(scratch0, r1, Operand(8));
|
| + __ eor(r1, r1, scratch0);
|
| ASSERT(IsPowerOf2(TranscendentalCache::SubCache::kCacheSize));
|
| - __ And(r1, r1, Operand(TranscendentalCache::SubCache::kCacheSize - 1));
|
| + // TODO(STM): check that land() is a valid replacement for ARM's And() here.
|
| + __ land(r1, r1, Operand(TranscendentalCache::SubCache::kCacheSize - 1));
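|
| Side note: a minimal C++ sketch (not from the patch) of the cache index
| computed above from the two words of the input double; the shifts are
| arithmetic (asr), matching the comment, and kCacheSize is assumed to be a
| power of two so the final mask keeps the index in range.
|
|   #include <cstdint>
|
|   static uint32_t CacheIndex(int32_t low, int32_t high, int32_t cache_size) {
|     int32_t h = low ^ high;
|     h ^= h >> 16;
|     h ^= h >> 8;
|     return static_cast<uint32_t>(h) & static_cast<uint32_t>(cache_size - 1);
|   }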
|
|
|
| // r2 = low 32 bits of double value.
|
| // r3 = high 32 bits of double value.
|
| @@ -3405,12 +3431,17 @@ void TranscendentalCacheStub::Generate(MacroAssembler* masm) {
|
| #endif
|
|
|
| // Find the address of the r1'st entry in the cache, i.e., &r0[r1*12].
|
| - __ add(r1, r1, Operand(r1, LSL, 1));
|
| - __ add(cache_entry, cache_entry, Operand(r1, LSL, 2));
|
| + __ lsl(scratch0, r1, Operand(1));
|
| + __ add(r1, r1, scratch0);
|
| + __ lsl(scratch0, r1, Operand(2));
|
| + __ add(cache_entry, cache_entry, scratch0);
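|
| Side note: the address computation above relies on each cache entry being
| 12 bytes wide, so &entries[i] = base + (i + 2 * i) * 4. A sketch of the
| assumed layout (field names are illustrative only, not the real type):
|
|   struct SubCacheEntry {
|     int32_t input[2];  // low/high words of the cached input double
|     void*   output;    // heap number holding the cached result
|   };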
|
| // Check if cache matches: Double value is stored in uint32_t[2] array.
|
| - __ ldm(ia, cache_entry, r4.bit() | r5.bit() | r6.bit());
|
| + __ ldr(r4, MemOperand(cache_entry, 0));
|
| + __ ldr(r5, MemOperand(cache_entry, 4));
|
| + __ ldr(r6, MemOperand(cache_entry, 8));
|
| __ cmp(r2, r4);
|
| - __ cmp(r3, r5, eq);
|
| + __ b(ne, &calculate);
|
| + __ cmp(r3, r5);
|
| __ b(ne, &calculate);
|
| // Cache hit. Load result, cleanup and return.
|
| Counters* counters = masm->isolate()->counters();
|
| @@ -3419,13 +3450,13 @@ void TranscendentalCacheStub::Generate(MacroAssembler* masm) {
|
| if (tagged) {
|
| // Pop input value from stack and load result into r0.
|
| __ pop();
|
| - __ mov(r0, Operand(r6));
|
| + __ mov(r0, r6);
|
| } else {
|
| - // Load result into d2.
|
| - __ vldr(d2, FieldMemOperand(r6, HeapNumber::kValueOffset));
|
| + // Load result into dr2.
|
| + __ dldr(dr2, FieldMemOperand(r6, HeapNumber::kValueOffset));
|
| }
|
| __ Ret();
|
| - } // if (CpuFeatures::IsSupported(VFP3))
|
| + } // if (CpuFeatures::IsSupported(FPU))
|
|
|
| __ bind(&calculate);
|
| Counters* counters = masm->isolate()->counters();
|
| @@ -3437,79 +3468,18 @@ void TranscendentalCacheStub::Generate(MacroAssembler* masm) {
|
| ExternalReference(RuntimeFunction(), masm->isolate());
|
| __ TailCallExternalReference(runtime_function, 1, 1);
|
| } else {
|
| - ASSERT(CpuFeatures::IsSupported(VFP2));
|
| - CpuFeatures::Scope scope(VFP2);
|
| -
|
| - Label no_update;
|
| - Label skip_cache;
|
| -
|
| - // Call C function to calculate the result and update the cache.
|
| - // r0: precalculated cache entry address.
|
| - // r2 and r3: parts of the double value.
|
| - // Store r0, r2 and r3 on stack for later before calling C function.
|
| - __ Push(r3, r2, cache_entry);
|
| - GenerateCallCFunction(masm, scratch0);
|
| - __ GetCFunctionDoubleResult(d2);
|
| -
|
| - // Try to update the cache. If we cannot allocate a
|
| - // heap number, we return the result without updating.
|
| - __ Pop(r3, r2, cache_entry);
|
| - __ LoadRoot(r5, Heap::kHeapNumberMapRootIndex);
|
| - __ AllocateHeapNumber(r6, scratch0, scratch1, r5, &no_update);
|
| - __ vstr(d2, FieldMemOperand(r6, HeapNumber::kValueOffset));
|
| - __ stm(ia, cache_entry, r2.bit() | r3.bit() | r6.bit());
|
| - __ Ret();
|
| -
|
| - __ bind(&invalid_cache);
|
| - // The cache is invalid. Call runtime which will recreate the
|
| - // cache.
|
| - __ LoadRoot(r5, Heap::kHeapNumberMapRootIndex);
|
| - __ AllocateHeapNumber(r0, scratch0, scratch1, r5, &skip_cache);
|
| - __ vstr(d2, FieldMemOperand(r0, HeapNumber::kValueOffset));
|
| - {
|
| - FrameScope scope(masm, StackFrame::INTERNAL);
|
| - __ push(r0);
|
| - __ CallRuntime(RuntimeFunction(), 1);
|
| - }
|
| - __ vldr(d2, FieldMemOperand(r0, HeapNumber::kValueOffset));
|
| - __ Ret();
|
| -
|
| - __ bind(&skip_cache);
|
| - // Call C function to calculate the result and answer directly
|
| - // without updating the cache.
|
| - GenerateCallCFunction(masm, scratch0);
|
| - __ GetCFunctionDoubleResult(d2);
|
| - __ bind(&no_update);
|
| -
|
| - // We return the value in d2 without adding it to the cache, but
|
| - // we cause a scavenging GC so that future allocations will succeed.
|
| - {
|
| - FrameScope scope(masm, StackFrame::INTERNAL);
|
| -
|
| - // Allocate an aligned object larger than a HeapNumber.
|
| - ASSERT(4 * kPointerSize >= HeapNumber::kSize);
|
| - __ mov(scratch0, Operand(4 * kPointerSize));
|
| - __ push(scratch0);
|
| - __ CallRuntimeSaveDoubles(Runtime::kAllocateInNewSpace);
|
| - }
|
| - __ Ret();
|
| + UNREACHABLE();
|
| }
|
| }
|
|
|
|
|
| void TranscendentalCacheStub::GenerateCallCFunction(MacroAssembler* masm,
|
| Register scratch) {
|
| - ASSERT(CpuFeatures::IsEnabled(VFP2));
|
| Isolate* isolate = masm->isolate();
|
|
|
| __ push(lr);
|
| __ PrepareCallCFunction(0, 1, scratch);
|
| - if (masm->use_eabi_hardfloat()) {
|
| - __ vmov(d0, d2);
|
| - } else {
|
| - __ vmov(r0, r1, d2);
|
| - }
|
| - AllowExternalCallThatCantCauseGC scope(masm);
|
| + __ movd(dr4, r0, r1);
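|
| Side note: the movd above appears to assemble the r0/r1 word pair into dr4
| because, under the SH4 calling convention assumed by this port, the first
| double argument of a C function travels in dr4 (compare the asserts in
| MathPowStub below). The helpers reached through CallCFunction are plain
| one-argument double functions, roughly of the shape (name illustrative):
|
|   double transcendental_helper(double input);  // sin/cos/tan/log variant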
|
| switch (type_) {
|
| case TranscendentalCache::SIN:
|
| __ CallCFunction(ExternalReference::math_sin_double_function(isolate),
|
| @@ -3560,201 +3530,102 @@ void InterruptStub::Generate(MacroAssembler* masm) {
|
|
|
|
|
| void MathPowStub::Generate(MacroAssembler* masm) {
|
| - CpuFeatures::Scope vfp2_scope(VFP2);
|
| - const Register base = r1;
|
| - const Register exponent = r2;
|
| - const Register heapnumbermap = r5;
|
| - const Register heapnumber = r0;
|
| - const DoubleRegister double_base = d1;
|
| - const DoubleRegister double_exponent = d2;
|
| - const DoubleRegister double_result = d3;
|
| - const DoubleRegister double_scratch = d0;
|
| - const SwVfpRegister single_scratch = s0;
|
| - const Register scratch = r9;
|
| - const Register scratch2 = r7;
|
| -
|
| - Label call_runtime, done, int_exponent;
|
| - if (exponent_type_ == ON_STACK) {
|
| - Label base_is_smi, unpack_exponent;
|
| - // The exponent and base are supplied as arguments on the stack.
|
| - // This can only happen if the stub is called from non-optimized code.
|
| - // Load input parameters from stack to double registers.
|
| + // TODO(STM): not merged with the current ARM version yet.
|
| + Label call_runtime;
|
| + if (CpuFeatures::IsSupported(FPU)) {
|
| + Label base_not_smi;
|
| + Label exponent_not_smi;
|
| + Label convert_exponent;
|
| +
|
| + const Register base = r0;
|
| + const Register exponent = r4;
|
| + const Register heapnumbermap = sh4_r8;
|
| + const Register heapnumber = r9;
|
| + const DoubleRegister double_base = dr4;
|
| + const DoubleRegister double_exponent = dr6;
|
| + const DoubleRegister double_result = dr0;
|
| + const Register scratch = r5;
|
| + const Register scratch2 = r6;
|
| +
|
| + __ LoadRoot(heapnumbermap, Heap::kHeapNumberMapRootIndex);
|
| __ ldr(base, MemOperand(sp, 1 * kPointerSize));
|
| __ ldr(exponent, MemOperand(sp, 0 * kPointerSize));
|
|
|
| - __ LoadRoot(heapnumbermap, Heap::kHeapNumberMapRootIndex);
|
| + // Convert base to a double value and store it in double_base (dr4).
|
| + __ JumpIfNotSmi(base, &base_not_smi);
|
| + // Base is a Smi. Untag and convert it.
|
| + __ SmiUntag(base);
|
| + __ dfloat(double_base, base);
|
| + __ b(&convert_exponent);
|
|
|
| - __ UntagAndJumpIfSmi(scratch, base, &base_is_smi);
|
| + __ bind(&base_not_smi);
|
| __ ldr(scratch, FieldMemOperand(base, JSObject::kMapOffset));
|
| __ cmp(scratch, heapnumbermap);
|
| __ b(ne, &call_runtime);
|
| + // Base is a heapnumber. Load it into double register.
|
| + __ dldr(double_base, FieldMemOperand(base, HeapNumber::kValueOffset));
|
| +
|
| + __ bind(&convert_exponent);
|
| + __ JumpIfNotSmi(exponent, &exponent_not_smi);
|
| + __ SmiUntag(exponent);
|
| +
|
| + // The base is in a double register and the exponent is
|
| + // an untagged smi. Allocate a heap number and call a
|
| + // C function for integer exponents. The register containing
|
| + // the heap number is callee-saved.
|
| + __ AllocateHeapNumber(heapnumber,
|
| + scratch,
|
| + scratch2,
|
| + heapnumbermap,
|
| + &call_runtime);
|
| + __ push(pr);
|
| + __ PrepareCallCFunction(1, 1, scratch);
|
| + // Check that the arguments are stored in the right registers (SH4 ABI).
|
| + ASSERT(double_base.is(dr4) && exponent.is(r4));
|
| + __ CallCFunction(
|
| + ExternalReference::power_double_int_function(masm->isolate()),
|
| + 1, 1);
|
| + __ pop(pr);
|
| + ASSERT(double_result.is(dr0));
|
| + __ dstr(double_result,
|
| + FieldMemOperand(heapnumber, HeapNumber::kValueOffset));
|
| + __ mov(r0, heapnumber);
|
| + __ Drop(2);
|
| + __ rts();
|
|
|
| - __ vldr(double_base, FieldMemOperand(base, HeapNumber::kValueOffset));
|
| - __ jmp(&unpack_exponent);
|
| -
|
| - __ bind(&base_is_smi);
|
| - __ vmov(single_scratch, scratch);
|
| - __ vcvt_f64_s32(double_base, single_scratch);
|
| - __ bind(&unpack_exponent);
|
| -
|
| - __ UntagAndJumpIfSmi(scratch, exponent, &int_exponent);
|
| -
|
| + __ bind(&exponent_not_smi);
|
| __ ldr(scratch, FieldMemOperand(exponent, JSObject::kMapOffset));
|
| __ cmp(scratch, heapnumbermap);
|
| __ b(ne, &call_runtime);
|
| - __ vldr(double_exponent,
|
| + // Exponent is a heapnumber. Load it into double register.
|
| + __ dldr(double_exponent,
|
| FieldMemOperand(exponent, HeapNumber::kValueOffset));
|
| - } else if (exponent_type_ == TAGGED) {
|
| - // Base is already in double_base.
|
| - __ UntagAndJumpIfSmi(scratch, exponent, &int_exponent);
|
| -
|
| - __ vldr(double_exponent,
|
| - FieldMemOperand(exponent, HeapNumber::kValueOffset));
|
| - }
|
|
|
| - if (exponent_type_ != INTEGER) {
|
| - Label int_exponent_convert;
|
| - // Detect integer exponents stored as double.
|
| - __ vcvt_u32_f64(single_scratch, double_exponent);
|
| - // We do not check for NaN or Infinity here because comparing numbers on
|
| - // ARM correctly distinguishes NaNs. We end up calling the built-in.
|
| - __ vcvt_f64_u32(double_scratch, single_scratch);
|
| - __ VFPCompareAndSetFlags(double_scratch, double_exponent);
|
| - __ b(eq, &int_exponent_convert);
|
| -
|
| - if (exponent_type_ == ON_STACK) {
|
| - // Detect square root case. Crankshaft detects constant +/-0.5 at
|
| - // compile time and uses DoMathPowHalf instead. We then skip this check
|
| - // for non-constant cases of +/-0.5 as these hardly occur.
|
| - Label not_plus_half;
|
| -
|
| - // Test for 0.5.
|
| - __ vmov(double_scratch, 0.5, scratch);
|
| - __ VFPCompareAndSetFlags(double_exponent, double_scratch);
|
| - __ b(ne, ¬_plus_half);
|
| -
|
| - // Calculates square root of base. Check for the special case of
|
| - // Math.pow(-Infinity, 0.5) == Infinity (ECMA spec, 15.8.2.13).
|
| - __ vmov(double_scratch, -V8_INFINITY, scratch);
|
| - __ VFPCompareAndSetFlags(double_base, double_scratch);
|
| - __ vneg(double_result, double_scratch, eq);
|
| - __ b(eq, &done);
|
| -
|
| - // Add +0 to convert -0 to +0.
|
| - __ vadd(double_scratch, double_base, kDoubleRegZero);
|
| - __ vsqrt(double_result, double_scratch);
|
| - __ jmp(&done);
|
| -
|
| - __ bind(¬_plus_half);
|
| - __ vmov(double_scratch, -0.5, scratch);
|
| - __ VFPCompareAndSetFlags(double_exponent, double_scratch);
|
| - __ b(ne, &call_runtime);
|
| -
|
| - // Calculates square root of base. Check for the special case of
|
| - // Math.pow(-Infinity, -0.5) == 0 (ECMA spec, 15.8.2.13).
|
| - __ vmov(double_scratch, -V8_INFINITY, scratch);
|
| - __ VFPCompareAndSetFlags(double_base, double_scratch);
|
| - __ vmov(double_result, kDoubleRegZero, eq);
|
| - __ b(eq, &done);
|
| -
|
| - // Add +0 to convert -0 to +0.
|
| - __ vadd(double_scratch, double_base, kDoubleRegZero);
|
| - __ vmov(double_result, 1.0, scratch);
|
| - __ vsqrt(double_scratch, double_scratch);
|
| - __ vdiv(double_result, double_result, double_scratch);
|
| - __ jmp(&done);
|
| - }
|
| -
|
| - __ push(lr);
|
| - {
|
| - AllowExternalCallThatCantCauseGC scope(masm);
|
| - __ PrepareCallCFunction(0, 2, scratch);
|
| - __ SetCallCDoubleArguments(double_base, double_exponent);
|
| - __ CallCFunction(
|
| - ExternalReference::power_double_double_function(masm->isolate()),
|
| - 0, 2);
|
| - }
|
| - __ pop(lr);
|
| - __ GetCFunctionDoubleResult(double_result);
|
| - __ jmp(&done);
|
| -
|
| - __ bind(&int_exponent_convert);
|
| - __ vcvt_u32_f64(single_scratch, double_exponent);
|
| - __ vmov(scratch, single_scratch);
|
| - }
|
| -
|
| - // Calculate power with integer exponent.
|
| - __ bind(&int_exponent);
|
| -
|
| - // Get two copies of exponent in the registers scratch and exponent.
|
| - if (exponent_type_ == INTEGER) {
|
| - __ mov(scratch, exponent);
|
| - } else {
|
| - // Exponent has previously been stored into scratch as untagged integer.
|
| - __ mov(exponent, scratch);
|
| - }
|
| - __ vmov(double_scratch, double_base); // Back up base.
|
| - __ vmov(double_result, 1.0, scratch2);
|
| -
|
| - // Get absolute value of exponent.
|
| - __ cmp(scratch, Operand(0));
|
| - __ mov(scratch2, Operand(0), LeaveCC, mi);
|
| - __ sub(scratch, scratch2, scratch, LeaveCC, mi);
|
| -
|
| - Label while_true;
|
| - __ bind(&while_true);
|
| - __ mov(scratch, Operand(scratch, ASR, 1), SetCC);
|
| - __ vmul(double_result, double_result, double_scratch, cs);
|
| - __ vmul(double_scratch, double_scratch, double_scratch, ne);
|
| - __ b(ne, &while_true);
|
| -
|
| - __ cmp(exponent, Operand(0));
|
| - __ b(ge, &done);
|
| - __ vmov(double_scratch, 1.0, scratch);
|
| - __ vdiv(double_result, double_scratch, double_result);
|
| - // Test whether result is zero. Bail out to check for subnormal result.
|
| - // Due to subnormals, x^-y == (1/x)^y does not hold in all cases.
|
| - __ VFPCompareAndSetFlags(double_result, 0.0);
|
| - __ b(ne, &done);
|
| - // double_exponent may not containe the exponent value if the input was a
|
| - // smi. We set it with exponent value before bailing out.
|
| - __ vmov(single_scratch, exponent);
|
| - __ vcvt_f64_s32(double_exponent, single_scratch);
|
| -
|
| - // Returning or bailing out.
|
| - Counters* counters = masm->isolate()->counters();
|
| - if (exponent_type_ == ON_STACK) {
|
| - // The arguments are still on the stack.
|
| - __ bind(&call_runtime);
|
| - __ TailCallRuntime(Runtime::kMath_pow_cfunction, 2, 1);
|
| -
|
| - // The stub is called from non-optimized code, which expects the result
|
| - // as heap number in exponent.
|
| - __ bind(&done);
|
| - __ AllocateHeapNumber(
|
| - heapnumber, scratch, scratch2, heapnumbermap, &call_runtime);
|
| - __ vstr(double_result,
|
| + // The base and the exponent are in double registers.
|
| + // Allocate a heap number and call a C function for
|
| + // double exponents. The register containing
|
| + // the heap number is callee-saved.
|
| + __ AllocateHeapNumber(heapnumber,
|
| + scratch,
|
| + scratch2,
|
| + heapnumbermap,
|
| + &call_runtime);
|
| + __ push(pr);
|
| + __ PrepareCallCFunction(0, 2, scratch);
|
| + ASSERT(double_base.is(dr4) && double_exponent.is(dr6));
|
| + __ CallCFunction(
|
| + ExternalReference::power_double_double_function(masm->isolate()),
|
| + 0, 2);
|
| + __ pop(pr);
|
| + ASSERT(double_result.is(dr0));
|
| + __ dstr(double_result,
|
| FieldMemOperand(heapnumber, HeapNumber::kValueOffset));
|
| - ASSERT(heapnumber.is(r0));
|
| - __ IncrementCounter(counters->math_pow(), 1, scratch, scratch2);
|
| - __ Ret(2);
|
| - } else {
|
| - __ push(lr);
|
| - {
|
| - AllowExternalCallThatCantCauseGC scope(masm);
|
| - __ PrepareCallCFunction(0, 2, scratch);
|
| - __ SetCallCDoubleArguments(double_base, double_exponent);
|
| - __ CallCFunction(
|
| - ExternalReference::power_double_double_function(masm->isolate()),
|
| - 0, 2);
|
| - }
|
| - __ pop(lr);
|
| - __ GetCFunctionDoubleResult(double_result);
|
| -
|
| - __ bind(&done);
|
| - __ IncrementCounter(counters->math_pow(), 1, scratch, scratch2);
|
| - __ Ret();
|
| + __ mov(r0, heapnumber);
|
| + __ Drop(2);
|
| + __ rts();
|
| }
|
| + __ bind(&call_runtime);
|
| + __ TailCallRuntime(Runtime::kMath_pow_cfunction, 2, 1);
|
| }
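|
| Side note: a sketch (not from the patch) of the two C helpers reached through
| CallCFunction above, matching the register asserts: under the SH4 ABI assumed
| here the first double argument travels in dr4 and the first integer argument
| in r4.
|
|   double power_double_int(double base, int exponent);        // smi exponent path
|   double power_double_double(double base, double exponent);  // heap-number exponent path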
|
|
|
|
|
| @@ -3800,15 +3671,27 @@ void CEntryStub::GenerateCore(MacroAssembler* masm,
|
| Label* throw_out_of_memory_exception,
|
| bool do_gc,
|
| bool always_allocate) {
|
| + // WARNING: this function uses the SH4 ABI!
|
| +
|
| + // Input
|
| // r0: result parameter for PerformGC, if any
|
| - // r4: number of arguments including receiver (C callee-saved)
|
| - // r5: pointer to builtin function (C callee-saved)
|
| - // r6: pointer to the first argument (C callee-saved)
|
| + // sh4_r8: number of arguments including receiver (C callee-saved)
|
| + // Used later by LeaveExitFrame()
|
| + // sh4_r9: pointer to builtin function (C callee-saved)
|
| + // sh4_r10: pointer to the first argument (C callee-saved)
|
| + // TODO(stm): use of r10 is dangerous (it may conflict with ip).
|
| + // sh4: callee-saved values moved to stack locations (see ::Generate()).
|
| Isolate* isolate = masm->isolate();
|
| + ASSERT(!r0.is(sh4_rtmp));
|
| + ASSERT(!r0.is(sh4_ip));
|
| + // TODO(STM): fix this merge
|
| + // ASSERT(!sh4_r8.is(sh4_rtmp) && !sh4_r9.is(sh4_rtmp) &&
|
| + // !sh4_r10.is(sh4_rtmp));
|
|
|
| if (do_gc) {
|
| // Passing r0.
|
| __ PrepareCallCFunction(1, 0, r1);
|
| + __ mov(r4, r0);
|
| __ CallCFunction(ExternalReference::perform_gc_function(isolate),
|
| 1, 0);
|
| }
|
| @@ -3823,12 +3706,15 @@ void CEntryStub::GenerateCore(MacroAssembler* masm,
|
| }
|
|
|
| // Call C built-in.
|
| - // r0 = argc, r1 = argv
|
| - __ mov(r0, Operand(r4));
|
| - __ mov(r1, Operand(r6));
|
| -
|
| -#if defined(V8_HOST_ARCH_ARM)
|
| - int frame_alignment = MacroAssembler::ActivationFrameAlignment();
|
| + // r4 = argc, r5 = argv, r6 = isolate
|
| + // __ mov(r4, sh4_r8);
|
| + // __ mov(r5, sh4_r10);
|
| + // sh4: see ::Generate(), which stored these values on the stack.
|
| + __ ldr(r4, MemOperand(sp, (1+0)*kPointerSize));
|
| + __ ldr(r5, MemOperand(sp, (1+2)*kPointerSize));
|
| +
|
| +#if defined(V8_HOST_ARCH_SH4)
|
| + int frame_alignment = OS::ActivationFrameAlignment();
|
| int frame_alignment_mask = frame_alignment - 1;
|
| if (FLAG_debug_code) {
|
| if (frame_alignment > kPointerSize) {
|
| @@ -3843,7 +3729,10 @@ void CEntryStub::GenerateCore(MacroAssembler* masm,
|
| }
|
| #endif
|
|
|
| - __ mov(r2, Operand(ExternalReference::isolate_address()));
|
| + __ mov(r6, Operand(ExternalReference::isolate_address()));
|
| +
|
| + // sh4: see ::Generate(), which stored the builtin on the stack.
|
| + __ ldr(r2, MemOperand(sp, (1+1)*kPointerSize));
|
|
|
| // To let the GC traverse the return address of the exit frames, we need to
|
| // know where the return address is. The CEntryStub is unmovable, so
|
| @@ -3852,13 +3741,21 @@ void CEntryStub::GenerateCore(MacroAssembler* masm,
|
| // Compute the return address in lr to return to after the jump below. Pc is
|
| // already at '+ 8' from the current instruction but return is after three
|
| // instructions so add another 4 to pc to get the return address.
|
| - {
|
| - // Prevent literal pool emission before return address.
|
| - Assembler::BlockConstPoolScope block_const_pool(masm);
|
| - masm->add(lr, pc, Operand(4));
|
| - __ str(lr, MemOperand(sp, 0));
|
| - masm->Jump(r5);
|
| - }
|
| +
|
| + // Compute the return address in pr to return to after the jsr below.
|
| + // We use the addpc operation for this with an offset of 6.
|
| + // We add 3 * kInstrSize to the pc after the addpc for the size of
|
| + // the sequence: [str, jsr, nop(delay slot)].
|
| + __ addpc(r3, 3 * Assembler::kInstrSize, pr);
|
| +#ifdef DEBUG
|
| + int old_pc = masm->pc_offset();
|
| +#endif
|
| + __ str(r3, MemOperand(sp, 0));
|
| + __ jsr(r2);
|
| + // __ jsr(sh4_r9);
|
| +#ifdef DEBUG
|
| + ASSERT(masm->pc_offset() - old_pc == 3 * Assembler::kInstrSize);
|
| +#endif
|
|
|
| if (always_allocate) {
|
| // It's okay to clobber r2 and r3 here. Don't mess with r0 and r1
|
| @@ -3881,16 +3778,19 @@ void CEntryStub::GenerateCore(MacroAssembler* masm,
|
| // r0:r1: result
|
| // sp: stack pointer
|
| // fp: frame pointer
|
| - // Callee-saved register r4 still holds argc.
|
| - __ LeaveExitFrame(save_doubles_, r4);
|
| - __ mov(pc, lr);
|
| + // Callee-saved register sh4_r8 still holds argc.
|
| + // sh4: stored on the stack in ::Generate().
|
| + __ ldr(r2, MemOperand(sp, (1+0)*kPointerSize));
|
| + __ LeaveExitFrame(save_doubles_, r2);
|
| + // __ LeaveExitFrame(save_doubles_, sh4_r8);
|
| + __ rts();
|
|
|
| // check if we should retry or throw exception
|
| Label retry;
|
| __ bind(&failure_returned);
|
| STATIC_ASSERT(Failure::RETRY_AFTER_GC == 0);
|
| __ tst(r0, Operand(((1 << kFailureTypeTagSize) - 1) << kFailureTagSize));
|
| - __ b(eq, &retry);
|
| + __ b(eq, &retry, Label::kNear);
|
|
|
| // Special handling of out of memory exceptions.
|
| Failure* out_of_memory = Failure::OutOfMemoryException();
|
| @@ -3906,7 +3806,8 @@ void CEntryStub::GenerateCore(MacroAssembler* masm,
|
|
|
| // Special handling of termination exceptions which are uncatchable
|
| // by javascript code.
|
| - __ cmp(r0, Operand(isolate->factory()->termination_exception()));
|
| + __ mov(r3, Operand(isolate->factory()->termination_exception()));
|
| + __ cmpeq(r0, r3);
|
| __ b(eq, throw_termination_exception);
|
|
|
| // Handle normal exception.
|
| @@ -3924,6 +3825,7 @@ void CEntryStub::Generate(MacroAssembler* masm) {
|
| // sp: stack pointer (restored as callee's sp after C call)
|
| // cp: current context (C callee-saved)
|
|
|
| + // sh4: clobbers r3
|
| // Result returned in r0 or r0+r1 by default.
|
|
|
| // NOTE: Invocations of builtins may return failure objects
|
| @@ -3932,20 +3834,33 @@ void CEntryStub::Generate(MacroAssembler* masm) {
|
| // builtin once.
|
|
|
| // Compute the argv pointer in a callee-saved register.
|
| - __ add(r6, sp, Operand(r0, LSL, kPointerSizeLog2));
|
| - __ sub(r6, r6, Operand(kPointerSize));
|
| + // sh4: will be saved on stack
|
| + // __ lsl(sh4_r10, r0, Operand(kPointerSizeLog2));
|
| + // __ add(sh4_r10, sp, sh4_r10);
|
| + // __ sub(sh4_r10, sh4_r10, Operand(kPointerSize));
|
| + __ lsl(r3, r0, Operand(kPointerSizeLog2));
|
| + __ add(r3, sp, r3);
|
| + __ sub(r3, r3, Operand(kPointerSize));
|
|
|
| // Enter the exit frame that transitions from JavaScript to C++.
|
| FrameScope scope(masm, StackFrame::MANUAL);
|
| - __ EnterExitFrame(save_doubles_);
|
| -
|
| - // Set up argc and the builtin function in callee-saved registers.
|
| - __ mov(r4, Operand(r0));
|
| - __ mov(r5, Operand(r1));
|
| -
|
| - // r4: number of arguments (C callee-saved)
|
| - // r5: pointer to builtin function (C callee-saved)
|
| - // r6: pointer to first argument (C callee-saved)
|
| + // SH4: Reserve space for 3 stack locations
|
| + __ EnterExitFrame(save_doubles_, 3);
|
| +
|
| + // Set up argc and the builtin function. sh4: save them on the stack
|
| + // instead of keeping them in callee-saved registers; sp now contains:
|
| + // sp[0] == lr; sp[1] == argc; sp[2] == builtin; sp[3] == argv.
|
| + // __ mov(sh4_r8, r0);
|
| + // __ mov(sh4_r9, r1);
|
| + __ str(r0, MemOperand(sp, (1+0)*kPointerSize)); // skip lr location at sp[0]
|
| + __ str(r1, MemOperand(sp, (1+1)*kPointerSize));
|
| + __ str(r3, MemOperand(sp, (1+2)*kPointerSize));
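|
| Side note: a sketch (not from the patch) of the return-address slot plus the
| three spill slots that EnterExitFrame(save_doubles_, 3) reserves, as laid out
| by the stores above and reread in GenerateCore()/LeaveExitFrame(); field
| names are illustrative only.
|
|   struct ExitFrameSpill {
|     void* return_address;  // sp[0], written just before the jsr in GenerateCore
|     int   argc;            // sp[1], offset (1+0)*kPointerSize
|     void* builtin;         // sp[2], offset (1+1)*kPointerSize
|     void* argv;            // sp[3], offset (1+2)*kPointerSize
|   };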
|
| +
|
| + // sh4_r8: number of arguments (C callee-saved)
|
| + // sh4_r9: pointer to builtin function (C callee-saved)
|
| + // sh4_r10: pointer to first argument (C callee-saved)
|
| + // ASSERT(!sh4_r8.is(sh4_rtmp) && !sh4_r9.is(sh4_rtmp) &&
|
| + // !sh4_r10.is(sh4_rtmp));
|
|
|
| Label throw_normal_exception;
|
| Label throw_termination_exception;
|
| @@ -4003,38 +3918,35 @@ void CEntryStub::Generate(MacroAssembler* masm) {
|
|
|
|
|
| void JSEntryStub::GenerateBody(MacroAssembler* masm, bool is_construct) {
|
| - // r0: code entry
|
| - // r1: function
|
| - // r2: receiver
|
| - // r3: argc
|
| + // r4: code entry
|
| + // r5: function
|
| + // r6: receiver
|
| + // r7: argc
|
| // [sp+0]: argv
|
|
|
| Label invoke, handler_entry, exit;
|
|
|
| - // Called from C, so do not pop argc and args on exit (preserve sp)
|
| - // No need to save register-passed args
|
| - // Save callee-saved registers (incl. cp and fp), sp, and lr
|
| - __ stm(db_w, sp, kCalleeSaved | lr.bit());
|
| + // Save callee-saved registers
|
| + __ push(pr);
|
| + __ pushm(kCalleeSaved);
|
|
|
| - if (CpuFeatures::IsSupported(VFP2)) {
|
| - CpuFeatures::Scope scope(VFP2);
|
| - // Save callee-saved vfp registers.
|
| - __ vstm(db_w, sp, kFirstCalleeSavedDoubleReg, kLastCalleeSavedDoubleReg);
|
| - // Set up the reserved register for 0.0.
|
| - __ vmov(kDoubleRegZero, 0.0);
|
| - }
|
| + // We don't need to save the callee saved double registers: we only use the
|
| + // caller saved ones.
|
| +
|
| + // Move the registers to use ARM ABI (and JS ABI)
|
| + __ mov(r0, r4);
|
| + __ mov(r1, r5);
|
| + __ mov(r2, r6);
|
| + __ mov(r3, r7);
|
|
|
| - // Get address of argv, see stm above.
|
| + // Get address of argv
|
| // r0: code entry
|
| // r1: function
|
| // r2: receiver
|
| // r3: argc
|
|
|
| - // Set up argv in r4.
|
| + // Set up argv in r4.
|
| int offset_to_argv = (kNumCalleeSaved + 1) * kPointerSize;
|
| - if (CpuFeatures::IsSupported(VFP2)) {
|
| - offset_to_argv += kNumDoubleCalleeSaved * kDoubleSize;
|
| - }
|
| __ ldr(r4, MemOperand(sp, offset_to_argv));
|
|
|
| // Push a frame with special values setup to mark it as an entry frame.
|
| @@ -4044,16 +3956,21 @@ void JSEntryStub::GenerateBody(MacroAssembler* masm, bool is_construct) {
|
| // r3: argc
|
| // r4: argv
|
| Isolate* isolate = masm->isolate();
|
| - __ mov(r8, Operand(-1)); // Push a bad frame pointer to fail if it is used.
|
| + // Push a bad frame pointer to fail if it is used.
|
| + __ mov(ip, Operand(-1));
|
| + __ push(ip);
|
| +
|
| int marker = is_construct ? StackFrame::ENTRY_CONSTRUCT : StackFrame::ENTRY;
|
| __ mov(r7, Operand(Smi::FromInt(marker)));
|
| __ mov(r6, Operand(Smi::FromInt(marker)));
|
| __ mov(r5,
|
| Operand(ExternalReference(Isolate::kCEntryFPAddress, isolate)));
|
| __ ldr(r5, MemOperand(r5));
|
| - __ Push(r8, r7, r6, r5);
|
| + __ push(r7);
|
| + __ push(r6);
|
| + __ push(r5);
|
|
|
| - // Set up frame pointer for the frame to be pushed.
|
| + // Set up frame pointer for the frame to be pushed.
|
| __ add(fp, sp, Operand(-EntryFrameConstants::kCallerFPOffset));
|
|
|
| // If this is the outermost JS call, set js_entry_sp value.
|
| @@ -4061,12 +3978,12 @@ void JSEntryStub::GenerateBody(MacroAssembler* masm, bool is_construct) {
|
| ExternalReference js_entry_sp(Isolate::kJSEntrySPAddress, isolate);
|
| __ mov(r5, Operand(ExternalReference(js_entry_sp)));
|
| __ ldr(r6, MemOperand(r5));
|
| - __ cmp(r6, Operand::Zero());
|
| - __ b(ne, &non_outermost_js);
|
| + __ cmp(r6, Operand(0));
|
| + __ b(ne, &non_outermost_js, Label::kNear);
|
| __ str(fp, MemOperand(r5));
|
| __ mov(ip, Operand(Smi::FromInt(StackFrame::OUTERMOST_JSENTRY_FRAME)));
|
| Label cont;
|
| - __ b(&cont);
|
| + __ b_near(&cont);
|
| __ bind(&non_outermost_js);
|
| __ mov(ip, Operand(Smi::FromInt(StackFrame::INNER_JSENTRY_FRAME)));
|
| __ bind(&cont);
|
| @@ -4080,7 +3997,7 @@ void JSEntryStub::GenerateBody(MacroAssembler* masm, bool is_construct) {
|
| // entry. This avoids making the assumption that literal pools are always
|
| // emitted after an instruction is emitted, rather than before.
|
| {
|
| - Assembler::BlockConstPoolScope block_const_pool(masm);
|
| + // TODO(STM): block constant pool
|
| __ bind(&handler_entry);
|
| handler_offset_ = handler_entry.pos();
|
| // Caught exception: Store result (exception) in the pending exception
|
| @@ -4100,7 +4017,7 @@ void JSEntryStub::GenerateBody(MacroAssembler* masm, bool is_construct) {
|
| // Must preserve r0-r4, r5-r7 are available.
|
| __ PushTryHandler(StackHandler::JS_ENTRY, 0);
|
| // If an exception not caught by another handler occurs, this handler
|
| - // returns control to the code after the bl(&invoke) above, which
|
| + // returns control to the code after the jmp(&invoke) above, which
|
| // restores all kCalleeSaved registers (including cp and fp) to their
|
| // saved values before returning a failure to C.
|
|
|
| @@ -4130,15 +4047,9 @@ void JSEntryStub::GenerateBody(MacroAssembler* masm, bool is_construct) {
|
| }
|
| __ ldr(ip, MemOperand(ip)); // deref address
|
|
|
| - // Branch and link to JSEntryTrampoline. We don't use the double underscore
|
| - // macro for the add instruction because we don't want the coverage tool
|
| - // inserting instructions here after we read the pc. We block literal pool
|
| - // emission for the same reason.
|
| - {
|
| - Assembler::BlockConstPoolScope block_const_pool(masm);
|
| - __ mov(lr, Operand(pc));
|
| - masm->add(pc, ip, Operand(Code::kHeaderSize - kHeapObjectTag));
|
| - }
|
| + // JSEntryTrampoline
|
| + __ add(ip, Operand(Code::kHeaderSize - kHeapObjectTag));
|
| + __ jsr(ip);
|
|
|
| // Unlink this frame from the handler chain.
|
| __ PopTryHandler();
|
| @@ -4148,8 +4059,8 @@ void JSEntryStub::GenerateBody(MacroAssembler* masm, bool is_construct) {
|
| Label non_outermost_js_2;
|
| __ pop(r5);
|
| __ cmp(r5, Operand(Smi::FromInt(StackFrame::OUTERMOST_JSENTRY_FRAME)));
|
| - __ b(ne, &non_outermost_js_2);
|
| - __ mov(r6, Operand::Zero());
|
| + __ b(ne, &non_outermost_js_2, Label::kNear);
|
| + __ mov(r6, Operand(0));
|
| __ mov(r5, Operand(ExternalReference(js_entry_sp)));
|
| __ str(r6, MemOperand(r5));
|
| __ bind(&non_outermost_js_2);
|
| @@ -4164,19 +4075,10 @@ void JSEntryStub::GenerateBody(MacroAssembler* masm, bool is_construct) {
|
| __ add(sp, sp, Operand(-EntryFrameConstants::kCallerFPOffset));
|
|
|
| // Restore callee-saved registers and return.
|
| -#ifdef DEBUG
|
| - if (FLAG_debug_code) {
|
| - __ mov(lr, Operand(pc));
|
| - }
|
| -#endif
|
| -
|
| - if (CpuFeatures::IsSupported(VFP2)) {
|
| - CpuFeatures::Scope scope(VFP2);
|
| - // Restore callee-saved vfp registers.
|
| - __ vldm(ia_w, sp, kFirstCalleeSavedDoubleReg, kLastCalleeSavedDoubleReg);
|
| - }
|
| + __ popm(kCalleeSaved);
|
| + __ pop(pr);
|
|
|
| - __ ldm(ia_w, sp, kCalleeSaved | pc.bit());
|
| + __ rts();
|
| }
|
|
|
|
|
| @@ -4267,8 +4169,8 @@ void InstanceofStub::Generate(MacroAssembler* masm) {
|
| // Loop through the prototype chain looking for the function prototype.
|
| __ LoadRoot(scratch2, Heap::kNullValueRootIndex);
|
| __ bind(&loop);
|
| - __ cmp(scratch, Operand(prototype));
|
| - __ b(eq, &is_instance);
|
| + __ cmp(scratch, prototype);
|
| + __ b(eq, &is_instance, Label::kNear);
|
| __ cmp(scratch, scratch2);
|
| __ b(eq, &is_not_instance);
|
| __ ldr(scratch, FieldMemOperand(scratch, HeapObject::kMapOffset));
|
| @@ -4316,12 +4218,12 @@ void InstanceofStub::Generate(MacroAssembler* masm) {
|
| // Before null, smi and string value checks, check that the rhs is a function
|
| // as for a non-function rhs an exception needs to be thrown.
|
| __ JumpIfSmi(function, &slow);
|
| - __ CompareObjectType(function, scratch2, scratch, JS_FUNCTION_TYPE);
|
| + __ CompareObjectType(function, scratch2, scratch, JS_FUNCTION_TYPE, eq);
|
| __ b(ne, &slow);
|
|
|
| // Null is not instance of anything.
|
| __ cmp(scratch, Operand(masm->isolate()->factory()->null_value()));
|
| - __ b(ne, &object_not_null);
|
| + __ b(ne, &object_not_null, Label::kNear);
|
| __ mov(r0, Operand(Smi::FromInt(1)));
|
| __ Ret(HasArgsInRegisters() ? 0 : 2);
|
|
|
| @@ -4350,9 +4252,14 @@ void InstanceofStub::Generate(MacroAssembler* masm) {
|
| __ Push(r0, r1);
|
| __ InvokeBuiltin(Builtins::INSTANCE_OF, CALL_FUNCTION);
|
| }
|
| - __ cmp(r0, Operand::Zero());
|
| - __ LoadRoot(r0, Heap::kTrueValueRootIndex, eq);
|
| - __ LoadRoot(r0, Heap::kFalseValueRootIndex, ne);
|
| + Label ltrue, lfalse;
|
| + __ cmp(r0, Operand(0));
|
| + __ bf_near(&lfalse);
|
| + __ LoadRoot(r0, Heap::kTrueValueRootIndex);
|
| + __ jmp_near(&ltrue);
|
| + __ bind(&lfalse);
|
| + __ LoadRoot(r0, Heap::kFalseValueRootIndex);
|
| + __ bind(&ltrue);
|
| __ Ret(HasArgsInRegisters() ? 0 : 2);
|
| }
|
| }
|
| @@ -4372,40 +4279,42 @@ void ArgumentsAccessStub::GenerateReadElement(MacroAssembler* masm) {
|
|
|
| // Check that the key is a smi.
|
| Label slow;
|
| - __ JumpIfNotSmi(r1, &slow);
|
| + __ JumpIfNotSmi(r1, &slow, Label::kNear);
|
|
|
| // Check if the calling frame is an arguments adaptor frame.
|
| Label adaptor;
|
| __ ldr(r2, MemOperand(fp, StandardFrameConstants::kCallerFPOffset));
|
| __ ldr(r3, MemOperand(r2, StandardFrameConstants::kContextOffset));
|
| __ cmp(r3, Operand(Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR)));
|
| - __ b(eq, &adaptor);
|
| + __ b(eq, &adaptor, Label::kNear);
|
|
|
| // Check index against formal parameters count limit passed in
|
| // through register r0. Use unsigned comparison to get negative
|
| // check for free.
|
| - __ cmp(r1, r0);
|
| - __ b(hs, &slow);
|
| + __ cmphs(r1, r0);
|
| + __ bt_near(&slow);
|
|
|
| // Read the argument from the stack and return it.
|
| __ sub(r3, r0, r1);
|
| - __ add(r3, fp, Operand(r3, LSL, kPointerSizeLog2 - kSmiTagSize));
|
| + __ lsl(r0, r3, Operand(kPointerSizeLog2 - kSmiTagSize));
|
| + __ add(r3, fp, r0);
|
| __ ldr(r0, MemOperand(r3, kDisplacement));
|
| - __ Jump(lr);
|
| + __ rts();
|
|
|
| // Arguments adaptor case: Check index against actual arguments
|
| // limit found in the arguments adaptor frame. Use unsigned
|
| // comparison to get negative check for free.
|
| __ bind(&adaptor);
|
| __ ldr(r0, MemOperand(r2, ArgumentsAdaptorFrameConstants::kLengthOffset));
|
| - __ cmp(r1, r0);
|
| - __ b(cs, &slow);
|
| + __ cmphs(r1, r0);
|
| + __ bt_near(&slow);
|
|
|
| // Read the argument from the adaptor frame and return it.
|
| __ sub(r3, r0, r1);
|
| - __ add(r3, r2, Operand(r3, LSL, kPointerSizeLog2 - kSmiTagSize));
|
| + __ lsl(r0, r3, Operand(kPointerSizeLog2 - kSmiTagSize));
|
| + __ add(r3, r2, r0);
|
| __ ldr(r0, MemOperand(r3, kDisplacement));
|
| - __ Jump(lr);
|
| + __ rts();
|
|
|
| // Slow-case: Handle non-smi or out-of-bounds access to arguments
|
| // by calling the runtime system.
|
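For reference, a minimal standalone C++ sketch (not V8 code; the constants are assumptions about the 32-bit smi layout) of why the ARM shifted-operand forms become an explicit lsl into a scratch register followed by an add on SH4: a smi stores its value shifted left by one with a zero tag bit, so a tagged index shifted by kPointerSizeLog2 - kSmiTagSize is directly a byte offset into a pointer-sized array.

  #include <cassert>
  #include <cstdint>

  constexpr int kSmiTagSize = 1;       // low bit is the tag, 0 for smis
  constexpr int kPointerSizeLog2 = 2;  // 4-byte pointers on this target

  int32_t SmiTag(int32_t value) { return value << kSmiTagSize; }
  int32_t SmiUntag(int32_t smi) { return smi >> kSmiTagSize; }

  // Byte offset of element |smi_index|, computed the way the stub does it:
  // shift the still-tagged index instead of untagging first.
  int32_t ElementOffset(int32_t smi_index) {
    return smi_index << (kPointerSizeLog2 - kSmiTagSize);
  }

  int main() {
    assert(SmiUntag(SmiTag(42)) == 42);
    assert(ElementOffset(SmiTag(3)) == 3 * 4);  // third element, 4-byte slots
    return 0;
  }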
| @@ -4425,12 +4334,13 @@ void ArgumentsAccessStub::GenerateNewNonStrictSlow(MacroAssembler* masm) {
|
| __ ldr(r3, MemOperand(fp, StandardFrameConstants::kCallerFPOffset));
|
| __ ldr(r2, MemOperand(r3, StandardFrameConstants::kContextOffset));
|
| __ cmp(r2, Operand(Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR)));
|
| - __ b(ne, &runtime);
|
| + __ b(ne, &runtime, Label::kNear);
|
|
|
| // Patch the arguments.length and the parameters pointer in the current frame.
|
| __ ldr(r2, MemOperand(r3, ArgumentsAdaptorFrameConstants::kLengthOffset));
|
| __ str(r2, MemOperand(sp, 0 * kPointerSize));
|
| - __ add(r3, r3, Operand(r2, LSL, 1));
|
| + __ lsl(ip, r2, Operand(1));
|
| + __ add(r3, r3, ip);
|
| __ add(r3, r3, Operand(StandardFrameConstants::kCallerSPOffset));
|
| __ str(r3, MemOperand(sp, 1 * kPointerSize));
|
|
|
| @@ -4457,24 +4367,25 @@ void ArgumentsAccessStub::GenerateNewNonStrictFast(MacroAssembler* masm) {
|
| __ ldr(r3, MemOperand(fp, StandardFrameConstants::kCallerFPOffset));
|
| __ ldr(r2, MemOperand(r3, StandardFrameConstants::kContextOffset));
|
| __ cmp(r2, Operand(Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR)));
|
| - __ b(eq, &adaptor_frame);
|
| + __ b(eq, &adaptor_frame, Label::kNear);
|
|
|
| // No adaptor, parameter count = argument count.
|
| __ mov(r2, r1);
|
| - __ b(&try_allocate);
|
| + __ b_near(&try_allocate);
|
|
|
| // We have an adaptor frame. Patch the parameters pointer.
|
| __ bind(&adaptor_frame);
|
| __ ldr(r2, MemOperand(r3, ArgumentsAdaptorFrameConstants::kLengthOffset));
|
| - __ add(r3, r3, Operand(r2, LSL, 1));
|
| + __ lsl(ip, r2, Operand(1));
|
| + __ add(r3, r3, ip);
|
| __ add(r3, r3, Operand(StandardFrameConstants::kCallerSPOffset));
|
| __ str(r3, MemOperand(sp, 1 * kPointerSize));
|
|
|
| // r1 = parameter count (tagged)
|
| // r2 = argument count (tagged)
|
| // Compute the mapped parameter count = min(r1, r2) in r1.
|
| - __ cmp(r1, Operand(r2));
|
| - __ mov(r1, Operand(r2), LeaveCC, gt);
|
| + __ cmpgt(r1, r2);
|
| + __ mov(r1, r2, t);
|
|
|
| __ bind(&try_allocate);
|
|
|
| @@ -4483,13 +4394,17 @@ void ArgumentsAccessStub::GenerateNewNonStrictFast(MacroAssembler* masm) {
|
| const int kParameterMapHeaderSize =
|
| FixedArray::kHeaderSize + 2 * kPointerSize;
|
| // If there are no mapped parameters, we do not need the parameter_map.
|
| - __ cmp(r1, Operand(Smi::FromInt(0)));
|
| - __ mov(r9, Operand::Zero(), LeaveCC, eq);
|
| - __ mov(r9, Operand(r1, LSL, 1), LeaveCC, ne);
|
| - __ add(r9, r9, Operand(kParameterMapHeaderSize), LeaveCC, ne);
|
| + __ cmpeq(r1, Operand(Smi::FromInt(0)));
|
| + __ mov(r9, Operand(0), t);
|
| + Label skip;
|
| + __ bt_near(&skip);
|
| + __ lsl(r9, r1, Operand(1));
|
| + __ add(r9, r9, Operand(kParameterMapHeaderSize));
|
| + __ bind(&skip);
|
|
|
| // 2. Backing store.
|
| - __ add(r9, r9, Operand(r2, LSL, 1));
|
| + __ lsl(ip, r2, Operand(1));
|
| + __ add(r9, r9, ip);
|
| __ add(r9, r9, Operand(FixedArray::kHeaderSize));
|
|
|
| // 3. Arguments object.
|
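As a rough guide to the size computed above with shifts and adds, here is a hypothetical plain C++ restatement (not V8 code; the header and object sizes are illustrative assumptions, only the structure of the computation follows the stub):

  #include <algorithm>
  #include <cstddef>

  // Sizes in bytes, counts untagged; the constants are assumed for the sketch.
  constexpr size_t kPointerSize = 4;
  constexpr size_t kFixedArrayHeaderSize = 2 * kPointerSize;  // map + length
  constexpr size_t kParameterMapHeaderSize = kFixedArrayHeaderSize + 2 * kPointerSize;
  constexpr size_t kArgumentsObjectSize = 5 * kPointerSize;   // assumed

  size_t ArgumentsAllocationSize(size_t param_count, size_t arg_count) {
    // Mapped parameter count = min(parameters, arguments), as computed earlier.
    size_t mapped_count = std::min(param_count, arg_count);
    size_t size = 0;
    if (mapped_count != 0)  // 1. Parameter map, skipped when nothing is mapped.
      size += mapped_count * kPointerSize + kParameterMapHeaderSize;
    size += arg_count * kPointerSize + kFixedArrayHeaderSize;  // 2. Backing store.
    size += kArgumentsObjectSize;                              // 3. Arguments object.
    return size;
  }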
| @@ -4506,11 +4421,16 @@ void ArgumentsAccessStub::GenerateNewNonStrictFast(MacroAssembler* masm) {
|
| const int kAliasedOffset =
|
| Context::SlotOffset(Context::ALIASED_ARGUMENTS_BOILERPLATE_INDEX);
|
|
|
| - __ ldr(r4, MemOperand(r8, Context::SlotOffset(Context::GLOBAL_OBJECT_INDEX)));
|
| + __ ldr(r4, MemOperand(cp, Context::SlotOffset(Context::GLOBAL_OBJECT_INDEX)));
|
| __ ldr(r4, FieldMemOperand(r4, GlobalObject::kNativeContextOffset));
|
| - __ cmp(r1, Operand::Zero());
|
| - __ ldr(r4, MemOperand(r4, kNormalOffset), eq);
|
| - __ ldr(r4, MemOperand(r4, kAliasedOffset), ne);
|
| + __ cmp(r1, Operand(0));
|
| + Label lf, end;
|
| + __ bf_near(&lf);
|
| + __ ldr(r4, MemOperand(r4, kNormalOffset));
|
| + __ b_near(&end);
|
| + __ bind(&lf);
|
| + __ ldr(r4, MemOperand(r4, kAliasedOffset));
|
| + __ bind(&end);
|
|
|
| // r0 = address of new object (tagged)
|
| // r1 = mapped parameter count (tagged)
|
| @@ -4550,15 +4470,16 @@ void ArgumentsAccessStub::GenerateNewNonStrictFast(MacroAssembler* masm) {
|
| __ cmp(r1, Operand(Smi::FromInt(0)));
|
| // Move backing store address to r3, because it is
|
| // expected there when filling in the unmapped arguments.
|
| - __ mov(r3, r4, LeaveCC, eq);
|
| + __ mov(r3, r4, eq);
|
| __ b(eq, &skip_parameter_map);
|
|
|
| __ LoadRoot(r6, Heap::kNonStrictArgumentsElementsMapRootIndex);
|
| __ str(r6, FieldMemOperand(r4, FixedArray::kMapOffset));
|
| __ add(r6, r1, Operand(Smi::FromInt(2)));
|
| __ str(r6, FieldMemOperand(r4, FixedArray::kLengthOffset));
|
| - __ str(r8, FieldMemOperand(r4, FixedArray::kHeaderSize + 0 * kPointerSize));
|
| - __ add(r6, r4, Operand(r1, LSL, 1));
|
| + __ str(cp, FieldMemOperand(r4, FixedArray::kHeaderSize + 0 * kPointerSize));
|
| + __ lsl(r6, r1, Operand(1));
|
| + __ add(r6, r4, r6);
|
| __ add(r6, r6, Operand(kParameterMapHeaderSize));
|
| __ str(r6, FieldMemOperand(r4, FixedArray::kHeaderSize + 1 * kPointerSize));
|
|
|
| @@ -4574,9 +4495,10 @@ void ArgumentsAccessStub::GenerateNewNonStrictFast(MacroAssembler* masm) {
|
| __ mov(r6, r1);
|
| __ ldr(r9, MemOperand(sp, 0 * kPointerSize));
|
| __ add(r9, r9, Operand(Smi::FromInt(Context::MIN_CONTEXT_SLOTS)));
|
| - __ sub(r9, r9, Operand(r1));
|
| + __ sub(r9, r9, r1);
|
| __ LoadRoot(r7, Heap::kTheHoleValueRootIndex);
|
| - __ add(r3, r4, Operand(r6, LSL, 1));
|
| + __ lsl(r3, r6, Operand(1));
|
| + __ add(r3, r4, r3);
|
| __ add(r3, r3, Operand(kParameterMapHeaderSize));
|
|
|
| // r6 = loop variable (tagged)
|
| @@ -4585,11 +4507,11 @@ void ArgumentsAccessStub::GenerateNewNonStrictFast(MacroAssembler* masm) {
|
| // r4 = address of parameter map (tagged)
|
| // r5 = temporary scratch (a.o., for address calculation)
|
| // r7 = the hole value
|
| - __ jmp(&parameters_test);
|
| + __ jmp_near(&parameters_test);
|
|
|
| __ bind(&parameters_loop);
|
| __ sub(r6, r6, Operand(Smi::FromInt(1)));
|
| - __ mov(r5, Operand(r6, LSL, 1));
|
| + __ lsl(r5, r6, Operand(1));
|
| __ add(r5, r5, Operand(kParameterMapHeaderSize - kHeapObjectTag));
|
| __ str(r9, MemOperand(r4, r5));
|
| __ sub(r5, r5, Operand(kParameterMapHeaderSize - FixedArray::kHeaderSize));
|
| @@ -4611,19 +4533,21 @@ void ArgumentsAccessStub::GenerateNewNonStrictFast(MacroAssembler* masm) {
|
| Label arguments_loop, arguments_test;
|
| __ mov(r9, r1);
|
| __ ldr(r4, MemOperand(sp, 1 * kPointerSize));
|
| - __ sub(r4, r4, Operand(r9, LSL, 1));
|
| - __ jmp(&arguments_test);
|
| + __ lsl(r5, r9, Operand(1));
|
| + __ sub(r4, r4, r5);
|
| + __ jmp_near(&arguments_test);
|
|
|
| __ bind(&arguments_loop);
|
| __ sub(r4, r4, Operand(kPointerSize));
|
| __ ldr(r6, MemOperand(r4, 0));
|
| - __ add(r5, r3, Operand(r9, LSL, 1));
|
| + __ lsl(r5, r9, Operand(1));
|
| + __ add(r5, r3, r5);
|
| __ str(r6, FieldMemOperand(r5, FixedArray::kHeaderSize));
|
| __ add(r9, r9, Operand(Smi::FromInt(1)));
|
|
|
| __ bind(&arguments_test);
|
| - __ cmp(r9, Operand(r2));
|
| - __ b(lt, &arguments_loop);
|
| + __ cmpge(r9, r2);
|
| + __ bf(&arguments_loop);
|
|
|
| // Return and remove the on-stack parameters.
|
| __ add(sp, sp, Operand(3 * kPointerSize));
|
| @@ -4646,17 +4570,18 @@ void ArgumentsAccessStub::GenerateNewStrict(MacroAssembler* masm) {
|
| __ ldr(r2, MemOperand(fp, StandardFrameConstants::kCallerFPOffset));
|
| __ ldr(r3, MemOperand(r2, StandardFrameConstants::kContextOffset));
|
| __ cmp(r3, Operand(Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR)));
|
| - __ b(eq, &adaptor_frame);
|
| + __ b(eq, &adaptor_frame, Label::kNear);
|
|
|
| // Get the length from the frame.
|
| __ ldr(r1, MemOperand(sp, 0));
|
| - __ b(&try_allocate);
|
| + __ b_near(&try_allocate);
|
|
|
| // Patch the arguments.length and the parameters pointer.
|
| __ bind(&adaptor_frame);
|
| __ ldr(r1, MemOperand(r2, ArgumentsAdaptorFrameConstants::kLengthOffset));
|
| __ str(r1, MemOperand(sp, 0));
|
| - __ add(r3, r2, Operand(r1, LSL, kPointerSizeLog2 - kSmiTagSize));
|
| + __ lsl(r3, r1, Operand(kPointerSizeLog2 - kSmiTagSize));
|
| + __ add(r3, r2, r3);
|
| __ add(r3, r3, Operand(StandardFrameConstants::kCallerSPOffset));
|
| __ str(r3, MemOperand(sp, 1 * kPointerSize));
|
|
|
| @@ -4665,17 +4590,17 @@ void ArgumentsAccessStub::GenerateNewStrict(MacroAssembler* masm) {
|
| Label add_arguments_object;
|
| __ bind(&try_allocate);
|
| __ cmp(r1, Operand(0, RelocInfo::NONE));
|
| - __ b(eq, &add_arguments_object);
|
| - __ mov(r1, Operand(r1, LSR, kSmiTagSize));
|
| + __ b(eq, &add_arguments_object, Label::kNear);
|
| + __ lsr(r1, r1, Operand(kSmiTagSize));
|
| __ add(r1, r1, Operand(FixedArray::kHeaderSize / kPointerSize));
|
| __ bind(&add_arguments_object);
|
| __ add(r1, r1, Operand(Heap::kArgumentsObjectSizeStrict / kPointerSize));
|
|
|
| // Do the allocation of both objects in one go.
|
| - __ AllocateInNewSpace(r1,
|
| - r0,
|
| - r2,
|
| - r3,
|
| + __ AllocateInNewSpace(r1, // object size
|
| + r0, // result
|
| + r2, // scratch1
|
| + r3, // scratch2
|
| &runtime,
|
| static_cast<AllocationFlags>(TAG_OBJECT |
|
| SIZE_IN_WORDS));
|
| @@ -4698,12 +4623,12 @@ void ArgumentsAccessStub::GenerateNewStrict(MacroAssembler* masm) {
|
| // If there are no actual arguments, we're done.
|
| Label done;
|
| __ cmp(r1, Operand(0, RelocInfo::NONE));
|
| - __ b(eq, &done);
|
| + __ b(eq, &done, Label::kNear);
|
|
|
| // Get the parameters pointer from the stack.
|
| __ ldr(r2, MemOperand(sp, 1 * kPointerSize));
|
|
|
| - // Set up the elements pointer in the allocated arguments object and
|
| + // Setup the elements pointer in the allocated arguments object and
|
| // initialize the header in the elements fixed array.
|
| __ add(r4, r0, Operand(Heap::kArgumentsObjectSizeStrict));
|
| __ str(r4, FieldMemOperand(r0, JSObject::kElementsOffset));
|
| @@ -4711,20 +4636,20 @@ void ArgumentsAccessStub::GenerateNewStrict(MacroAssembler* masm) {
|
| __ str(r3, FieldMemOperand(r4, FixedArray::kMapOffset));
|
| __ str(r1, FieldMemOperand(r4, FixedArray::kLengthOffset));
|
| // Untag the length for the loop.
|
| - __ mov(r1, Operand(r1, LSR, kSmiTagSize));
|
| + __ lsr(r1, r1, Operand(kSmiTagSize));
|
|
|
| // Copy the fixed array slots.
|
| Label loop;
|
| - // Set up r4 to point to the first array slot.
|
| + // Setup r4 to point to the first array slot.
|
| __ add(r4, r4, Operand(FixedArray::kHeaderSize - kHeapObjectTag));
|
| __ bind(&loop);
|
| // Pre-decrement r2 with kPointerSize on each iteration.
|
| // Pre-decrement in order to skip receiver.
|
| - __ ldr(r3, MemOperand(r2, kPointerSize, NegPreIndex));
|
| + __ sub(r2, r2, Operand(kPointerSize));
|
| + __ ldr(r3, MemOperand(r2));
|
| // Post-increment r4 with kPointerSize on each iteration.
|
| __ str(r3, MemOperand(r4, kPointerSize, PostIndex));
|
| - __ sub(r1, r1, Operand(1));
|
| - __ cmp(r1, Operand(0, RelocInfo::NONE));
|
| + __ dt(r1);
|
| __ b(ne, &loop);
|
|
|
| // Return and remove the on-stack parameters.
|
| @@ -4783,7 +4708,7 @@ void RegExpExecStub::Generate(MacroAssembler* masm) {
|
| __ ldr(r0, MemOperand(sp, kJSRegExpOffset));
|
| STATIC_ASSERT(kSmiTag == 0);
|
| __ JumpIfSmi(r0, &runtime);
|
| - __ CompareObjectType(r0, r1, r1, JS_REGEXP_TYPE);
|
| + __ CompareObjectType(r0, r1, r1, JS_REGEXP_TYPE, eq);
|
| __ b(ne, &runtime);
|
|
|
| // Check that the RegExp has been compiled (data contains a fixed array).
|
| @@ -4791,7 +4716,7 @@ void RegExpExecStub::Generate(MacroAssembler* masm) {
|
| if (FLAG_debug_code) {
|
| __ tst(regexp_data, Operand(kSmiTagMask));
|
| __ Check(ne, "Unexpected type for RegExp data, FixedArray expected");
|
| - __ CompareObjectType(regexp_data, r0, r0, FIXED_ARRAY_TYPE);
|
| + __ CompareObjectType(regexp_data, r0, r0, FIXED_ARRAY_TYPE, eq);
|
| __ Check(eq, "Unexpected type for RegExp data, FixedArray expected");
|
| }
|
|
|
| @@ -4811,8 +4736,8 @@ void RegExpExecStub::Generate(MacroAssembler* masm) {
|
| STATIC_ASSERT(kSmiTagSize + kSmiShiftSize == 1);
|
| __ add(r2, r2, Operand(2)); // r2 was a smi.
|
| // Check that the static offsets vector buffer is large enough.
|
| - __ cmp(r2, Operand(Isolate::kJSRegexpStaticOffsetsVectorSize));
|
| - __ b(hi, &runtime);
|
| + __ cmphi(r2, Operand(Isolate::kJSRegexpStaticOffsetsVectorSize));
|
| + __ bt(&runtime);
|
|
|
| // r2: Number of capture registers
|
| // regexp_data: RegExp data (FixedArray)
|
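The add above works on a smi-tagged capture count, which is already twice the number of captures; adding 2 accounts for the start/end pair of the whole match. A one-line restatement in plain C++ (not V8 code, names are illustrative):

  // Each capture group needs a start and an end offset, plus one pair for the
  // overall match; the total must fit in the static offsets vector.
  int OffsetsVectorSlotsNeeded(int number_of_captures) {
    return (number_of_captures + 1) * 2;
  }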
| @@ -4832,8 +4757,8 @@ void RegExpExecStub::Generate(MacroAssembler* masm) {
|
| // string length. A negative value will be greater (unsigned comparison).
|
| __ ldr(r0, MemOperand(sp, kPreviousIndexOffset));
|
| __ JumpIfNotSmi(r0, &runtime);
|
| - __ cmp(r3, Operand(r0));
|
| - __ b(ls, &runtime);
|
| + __ cmphi(r3, r0);
|
| + __ bf(&runtime);
|
|
|
| // r2: Number of capture registers
|
| // subject: Subject string
|
| @@ -4841,7 +4766,7 @@ void RegExpExecStub::Generate(MacroAssembler* masm) {
|
| // Check that the fourth object is a JSArray object.
|
| __ ldr(r0, MemOperand(sp, kLastMatchInfoOffset));
|
| __ JumpIfSmi(r0, &runtime);
|
| - __ CompareObjectType(r0, r1, r1, JS_ARRAY_TYPE);
|
| + __ CompareObjectType(r0, r1, r1, JS_ARRAY_TYPE, eq);
|
| __ b(ne, &runtime);
|
| // Check that the JSArray is in fast case.
|
| __ ldr(last_match_info_elements,
|
| @@ -4854,8 +4779,9 @@ void RegExpExecStub::Generate(MacroAssembler* masm) {
|
| __ ldr(r0,
|
| FieldMemOperand(last_match_info_elements, FixedArray::kLengthOffset));
|
| __ add(r2, r2, Operand(RegExpImpl::kLastMatchOverhead));
|
| - __ cmp(r2, Operand(r0, ASR, kSmiTagSize));
|
| - __ b(gt, &runtime);
|
| + __ asr(ip, r0, Operand(kSmiTagSize));
|
| + __ cmpgt(r2, ip);
|
| + __ bt(&runtime);
|
|
|
| // Reset offset for possibly sliced string.
|
| __ mov(r9, Operand(0));
|
| @@ -4867,18 +4793,17 @@ void RegExpExecStub::Generate(MacroAssembler* masm) {
|
| __ ldrb(r0, FieldMemOperand(r0, Map::kInstanceTypeOffset));
|
| // First check for flat string. None of the following string type tests will
|
| // succeed if subject is not a string or a short external string.
|
| - __ and_(r1,
|
| + __ land(r1,
|
| r0,
|
| Operand(kIsNotStringMask |
|
| kStringRepresentationMask |
|
| - kShortExternalStringMask),
|
| - SetCC);
|
| + kShortExternalStringMask));
|
| + __ tst(r1, r1);
|
| STATIC_ASSERT((kStringTag | kSeqStringTag) == 0);
|
| __ b(eq, &seq_string);
|
|
|
| // subject: Subject string
|
| // regexp_data: RegExp data (FixedArray)
|
| - // r1: whether subject is a string and if yes, its string representation
|
| // Check for flat cons string or sliced string.
|
| // A flat cons string is a cons string where the second part is the empty
|
| // string. In that case the subject string is just the first part of the cons
|
| @@ -4890,9 +4815,10 @@ void RegExpExecStub::Generate(MacroAssembler* masm) {
|
| STATIC_ASSERT(kSlicedStringTag > kExternalStringTag);
|
| STATIC_ASSERT(kIsNotStringMask > kExternalStringTag);
|
| STATIC_ASSERT(kShortExternalStringTag > kExternalStringTag);
|
| - __ cmp(r1, Operand(kExternalStringTag));
|
| - __ b(lt, &cons_string);
|
| - __ b(eq, &external_string);
|
| + __ cmpge(r1, Operand(kExternalStringTag));
|
| + __ bf(&cons_string);
|
| + __ cmpeq(r1, Operand(kExternalStringTag));
|
| + __ bt(&external_string);
|
|
|
| // Catch non-string subject or short external string.
|
| STATIC_ASSERT(kNotStringTag != 0 && kShortExternalStringTag !=0);
|
| @@ -4901,7 +4827,7 @@ void RegExpExecStub::Generate(MacroAssembler* masm) {
|
|
|
| // String is sliced.
|
| __ ldr(r9, FieldMemOperand(subject, SlicedString::kOffsetOffset));
|
| - __ mov(r9, Operand(r9, ASR, kSmiTagSize));
|
| + __ asr(r9, r9, Operand(kSmiTagSize));
|
| __ ldr(subject, FieldMemOperand(subject, SlicedString::kParentOffset));
|
| // r9: offset of sliced string, smi-tagged.
|
| __ jmp(&check_encoding);
|
| @@ -4926,10 +4852,16 @@ void RegExpExecStub::Generate(MacroAssembler* masm) {
|
| STATIC_ASSERT(4 == kAsciiStringTag);
|
| STATIC_ASSERT(kTwoByteStringTag == 0);
|
| // Find the code object based on the assumptions above.
|
| - __ and_(r0, r0, Operand(kStringEncodingMask));
|
| - __ mov(r3, Operand(r0, ASR, 2), SetCC);
|
| - __ ldr(r7, FieldMemOperand(regexp_data, JSRegExp::kDataAsciiCodeOffset), ne);
|
| - __ ldr(r7, FieldMemOperand(regexp_data, JSRegExp::kDataUC16CodeOffset), eq);
|
| + __ land(r0, r0, Operand(kStringEncodingMask));
|
| + __ asr(r3, r0, Operand(2));
|
| + __ tst(r3, r3);
|
| + Label skip_true, skip_end;
|
| + __ bt_near(&skip_true);
|
| + __ ldr(r7, FieldMemOperand(regexp_data, JSRegExp::kDataAsciiCodeOffset));
|
| + __ b_near(&skip_end);
|
| + __ bind(&skip_true);
|
| + __ ldr(r7, FieldMemOperand(regexp_data, JSRegExp::kDataUC16CodeOffset));
|
| + __ bind(&skip_end);
|
|
|
| // Check that the irregexp code has been generated for the actual string
|
| // encoding. If it has, the field contains a code object otherwise it contains
|
| @@ -4943,7 +4875,7 @@ void RegExpExecStub::Generate(MacroAssembler* masm) {
|
| // Load used arguments before starting to push arguments for call to native
|
| // RegExp code to avoid handling changing stack height.
|
| __ ldr(r1, MemOperand(sp, kPreviousIndexOffset));
|
| - __ mov(r1, Operand(r1, ASR, kSmiTagSize));
|
| + __ asr(r1, r1, Operand(kSmiTagSize));
|
|
|
| // r1: previous index
|
| // r3: encoding of subject string (1 if ASCII, 0 if two_byte);
|
| @@ -4953,6 +4885,10 @@ void RegExpExecStub::Generate(MacroAssembler* masm) {
|
| // All checks done. Now push arguments for native regexp code.
|
| __ IncrementCounter(isolate->counters()->regexp_entry_native(), 1, r0, r2);
|
|
|
| + // Save r5-r7 as they are going to be used afterwards by the C code.
|
| + // r4 is restored by a load from the right place in the same frame.
|
| + __ Push(r5, r6, r7);
|
| +
|
| // Isolates: note we add an additional parameter here (isolate pointer).
|
| const int kRegExpExecuteArguments = 9;
|
| const int kParameterRegisters = 4;
|
| @@ -4974,7 +4910,7 @@ void RegExpExecStub::Generate(MacroAssembler* masm) {
|
| __ ldr(r0, MemOperand(r0, 0));
|
| __ mov(r2, Operand(address_of_regexp_stack_memory_size));
|
| __ ldr(r2, MemOperand(r2, 0));
|
| - __ add(r0, r0, Operand(r2));
|
| + __ add(r0, r0, r2);
|
| __ str(r0, MemOperand(sp, 3 * kPointerSize));
|
|
|
| // Argument 6: Set the number of capture registers to zero to force global
|
| @@ -4989,23 +4925,29 @@ void RegExpExecStub::Generate(MacroAssembler* masm) {
|
|
|
| // For arguments 4 and 3 get string length, calculate start of string data and
|
| // calculate the shift of the index (0 for ASCII and 1 for two byte).
|
| - __ add(r8, subject, Operand(SeqString::kHeaderSize - kHeapObjectTag));
|
| + __ add(sh4_r8, subject, Operand(SeqString::kHeaderSize - kHeapObjectTag));
|
| __ eor(r3, r3, Operand(1));
|
| // Load the length from the original subject string from the previous stack
|
| // frame. Therefore we have to use fp, which points exactly to two pointer
|
| // sizes below the previous sp. (Because creating a new stack frame pushes
|
| - // the previous fp onto the stack and moves up sp by 2 * kPointerSize.)
|
| - __ ldr(subject, MemOperand(fp, kSubjectOffset + 2 * kPointerSize));
|
| + // the previous fp onto the stack and moves up sp by 2 * kPointerSize.) We
|
| + // also have to take into account the 3 registers pushed on the stack
|
| + // (r5, r6 and r7).
|
| + __ ldr(subject, MemOperand(fp, kSubjectOffset + 2 * kPointerSize +
|
| + 3 * kPointerSize));
|
| // If slice offset is not 0, load the length from the original sliced string.
|
| // Argument 4, r3: End of string data
|
| // Argument 3, r2: Start of string data
|
| // Prepare start and end index of the input.
|
| - __ add(r9, r8, Operand(r9, LSL, r3));
|
| - __ add(r2, r9, Operand(r1, LSL, r3));
|
| + __ lsl(r9, r9, r3);
|
| + __ add(r9, sh4_r8, r9);
|
| + __ lsl(ip, r1, r3);
|
| + __ add(r2, r9, ip);
|
|
|
| - __ ldr(r8, FieldMemOperand(subject, String::kLengthOffset));
|
| - __ mov(r8, Operand(r8, ASR, kSmiTagSize));
|
| - __ add(r3, r9, Operand(r8, LSL, r3));
|
| + __ ldr(sh4_r8, FieldMemOperand(subject, String::kLengthOffset));
|
| + __ asr(sh4_r8, sh4_r8, Operand(kSmiTagSize));
|
| + __ lsl(r3, sh4_r8, r3);
|
| + __ add(r3, r9, r3);
|
|
|
| // Argument 2 (r1): Previous index.
|
| // Already there
|
| @@ -5013,12 +4955,23 @@ void RegExpExecStub::Generate(MacroAssembler* masm) {
|
| // Argument 1 (r0): Subject string.
|
| __ mov(r0, subject);
|
|
|
| + __ mov(r4, r0);
|
| + __ mov(r5, r1);
|
| + __ mov(r6, r2);
|
| +
|
| // Locate the code entry and call it.
|
| - __ add(r7, r7, Operand(Code::kHeaderSize - kHeapObjectTag));
|
| - DirectCEntryStub stub;
|
| - stub.GenerateCall(masm, r7);
|
| + __ add(r0, r7, Operand(Code::kHeaderSize - kHeapObjectTag));
|
| + __ mov(r7, r3);
|
| +
|
| + DirectCEntryStub stub(r2);
|
| + stub.GenerateCall(masm, r0, r3);
|
|
|
| + // Get back the subject from the previous frame: r4 will not be scratched by
|
| + // a call to LeaveExitFrame
|
| + __ ldr(r4, MemOperand(fp, kSubjectOffset + 2 * kPointerSize +
|
| + 3 * kPointerSize));
|
| __ LeaveExitFrame(false, no_reg);
|
| + __ Pop(r5, r6, r7);
|
|
|
| // r0: result
|
| // subject: subject string (callee saved)
|
| @@ -5080,7 +5033,7 @@ void RegExpExecStub::Generate(MacroAssembler* masm) {
|
| // r1: number of capture registers
|
| // r4: subject string
|
| // Store the capture count.
|
| - __ mov(r2, Operand(r1, LSL, kSmiTagSize + kSmiShiftSize)); // To smi.
|
| + __ lsl(r2, r1, Operand(kSmiTagSize + kSmiShiftSize)); // To smi.
|
| __ str(r2, FieldMemOperand(last_match_info_elements,
|
| RegExpImpl::kLastCaptureCountOffset));
|
| // Store last subject and last input.
|
| @@ -5118,12 +5071,13 @@ void RegExpExecStub::Generate(MacroAssembler* masm) {
|
| last_match_info_elements,
|
| Operand(RegExpImpl::kFirstCaptureOffset - kHeapObjectTag));
|
| __ bind(&next_capture);
|
| - __ sub(r1, r1, Operand(1), SetCC);
|
| - __ b(mi, &done);
|
| + __ sub(r1, r1, Operand(1));
|
| + __ cmpge(r1, Operand(0));
|
| + __ bf(&done);
|
| // Read the value from the static offsets vector buffer.
|
| __ ldr(r3, MemOperand(r2, kPointerSize, PostIndex));
|
| // Store the smi value in the last match info.
|
| - __ mov(r3, Operand(r3, LSL, kSmiTagSize));
|
| + __ lsl(r3, r3, Operand(kSmiTagSize));
|
| __ str(r3, MemOperand(r0, kPointerSize, PostIndex));
|
| __ jmp(&next_capture);
|
| __ bind(&done);
|
| @@ -5170,8 +5124,8 @@ void RegExpConstructResultStub::Generate(MacroAssembler* masm) {
|
| STATIC_ASSERT(kSmiTag == 0);
|
| STATIC_ASSERT(kSmiTagSize == 1);
|
| __ JumpIfNotSmi(r1, &slowcase);
|
| - __ cmp(r1, Operand(Smi::FromInt(kMaxInlineLength)));
|
| - __ b(hi, &slowcase);
|
| + __ cmphi(r1, Operand(Smi::FromInt(kMaxInlineLength)));
|
| + __ b(t, &slowcase);
|
| // Smi-tagging is equivalent to multiplying by 2.
|
| // Allocate RegExpResult followed by FixedArray with size in ebx.
|
| // JSArray: [Map][empty properties][Elements][Length-smi][index][input]
|
| @@ -5180,7 +5134,7 @@ void RegExpConstructResultStub::Generate(MacroAssembler* masm) {
|
| // FixedArray.
|
| int objects_size =
|
| (JSRegExpResult::kSize + FixedArray::kHeaderSize) / kPointerSize;
|
| - __ mov(r5, Operand(r1, LSR, kSmiTagSize + kSmiShiftSize));
|
| + __ lsr(r5, r1, Operand(kSmiTagSize + kSmiShiftSize));
|
| __ add(r2, r5, Operand(objects_size));
|
| __ AllocateInNewSpace(
|
| r2, // In: Size, in words.
|
| @@ -5223,7 +5177,7 @@ void RegExpConstructResultStub::Generate(MacroAssembler* masm) {
|
| __ mov(r2, Operand(factory->fixed_array_map()));
|
| __ str(r2, FieldMemOperand(r3, HeapObject::kMapOffset));
|
| // Set FixedArray length.
|
| - __ mov(r6, Operand(r5, LSL, kSmiTagSize));
|
| + __ lsl(r6, r5, Operand(kSmiTagSize));
|
| __ str(r6, FieldMemOperand(r3, FixedArray::kLengthOffset));
|
| // Fill contents of fixed-array with undefined.
|
| __ LoadRoot(r2, Heap::kUndefinedValueRootIndex);
|
| @@ -5234,11 +5188,13 @@ void RegExpConstructResultStub::Generate(MacroAssembler* masm) {
|
| // r3: Start of elements in FixedArray.
|
| // r5: Number of elements to fill.
|
| Label loop;
|
| - __ cmp(r5, Operand(0));
|
| + __ cmpgt(r5, Operand(0));
|
| __ bind(&loop);
|
| - __ b(le, &done); // Jump if r5 is negative or zero.
|
| - __ sub(r5, r5, Operand(1), SetCC);
|
| - __ str(r2, MemOperand(r3, r5, LSL, kPointerSizeLog2));
|
| + __ b(f, &done, Label::kNear); // Jump if r5 is negative or zero.
|
| + __ sub(r5, r5, Operand(1));
|
| + __ lsl(ip, r5, Operand(kPointerSizeLog2));
|
| + __ str(r2, MemOperand(r3, ip));
|
| + __ cmpgt(r5, Operand(0));
|
| __ jmp(&loop);
|
|
|
| __ bind(&done);
|
| @@ -5275,14 +5231,18 @@ static void GenerateRecordCallTarget(MacroAssembler* masm) {
|
|
|
| // A monomorphic miss (i.e, here the cache is not uninitialized) goes
|
| // megamorphic.
|
| + Label skip;
|
| __ CompareRoot(r3, Heap::kTheHoleValueRootIndex);
|
| // MegamorphicSentinel is an immortal immovable object (undefined) so no
|
| // write-barrier is needed.
|
| - __ LoadRoot(ip, Heap::kUndefinedValueRootIndex, ne);
|
| - __ str(ip, FieldMemOperand(r2, JSGlobalPropertyCell::kValueOffset), ne);
|
| + __ bt_near(&skip);
|
| + __ LoadRoot(ip, Heap::kUndefinedValueRootIndex);
|
| + __ str(ip, FieldMemOperand(r2, JSGlobalPropertyCell::kValueOffset));
|
| + __ jmp(&done);
|
|
|
| // An uninitialized cache is patched with the function.
|
| - __ str(r1, FieldMemOperand(r2, JSGlobalPropertyCell::kValueOffset), eq);
|
| + __ bind(&skip);
|
| + __ str(r1, FieldMemOperand(r2, JSGlobalPropertyCell::kValueOffset));
|
| // No need for a write barrier here - cells are rescanned.
|
|
|
| __ bind(&done);
|
| @@ -5304,7 +5264,7 @@ void CallFunctionStub::Generate(MacroAssembler* masm) {
|
| __ ldr(r4, MemOperand(sp, argc_ * kPointerSize));
|
| // Call as function is indicated with the hole.
|
| __ CompareRoot(r4, Heap::kTheHoleValueRootIndex);
|
| - __ b(ne, &call);
|
| + __ b(ne, &call, Label::kNear);
|
| // Patch the receiver on the stack with the global receiver object.
|
| __ ldr(r3,
|
| MemOperand(cp, Context::SlotOffset(Context::GLOBAL_OBJECT_INDEX)));
|
| @@ -5317,7 +5277,7 @@ void CallFunctionStub::Generate(MacroAssembler* masm) {
|
| // r1: pushed function (to be verified)
|
| __ JumpIfSmi(r1, &non_function);
|
| // Get the map of the function object.
|
| - __ CompareObjectType(r1, r3, r3, JS_FUNCTION_TYPE);
|
| + __ CompareObjectType(r1, r2, r2, JS_FUNCTION_TYPE, eq);
|
| __ b(ne, &slow);
|
|
|
| if (RecordCallTarget()) {
|
| @@ -5331,7 +5291,7 @@ void CallFunctionStub::Generate(MacroAssembler* masm) {
|
| if (ReceiverMightBeImplicit()) {
|
| Label call_as_function;
|
| __ CompareRoot(r4, Heap::kTheHoleValueRootIndex);
|
| - __ b(eq, &call_as_function);
|
| + __ b(eq, &call_as_function, Label::kNear);
|
| __ InvokeFunction(r1,
|
| actual,
|
| JUMP_FUNCTION,
|
| @@ -5392,7 +5352,7 @@ void CallConstructStub::Generate(MacroAssembler* masm) {
|
| // Check that the function is not a smi.
|
| __ JumpIfSmi(r1, &non_function_call);
|
| // Check that the function is a JSFunction.
|
| - __ CompareObjectType(r1, r3, r3, JS_FUNCTION_TYPE);
|
| + __ CompareObjectType(r1, r3, r3, JS_FUNCTION_TYPE, eq);
|
| __ b(ne, &slow);
|
|
|
| if (RecordCallTarget()) {
|
| @@ -5402,7 +5362,8 @@ void CallConstructStub::Generate(MacroAssembler* masm) {
|
| // Jump to the function-specific construct stub.
|
| __ ldr(r2, FieldMemOperand(r1, JSFunction::kSharedFunctionInfoOffset));
|
| __ ldr(r2, FieldMemOperand(r2, SharedFunctionInfo::kConstructStubOffset));
|
| - __ add(pc, r2, Operand(Code::kHeaderSize - kHeapObjectTag));
|
| + __ add(r2, r2, Operand(Code::kHeaderSize - kHeapObjectTag));
|
| + __ jmp(r2);
|
|
|
| // r0: number of arguments
|
| // r1: called object
|
| @@ -5490,10 +5451,10 @@ void StringCharCodeAtGenerator::GenerateFast(MacroAssembler* masm) {
|
|
|
| // Check for index out of range.
|
| __ ldr(ip, FieldMemOperand(object_, String::kLengthOffset));
|
| - __ cmp(ip, Operand(index_));
|
| - __ b(ls, index_out_of_range_);
|
| + __ cmphi(ip, index_);
|
| + __ bf(index_out_of_range_);
|
|
|
| - __ mov(index_, Operand(index_, ASR, kSmiTagSize));
|
| + __ asr(index_, index_, Operand(kSmiTagSize));
|
|
|
| StringCharLoadGenerator::Generate(masm,
|
| object_,
|
| @@ -5501,7 +5462,7 @@ void StringCharCodeAtGenerator::GenerateFast(MacroAssembler* masm) {
|
| result_,
|
| &call_runtime_);
|
|
|
| - __ mov(result_, Operand(result_, LSL, kSmiTagSize));
|
| + __ lsl(result_, result_, Operand(kSmiTagSize));
|
| __ bind(&exit_);
|
| }
|
|
|
| @@ -5531,7 +5492,8 @@ void StringCharCodeAtGenerator::GenerateSlow(
|
| }
|
| // Save the conversion result before the pop instructions below
|
| // have a chance to overwrite it.
|
| - __ Move(index_, r0);
|
| + __ mov(index_, r0);
|
| +
|
| __ pop(object_);
|
| // Reload the instance type.
|
| __ ldr(result_, FieldMemOperand(object_, HeapObject::kMapOffset));
|
| @@ -5547,10 +5509,10 @@ void StringCharCodeAtGenerator::GenerateSlow(
|
| // is too complex (e.g., when the string needs to be flattened).
|
| __ bind(&call_runtime_);
|
| call_helper.BeforeCall(masm);
|
| - __ mov(index_, Operand(index_, LSL, kSmiTagSize));
|
| + __ lsl(index_, index_, Operand(kSmiTagSize));
|
| __ Push(object_, index_);
|
| __ CallRuntime(Runtime::kStringCharCodeAt, 2);
|
| - __ Move(result_, r0);
|
| + __ mov(result_, r0);
|
| call_helper.AfterCall(masm);
|
| __ jmp(&exit_);
|
|
|
| @@ -5566,6 +5528,9 @@ void StringCharFromCodeGenerator::GenerateFast(MacroAssembler* masm) {
|
| STATIC_ASSERT(kSmiTag == 0);
|
| STATIC_ASSERT(kSmiShiftSize == 0);
|
| ASSERT(IsPowerOf2(String::kMaxAsciiCharCode + 1));
|
| +
|
| + ASSERT(!code_.is(ip) && !result_.is(ip));
|
| +
|
| __ tst(code_,
|
| Operand(kSmiTagMask |
|
| ((~String::kMaxAsciiCharCode) << kSmiTagSize)));
|
| @@ -5574,7 +5539,8 @@ void StringCharFromCodeGenerator::GenerateFast(MacroAssembler* masm) {
|
| __ LoadRoot(result_, Heap::kSingleCharacterStringCacheRootIndex);
|
| // At this point code register contains smi tagged ASCII char code.
|
| STATIC_ASSERT(kSmiTag == 0);
|
| - __ add(result_, result_, Operand(code_, LSL, kPointerSizeLog2 - kSmiTagSize));
|
| + __ lsl(ip, code_, Operand(kPointerSizeLog2 - kSmiTagSize));
|
| + __ add(result_, result_, ip);
|
| __ ldr(result_, FieldMemOperand(result_, FixedArray::kHeaderSize));
|
| __ CompareRoot(result_, Heap::kUndefinedValueRootIndex);
|
| __ b(eq, &slow_case_);
|
| @@ -5591,7 +5557,7 @@ void StringCharFromCodeGenerator::GenerateSlow(
|
| call_helper.BeforeCall(masm);
|
| __ push(code_);
|
| __ CallRuntime(Runtime::kCharFromCode, 1);
|
| - __ Move(result_, r0);
|
| + __ mov(result_, r0);
|
| call_helper.AfterCall(masm);
|
| __ jmp(&exit_);
|
|
|
| @@ -5627,20 +5593,22 @@ void StringHelper::GenerateCopyCharacters(MacroAssembler* masm,
|
| // This loop just copies one character at a time, as it is only used for very
|
| // short strings.
|
| if (!ascii) {
|
| - __ add(count, count, Operand(count), SetCC);
|
| - } else {
|
| - __ cmp(count, Operand(0, RelocInfo::NONE));
|
| + __ add(count, count, count);
|
| }
|
| - __ b(eq, &done);
|
| + __ cmp(count, Operand(0));
|
| + __ b(eq, &done, Label::kNear);
|
|
|
| __ bind(&loop);
|
| - __ ldrb(scratch, MemOperand(src, 1, PostIndex));
|
| + __ ldrb(scratch, MemOperand(src));
|
| + __ add(src, src, Operand(1));
|
| // Perform sub between load and dependent store to get the load time to
|
| // complete.
|
| - __ sub(count, count, Operand(1), SetCC);
|
| - __ strb(scratch, MemOperand(dest, 1, PostIndex));
|
| + __ cmpgt(count, Operand(1));
|
| + __ sub(count, count, Operand(1));
|
| + __ strb(scratch, MemOperand(dest));
|
| + __ add(dest, dest, Operand(1));
|
| // last iteration.
|
| - __ b(gt, &loop);
|
| + __ bt(&loop);
|
|
|
| __ bind(&done);
|
| }
|
| @@ -5673,7 +5641,6 @@ void StringHelper::GenerateCopyCharactersLong(MacroAssembler* masm,
|
| }
|
|
|
| const int kReadAlignment = 4;
|
| - const int kReadAlignmentMask = kReadAlignment - 1;
|
| // Ensure that reading an entire aligned word containing the last character
|
| // of a string will not read outside the allocated area (because we pad up
|
| // to kObjectAlignment).
|
| @@ -5682,115 +5649,13 @@ void StringHelper::GenerateCopyCharactersLong(MacroAssembler* masm,
|
| // Nothing to do for zero characters.
|
| Label done;
|
| if (!ascii) {
|
| - __ add(count, count, Operand(count), SetCC);
|
| - } else {
|
| - __ cmp(count, Operand(0, RelocInfo::NONE));
|
| + __ add(count, count, count);
|
| }
|
| - __ b(eq, &done);
|
| -
|
| - // Assume that you cannot read (or write) unaligned.
|
| - Label byte_loop;
|
| - // Must copy at least eight bytes, otherwise just do it one byte at a time.
|
| - __ cmp(count, Operand(8));
|
| - __ add(count, dest, Operand(count));
|
| - Register limit = count; // Read until src equals this.
|
| - __ b(lt, &byte_loop);
|
| -
|
| - if (!dest_always_aligned) {
|
| - // Align dest by byte copying. Copies between zero and three bytes.
|
| - __ and_(scratch4, dest, Operand(kReadAlignmentMask), SetCC);
|
| - Label dest_aligned;
|
| - __ b(eq, &dest_aligned);
|
| - __ cmp(scratch4, Operand(2));
|
| - __ ldrb(scratch1, MemOperand(src, 1, PostIndex));
|
| - __ ldrb(scratch2, MemOperand(src, 1, PostIndex), le);
|
| - __ ldrb(scratch3, MemOperand(src, 1, PostIndex), lt);
|
| - __ strb(scratch1, MemOperand(dest, 1, PostIndex));
|
| - __ strb(scratch2, MemOperand(dest, 1, PostIndex), le);
|
| - __ strb(scratch3, MemOperand(dest, 1, PostIndex), lt);
|
| - __ bind(&dest_aligned);
|
| - }
|
| -
|
| - Label simple_loop;
|
| -
|
| - __ sub(scratch4, dest, Operand(src));
|
| - __ and_(scratch4, scratch4, Operand(0x03), SetCC);
|
| - __ b(eq, &simple_loop);
|
| - // Shift register is number of bits in a source word that
|
| - // must be combined with bits in the next source word in order
|
| - // to create a destination word.
|
| -
|
| - // Complex loop for src/dst that are not aligned the same way.
|
| - {
|
| - Label loop;
|
| - __ mov(scratch4, Operand(scratch4, LSL, 3));
|
| - Register left_shift = scratch4;
|
| - __ and_(src, src, Operand(~3)); // Round down to load previous word.
|
| - __ ldr(scratch1, MemOperand(src, 4, PostIndex));
|
| - // Store the "shift" most significant bits of scratch in the least
|
| - // signficant bits (i.e., shift down by (32-shift)).
|
| - __ rsb(scratch2, left_shift, Operand(32));
|
| - Register right_shift = scratch2;
|
| - __ mov(scratch1, Operand(scratch1, LSR, right_shift));
|
| -
|
| - __ bind(&loop);
|
| - __ ldr(scratch3, MemOperand(src, 4, PostIndex));
|
| - __ sub(scratch5, limit, Operand(dest));
|
| - __ orr(scratch1, scratch1, Operand(scratch3, LSL, left_shift));
|
| - __ str(scratch1, MemOperand(dest, 4, PostIndex));
|
| - __ mov(scratch1, Operand(scratch3, LSR, right_shift));
|
| - // Loop if four or more bytes left to copy.
|
| - // Compare to eight, because we did the subtract before increasing dst.
|
| - __ sub(scratch5, scratch5, Operand(8), SetCC);
|
| - __ b(ge, &loop);
|
| - }
|
| - // There is now between zero and three bytes left to copy (negative that
|
| - // number is in scratch5), and between one and three bytes already read into
|
| - // scratch1 (eight times that number in scratch4). We may have read past
|
| - // the end of the string, but because objects are aligned, we have not read
|
| - // past the end of the object.
|
| - // Find the minimum of remaining characters to move and preloaded characters
|
| - // and write those as bytes.
|
| - __ add(scratch5, scratch5, Operand(4), SetCC);
|
| - __ b(eq, &done);
|
| - __ cmp(scratch4, Operand(scratch5, LSL, 3), ne);
|
| - // Move minimum of bytes read and bytes left to copy to scratch4.
|
| - __ mov(scratch5, Operand(scratch4, LSR, 3), LeaveCC, lt);
|
| - // Between one and three (value in scratch5) characters already read into
|
| - // scratch ready to write.
|
| - __ cmp(scratch5, Operand(2));
|
| - __ strb(scratch1, MemOperand(dest, 1, PostIndex));
|
| - __ mov(scratch1, Operand(scratch1, LSR, 8), LeaveCC, ge);
|
| - __ strb(scratch1, MemOperand(dest, 1, PostIndex), ge);
|
| - __ mov(scratch1, Operand(scratch1, LSR, 8), LeaveCC, gt);
|
| - __ strb(scratch1, MemOperand(dest, 1, PostIndex), gt);
|
| - // Copy any remaining bytes.
|
| - __ b(&byte_loop);
|
| -
|
| - // Simple loop.
|
| - // Copy words from src to dst, until less than four bytes left.
|
| - // Both src and dest are word aligned.
|
| - __ bind(&simple_loop);
|
| - {
|
| - Label loop;
|
| - __ bind(&loop);
|
| - __ ldr(scratch1, MemOperand(src, 4, PostIndex));
|
| - __ sub(scratch3, limit, Operand(dest));
|
| - __ str(scratch1, MemOperand(dest, 4, PostIndex));
|
| - // Compare to 8, not 4, because we do the substraction before increasing
|
| - // dest.
|
| - __ cmp(scratch3, Operand(8));
|
| - __ b(ge, &loop);
|
| - }
|
| -
|
| - // Copy bytes from src to dst until dst hits limit.
|
| - __ bind(&byte_loop);
|
| - __ cmp(dest, Operand(limit));
|
| - __ ldrb(scratch1, MemOperand(src, 1, PostIndex), lt);
|
| - __ b(ge, &done);
|
| - __ strb(scratch1, MemOperand(dest, 1, PostIndex));
|
| - __ b(&byte_loop);
|
| + __ cmpeq(count, Operand(0));
|
| + __ bt_near(&done);
|
|
|
| + // Use an optimized memcpy version for SH4.
|
| + __ memcpy(dest, src, count, scratch1, scratch2, scratch3, scratch4);
|
| __ bind(&done);
|
| }
|
|
|
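With the ARM byte-at-a-time loops gone, the helper above reduces to doubling the count for two-byte strings and handing the whole range to an optimized copy. A minimal sketch in plain C++ (not V8 code; std::memcpy stands in for the SH4 macro-assembler memcpy helper):

  #include <cstddef>
  #include <cstring>

  // |count| is in characters; two-byte strings need twice as many bytes.
  void CopyCharacters(void* dest, const void* src, std::size_t count, bool ascii) {
    std::size_t bytes = ascii ? count : count * 2;
    if (bytes != 0) std::memcpy(dest, src, bytes);
  }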
| @@ -5811,27 +5676,29 @@ void StringHelper::GenerateTwoCharacterSymbolTableProbe(MacroAssembler* masm,
|
| // different hash algorithm. Don't try to look for these in the symbol table.
|
| Label not_array_index;
|
| __ sub(scratch, c1, Operand(static_cast<int>('0')));
|
| - __ cmp(scratch, Operand(static_cast<int>('9' - '0')));
|
| - __ b(hi, &not_array_index);
|
| + __ cmphi(scratch, Operand(static_cast<int>('9' - '0')));
|
| + __ bt_near(&not_array_index);
|
| __ sub(scratch, c2, Operand(static_cast<int>('0')));
|
| - __ cmp(scratch, Operand(static_cast<int>('9' - '0')));
|
| + __ cmphi(scratch, Operand(static_cast<int>('9' - '0')));
|
|
|
| // If check failed combine both characters into single halfword.
|
| // This is required by the contract of the method: code at the
|
| // not_found branch expects this combination in c1 register
|
| - __ orr(c1, c1, Operand(c2, LSL, kBitsPerByte), LeaveCC, ls);
|
| - __ b(ls, not_found);
|
| + __ lsl(scratch1, c2, Operand(kBitsPerByte));
|
| + __ lor(c1, c1, scratch1, f);
|
| + __ b(f, not_found);
|
|
|
| __ bind(&not_array_index);
|
| // Calculate the two character string hash.
|
| Register hash = scratch1;
|
| - StringHelper::GenerateHashInit(masm, hash, c1);
|
| - StringHelper::GenerateHashAddCharacter(masm, hash, c2);
|
| - StringHelper::GenerateHashGetHash(masm, hash);
|
| + StringHelper::GenerateHashInit(masm, hash, c1, scratch);
|
| + StringHelper::GenerateHashAddCharacter(masm, hash, c2, scratch);
|
| + StringHelper::GenerateHashGetHash(masm, hash, scratch);
|
|
|
| // Collect the two characters in a register.
|
| Register chars = c1;
|
| - __ orr(chars, chars, Operand(c2, LSL, kBitsPerByte));
|
| + __ lsl(scratch, c2, Operand(kBitsPerByte));
|
| + __ lor(chars, chars, scratch);
|
|
|
| // chars: two character string, char 1 in byte 0 and char 2 in byte 1.
|
| // hash: hash of two character string.
|
| @@ -5847,7 +5714,7 @@ void StringHelper::GenerateTwoCharacterSymbolTableProbe(MacroAssembler* masm,
|
| // Calculate capacity mask from the symbol table capacity.
|
| Register mask = scratch2;
|
| __ ldr(mask, FieldMemOperand(symbol_table, SymbolTable::kCapacityOffset));
|
| - __ mov(mask, Operand(mask, ASR, 1));
|
| + __ asr(mask, mask, Operand(1));
|
| __ sub(mask, mask, Operand(1));
|
|
|
| // Calculate untagged address of the first element of the symbol table.
|
| @@ -5877,20 +5744,18 @@ void StringHelper::GenerateTwoCharacterSymbolTableProbe(MacroAssembler* masm,
|
| __ mov(candidate, hash);
|
| }
|
|
|
| - __ and_(candidate, candidate, Operand(mask));
|
| + __ land(candidate, candidate, mask);
|
|
|
| // Load the entry from the symble table.
|
| STATIC_ASSERT(SymbolTable::kEntrySize == 1);
|
| - __ ldr(candidate,
|
| - MemOperand(first_symbol_table_element,
|
| - candidate,
|
| - LSL,
|
| - kPointerSizeLog2));
|
| + __ lsl(scratch, candidate, Operand(kPointerSizeLog2));
|
| + __ add(scratch, first_symbol_table_element, scratch);
|
| + __ ldr(candidate, MemOperand(scratch));
|
|
|
| // If entry is undefined no string with this hash can be found.
|
| Label is_string;
|
| - __ CompareObjectType(candidate, scratch, scratch, ODDBALL_TYPE);
|
| - __ b(ne, &is_string);
|
| + __ CompareObjectType(candidate, scratch, scratch, ODDBALL_TYPE, eq);
|
| + __ b(ne, &is_string, Label::kNear);
|
|
|
| __ cmp(undefined, candidate);
|
| __ b(eq, not_found);
|
| @@ -5934,43 +5799,57 @@ void StringHelper::GenerateTwoCharacterSymbolTableProbe(MacroAssembler* masm,
|
|
|
| void StringHelper::GenerateHashInit(MacroAssembler* masm,
|
| Register hash,
|
| - Register character) {
|
| + Register character,
|
| + Register scratch) {
|
| + // Added a scratch parameter for the SH4 implementation compared to ARM.
|
| // hash = character + (character << 10);
|
| __ LoadRoot(hash, Heap::kHashSeedRootIndex);
|
| // Untag smi seed and add the character.
|
| - __ add(hash, character, Operand(hash, LSR, kSmiTagSize));
|
| + __ lsr(hash, hash, Operand(kSmiTagSize));
|
| + __ add(hash, character, hash);
|
| // hash += hash << 10;
|
| - __ add(hash, hash, Operand(hash, LSL, 10));
|
| + __ lsl(scratch, hash, Operand(10));
|
| + __ add(hash, hash, scratch);
|
| // hash ^= hash >> 6;
|
| - __ eor(hash, hash, Operand(hash, LSR, 6));
|
| + __ lsr(scratch, hash, Operand(6));
|
| + __ eor(hash, hash, scratch);
|
| }
|
|
|
|
|
| void StringHelper::GenerateHashAddCharacter(MacroAssembler* masm,
|
| Register hash,
|
| - Register character) {
|
| + Register character,
|
| + Register scratch) {
|
| + // Added a scratch parameter for the SH4 implementation compared to ARM.
|
| // hash += character;
|
| - __ add(hash, hash, Operand(character));
|
| + __ add(hash, hash, character);
|
| // hash += hash << 10;
|
| - __ add(hash, hash, Operand(hash, LSL, 10));
|
| + __ lsl(scratch, hash, Operand(10));
|
| + __ add(hash, hash, scratch);
|
| // hash ^= hash >> 6;
|
| - __ eor(hash, hash, Operand(hash, LSR, 6));
|
| + __ lsr(scratch, hash, Operand(6));
|
| + __ eor(hash, hash, scratch);
|
| }
|
|
|
|
|
| void StringHelper::GenerateHashGetHash(MacroAssembler* masm,
|
| - Register hash) {
|
| + Register hash,
|
| + Register scratch) {
|
| + // Added a scratch parameter for the SH4 implementation compared to ARM.
|
| // hash += hash << 3;
|
| - __ add(hash, hash, Operand(hash, LSL, 3));
|
| + __ lsl(scratch, hash, Operand(3));
|
| + __ add(hash, hash, scratch);
|
| // hash ^= hash >> 11;
|
| - __ eor(hash, hash, Operand(hash, LSR, 11));
|
| + __ lsr(scratch, hash, Operand(11));
|
| + __ eor(hash, hash, scratch);
|
| // hash += hash << 15;
|
| - __ add(hash, hash, Operand(hash, LSL, 15));
|
| -
|
| - __ and_(hash, hash, Operand(String::kHashBitMask), SetCC);
|
| + __ lsl(scratch, hash, Operand(15));
|
| + __ add(hash, hash, scratch);
|
| + __ land(hash, hash, Operand(String::kHashBitMask));
|
| + __ cmpeq(hash, Operand(0));
|
|
|
| // if (hash == 0) hash = 27;
|
| - __ mov(hash, Operand(StringHasher::kZeroHash), LeaveCC, eq);
|
| + __ mov(hash, Operand(StringHasher::kZeroHash), eq);
|
| }
|
|
|
|
|
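The three hash helpers above spell out, with explicit lsl/lsr into the added scratch register, the add-shift-xor hash that their comments describe. A hypothetical standalone version in plain C++ (not V8 code; the seed handling is simplified and the hash bit mask value is an assumption):

  #include <cstddef>
  #include <cstdint>

  uint32_t StringHash(const unsigned char* chars, std::size_t length, uint32_t seed) {
    uint32_t hash = seed;            // GenerateHashInit starts from the untagged seed
    for (std::size_t i = 0; i < length; ++i) {
      hash += chars[i];              // hash += character
      hash += hash << 10;            // hash += hash << 10
      hash ^= hash >> 6;             // hash ^= hash >> 6
    }
    hash += hash << 3;               // GenerateHashGetHash finalization
    hash ^= hash >> 11;
    hash += hash << 15;
    hash &= 0x3fffffffu;             // assumed value of String::kHashBitMask
    return hash == 0 ? 27 : hash;    // if (hash == 0) hash = StringHasher::kZeroHash
  }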
| @@ -5998,18 +5877,17 @@ void SubStringStub::Generate(MacroAssembler* masm) {
|
| STATIC_ASSERT(kFromOffset == kToOffset + 4);
|
| STATIC_ASSERT(kSmiTag == 0);
|
| STATIC_ASSERT(kSmiTagSize + kSmiShiftSize == 1);
|
| -
|
| + __ JumpIfNotBothSmi(r2, r3, &runtime); // Not present in the ARM code.
|
| // I.e., arithmetic shift right by one un-smi-tags.
|
| - __ mov(r2, Operand(r2, ASR, 1), SetCC);
|
| - __ mov(r3, Operand(r3, ASR, 1), SetCC, cc);
|
| - // If either to or from had the smi tag bit set, then carry is set now.
|
| - __ b(cs, &runtime); // Either "from" or "to" is not a smi.
|
| - // We want to bailout to runtime here if From is negative. In that case, the
|
| - // next instruction is not executed and we fall through to bailing out to
|
| - // runtime. pl is the opposite of mi.
|
| - // Both r2 and r3 are untagged integers.
|
| - __ sub(r2, r2, Operand(r3), SetCC, pl);
|
| - __ b(mi, &runtime); // Fail if from > to.
|
| + __ asr(r2, r2, Operand(1));
|
| + __ asr(r3, r3, Operand(1));
|
| + __ cmpge(r3, Operand(0));
|
| + __ bf(&runtime); // From is negative.
|
| +
|
| + // Both to and from are smis.
|
| + __ sub(r2, r2, r3);
|
| + __ cmpge(r2, Operand(0));
|
| + __ bf(&runtime); // Fail if from > to.
|
|
|
| // Make sure first argument is a string.
|
| __ ldr(r0, MemOperand(sp, kStringOffset));
|
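Without the ARM trick of folding the smi checks into the shift's carry flag, the port above checks both smis explicitly and then validates the indices one comparison at a time; the length check against the original string follows in the next hunk. A minimal sketch of the combined bailout conditions in plain C++ (not V8 code):

  // Returns false when the stub should fall back to the runtime.
  bool SubstringArgsValid(int from, int to, int string_length) {
    if (from < 0) return false;             // negative start index
    int result_length = to - from;
    if (result_length < 0) return false;    // from > to
    // Equal to the original length returns the original string; longer is unsafe.
    return result_length <= string_length;
  }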
| @@ -6023,11 +5901,12 @@ void SubStringStub::Generate(MacroAssembler* masm) {
|
| // r0: original string
|
| // r2: result string length
|
| __ ldr(r4, FieldMemOperand(r0, String::kLengthOffset));
|
| - __ cmp(r2, Operand(r4, ASR, 1));
|
| - // Return original string.
|
| - __ b(eq, &return_r0);
|
| + __ asr(r4, r4, Operand(1));
|
| + __ cmpeq(r2, r4);
|
| + __ bt(&return_r0);
|
| // Longer than original string's length or negative: unsafe arguments.
|
| - __ b(hi, &runtime);
|
| + __ cmphi(r2, r4);
|
| + __ bt(&runtime);
|
| // Shorter than original string's length: an actual substring.
|
|
|
| // Deal with different string types: update the index if necessary
|
| @@ -6059,7 +5938,8 @@ void SubStringStub::Generate(MacroAssembler* masm) {
|
| // Sliced string. Fetch parent and correct start index by offset.
|
| __ ldr(r5, FieldMemOperand(r0, SlicedString::kParentOffset));
|
| __ ldr(r4, FieldMemOperand(r0, SlicedString::kOffsetOffset));
|
| - __ add(r3, r3, Operand(r4, ASR, 1)); // Add offset to index.
|
| + __ asr(r1, r4, Operand(1));
|
| + __ add(r3, r3, r1); // Add offset to index.
|
| // Update instance type.
|
| __ ldr(r1, FieldMemOperand(r5, HeapObject::kMapOffset));
|
| __ ldrb(r1, FieldMemOperand(r1, Map::kInstanceTypeOffset));
|
| @@ -6077,9 +5957,9 @@ void SubStringStub::Generate(MacroAssembler* masm) {
|
| // r1: instance type of underlying subject string
|
| // r2: length
|
| // r3: adjusted start index (untagged)
|
| - __ cmp(r2, Operand(SlicedString::kMinLength));
|
| + __ cmpge(r2, Operand(SlicedString::kMinLength));
|
| // Short slice. Copy instead of slicing.
|
| - __ b(lt, &copy_routine);
|
| + __ bf(&copy_routine);
|
| // Allocate new sliced string. At this point we do not reload the instance
|
| // type including the string encoding because we simply rely on the info
|
| // provided by the original string. It does not matter if the original
|
| @@ -6095,7 +5975,7 @@ void SubStringStub::Generate(MacroAssembler* masm) {
|
| __ bind(&two_byte_slice);
|
| __ AllocateTwoByteSlicedString(r0, r2, r6, r7, &runtime);
|
| __ bind(&set_slice_header);
|
| - __ mov(r3, Operand(r3, LSL, 1));
|
| + __ lsl(r3, r3, Operand(1));
|
| __ str(r5, FieldMemOperand(r0, SlicedString::kParentOffset));
|
| __ str(r3, FieldMemOperand(r0, SlicedString::kOffsetOffset));
|
| __ jmp(&return_r0);
|
| @@ -6156,7 +6036,8 @@ void SubStringStub::Generate(MacroAssembler* masm) {
|
|
|
| // Locate first character of substring to copy.
|
| STATIC_ASSERT(kSmiTagSize == 1 && kSmiTag == 0);
|
| - __ add(r5, r5, Operand(r3, LSL, 1));
|
| + __ lsl(r1, r3, Operand(1));
|
| + __ add(r5, r5, r1);
|
| // Locate first character of result.
|
| __ add(r1, r0, Operand(SeqTwoByteString::kHeaderSize - kHeapObjectTag));
|
|
|
| @@ -6193,7 +6074,7 @@ void StringCompareStub::GenerateFlatAsciiStringEquals(MacroAssembler* masm,
|
| __ ldr(length, FieldMemOperand(left, String::kLengthOffset));
|
| __ ldr(scratch2, FieldMemOperand(right, String::kLengthOffset));
|
| __ cmp(length, scratch2);
|
| - __ b(eq, &check_zero_length);
|
| + __ b(eq, &check_zero_length, Label::kNear);
|
| __ bind(&strings_not_equal);
|
| __ mov(r0, Operand(Smi::FromInt(NOT_EQUAL)));
|
| __ Ret();
|
| @@ -6202,8 +6083,8 @@ void StringCompareStub::GenerateFlatAsciiStringEquals(MacroAssembler* masm,
|
| Label compare_chars;
|
| __ bind(&check_zero_length);
|
| STATIC_ASSERT(kSmiTag == 0);
|
| - __ cmp(length, Operand(0));
|
| - __ b(ne, &compare_chars);
|
| + __ cmpeq(length, Operand(0));
|
| + __ b(ne, &compare_chars, Label::kNear);
|
| __ mov(r0, Operand(Smi::FromInt(EQUAL)));
|
| __ Ret();
|
|
|
| @@ -6226,16 +6107,18 @@ void StringCompareStub::GenerateCompareFlatAsciiStrings(MacroAssembler* masm,
|
| Register scratch2,
|
| Register scratch3,
|
| Register scratch4) {
|
| - Label result_not_equal, compare_lengths;
|
| + ASSERT(!scratch2.is(r0) && !scratch4.is(r0));
|
| + Label result_not_equal, compare_lengths, skip;
|
| // Find minimum length and length difference.
|
| __ ldr(scratch1, FieldMemOperand(left, String::kLengthOffset));
|
| __ ldr(scratch2, FieldMemOperand(right, String::kLengthOffset));
|
| - __ sub(scratch3, scratch1, Operand(scratch2), SetCC);
|
| + __ cmpgt(scratch1, scratch2); // for cond mov below
|
| + __ sub(scratch3, scratch1, scratch2);
|
| Register length_delta = scratch3;
|
| - __ mov(scratch1, scratch2, LeaveCC, gt);
|
| + __ mov(scratch1, scratch2, t);
|
| Register min_length = scratch1;
|
| STATIC_ASSERT(kSmiTag == 0);
|
| - __ cmp(min_length, Operand(0));
|
| + __ cmpeq(min_length, Operand(0));
|
| __ b(eq, &compare_lengths);
|
|
|
| // Compare loop.
|
| @@ -6247,12 +6130,20 @@ void StringCompareStub::GenerateCompareFlatAsciiStrings(MacroAssembler* masm,
|
| __ bind(&compare_lengths);
|
| ASSERT(Smi::FromInt(EQUAL) == static_cast<Smi*>(0));
|
| // Use length_delta as result if it's zero.
|
| - __ mov(r0, Operand(length_delta), SetCC);
|
| + __ mov(r0, length_delta);
|
| + __ cmpgt(r0, Operand(0));
|
| + __ mov(r0, Operand(Smi::FromInt(GREATER)), t);
|
| + __ cmpge(r0, Operand(0));
|
| + __ mov(r0, Operand(Smi::FromInt(LESS)), f);
|
| + __ Ret();
|
| +
|
| __ bind(&result_not_equal);
|
| // Conditionally update the result based either on length_delta or
|
| // the last comparion performed in the loop above.
|
| - __ mov(r0, Operand(Smi::FromInt(GREATER)), LeaveCC, gt);
|
| - __ mov(r0, Operand(Smi::FromInt(LESS)), LeaveCC, lt);
|
| + __ cmpgt(scratch2, scratch4);
|
| + __ mov(r0, Operand(Smi::FromInt(GREATER)), t);
|
| + __ cmpge(scratch2, scratch4);
|
| + __ mov(r0, Operand(Smi::FromInt(LESS)), f);
|
| __ Ret();
|
| }
|
|
|
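A hedged plain C++ restatement (not V8 code) of the comparison implemented above: compare the first min(length) characters, and if they all match let the length difference decide, mirroring the GREATER/LESS/EQUAL smi results the stub returns.

  #include <algorithm>
  #include <cstddef>
  #include <string>

  // -1 = LESS, 0 = EQUAL, 1 = GREATER.
  int CompareFlatAscii(const std::string& left, const std::string& right) {
    std::size_t min_length = std::min(left.size(), right.size());
    for (std::size_t i = 0; i < min_length; ++i) {
      unsigned char l = left[i], r = right[i];
      if (l != r) return l < r ? -1 : 1;
    }
    if (left.size() == right.size()) return 0;
    return left.size() < right.size() ? -1 : 1;
  }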
| @@ -6271,8 +6162,8 @@ void StringCompareStub::GenerateAsciiCharsCompareLoop(
|
| __ SmiUntag(length);
|
| __ add(scratch1, length,
|
| Operand(SeqAsciiString::kHeaderSize - kHeapObjectTag));
|
| - __ add(left, left, Operand(scratch1));
|
| - __ add(right, right, Operand(scratch1));
|
| + __ add(left, left, scratch1);
|
| + __ add(right, right, scratch1);
|
| __ rsb(length, length, Operand::Zero());
|
| Register index = length; // index = -length;
|
|
|
| @@ -6283,7 +6174,8 @@ void StringCompareStub::GenerateAsciiCharsCompareLoop(
|
| __ ldrb(scratch2, MemOperand(right, index));
|
| __ cmp(scratch1, scratch2);
|
| __ b(ne, chars_not_equal);
|
| - __ add(index, index, Operand(1), SetCC);
|
| + __ add(index, index, Operand(1));
|
| + __ tst(index, index);
|
| __ b(ne, &loop);
|
| }
|
|
|
| @@ -6300,7 +6192,7 @@ void StringCompareStub::Generate(MacroAssembler* masm) {
|
|
|
| Label not_same;
|
| __ cmp(r0, r1);
|
| - __ b(ne, &not_same);
|
| + __ b(ne, &not_same, Label::kNear);
|
| STATIC_ASSERT(EQUAL == 0);
|
| STATIC_ASSERT(kSmiTag == 0);
|
| __ mov(r0, Operand(Smi::FromInt(EQUAL)));
|
| @@ -6350,7 +6242,8 @@ void StringAddStub::Generate(MacroAssembler* masm) {
|
| STATIC_ASSERT(kStringTag == 0);
|
| // If either is not a string, go to runtime.
|
| __ tst(r4, Operand(kIsNotStringMask));
|
| - __ tst(r5, Operand(kIsNotStringMask), eq);
|
| + __ b(ne, &call_runtime);
|
| + __ tst(r5, Operand(kIsNotStringMask));
|
| __ b(ne, &call_runtime);
|
| } else {
|
| // Here at least one of the arguments is definitely a string.
|
| @@ -6374,18 +6267,21 @@ void StringAddStub::Generate(MacroAssembler* masm) {
|
| // r4: first string instance type (if flags_ == NO_STRING_ADD_FLAGS)
|
| // r5: second string instance type (if flags_ == NO_STRING_ADD_FLAGS)
|
| {
|
| - Label strings_not_empty;
|
| + Label strings_not_empty, string_return;
|
| // Check if either of the strings are empty. In that case return the other.
|
| __ ldr(r2, FieldMemOperand(r0, String::kLengthOffset));
|
| __ ldr(r3, FieldMemOperand(r1, String::kLengthOffset));
|
| STATIC_ASSERT(kSmiTag == 0);
|
| __ cmp(r2, Operand(Smi::FromInt(0))); // Test if first string is empty.
|
| - __ mov(r0, Operand(r1), LeaveCC, eq); // If first is empty, return second.
|
| + __ mov(r0, r1, eq); // If first is empty, return second.
|
| + __ bt_near(&string_return);
|
| STATIC_ASSERT(kSmiTag == 0);
|
| // Else test if second string is empty.
|
| - __ cmp(r3, Operand(Smi::FromInt(0)), ne);
|
| - __ b(ne, &strings_not_empty); // If either string was empty, return r0.
|
| + __ cmp(r3, Operand(Smi::FromInt(0)));
|
| + // If either string was empty, return r0.
|
| + __ b(ne, &strings_not_empty, Label::kNear);
|
|
|
| + __ bind(&string_return);
|
| __ IncrementCounter(counters->string_add_native(), 1, r2, r3);
|
| __ add(sp, sp, Operand(2 * kPointerSize));
|
| __ Ret();
|
| @@ -6393,8 +6289,8 @@ void StringAddStub::Generate(MacroAssembler* masm) {
|
| __ bind(&strings_not_empty);
|
| }
|
|
|
| - __ mov(r2, Operand(r2, ASR, kSmiTagSize));
|
| - __ mov(r3, Operand(r3, ASR, kSmiTagSize));
|
| + __ asr(r2, r2, Operand(kSmiTagSize));
|
| + __ asr(r3, r3, Operand(kSmiTagSize));
|
| // Both strings are non-empty.
|
| // r0: first string
|
| // r1: second string
|
| @@ -6406,7 +6302,7 @@ void StringAddStub::Generate(MacroAssembler* masm) {
|
| Label string_add_flat_result, longer_than_two;
|
| // Adding two lengths can't overflow.
|
| STATIC_ASSERT(String::kMaxLength < String::kMaxLength * 2);
|
| - __ add(r6, r2, Operand(r3));
|
| + __ add(r6, r2, r3);
|
| // Use the symbol table when adding two one character strings, as it
|
| // helps later optimizations to return a symbol here.
|
| __ cmp(r6, Operand(2));
|
| @@ -6450,14 +6346,14 @@ void StringAddStub::Generate(MacroAssembler* masm) {
|
|
|
| __ bind(&longer_than_two);
|
| // Check if resulting string will be flat.
|
| - __ cmp(r6, Operand(ConsString::kMinLength));
|
| - __ b(lt, &string_add_flat_result);
|
| + __ cmpge(r6, Operand(ConsString::kMinLength));
|
| + __ bf(&string_add_flat_result);
|
| // Handle exceptionally long strings in the runtime system.
|
| STATIC_ASSERT((String::kMaxLength & 0x80000000) == 0);
|
| ASSERT(IsPowerOf2(String::kMaxLength + 1));
|
| // kMaxLength + 1 is representable as shifted literal, kMaxLength is not.
|
| - __ cmp(r6, Operand(String::kMaxLength + 1));
|
| - __ b(hs, &call_runtime);
|
| + __ cmphs(r6, Operand(String::kMaxLength + 1));
|
| + __ bt(&call_runtime);
|
|
|
| // If result is not supposed to be flat, allocate a cons string object.
|
| // If both strings are ASCII the result is an ASCII cons string.
|
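The checks above and below pick one of several paths for the concatenation. A sketch of the decision tree in plain C++ (not V8 code; the threshold names come from the surrounding code and are passed in rather than assumed):

  #include <cstddef>

  enum class AddPath { kReturnOther, kTwoCharSymbolTable, kFlatCopy, kConsString, kRuntime };

  AddPath ChooseStringAddPath(std::size_t len1, std::size_t len2,
                              std::size_t cons_min_length,  // ConsString::kMinLength
                              std::size_t max_length) {     // String::kMaxLength
    if (len1 == 0 || len2 == 0) return AddPath::kReturnOther;
    std::size_t total = len1 + len2;
    if (total == 2) return AddPath::kTwoCharSymbolTable;  // two one-character strings
    if (total < cons_min_length) return AddPath::kFlatCopy;
    if (total > max_length) return AddPath::kRuntime;
    return AddPath::kConsString;
  }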
| @@ -6470,8 +6366,9 @@ void StringAddStub::Generate(MacroAssembler* masm) {
|
| Label non_ascii, allocated, ascii_data;
|
| STATIC_ASSERT(kTwoByteStringTag == 0);
|
| __ tst(r4, Operand(kStringEncodingMask));
|
| - __ tst(r5, Operand(kStringEncodingMask), ne);
|
| - __ b(eq, &non_ascii);
|
| + __ bt_near(&non_ascii);
|
| + __ tst(r5, Operand(kStringEncodingMask));
|
| + __ b(eq, &non_ascii, Label::kNear);
|
|
|
| // Allocate an ASCII cons string.
|
| __ bind(&ascii_data);
|
| @@ -6480,7 +6377,7 @@ void StringAddStub::Generate(MacroAssembler* masm) {
|
| // Fill the fields of the cons string.
|
| __ str(r0, FieldMemOperand(r7, ConsString::kFirstOffset));
|
| __ str(r1, FieldMemOperand(r7, ConsString::kSecondOffset));
|
| - __ mov(r0, Operand(r7));
|
| + __ mov(r0, r7);
|
| __ IncrementCounter(counters->string_add_native(), 1, r2, r3);
|
| __ add(sp, sp, Operand(2 * kPointerSize));
|
| __ Ret();
|
| @@ -6491,11 +6388,12 @@ void StringAddStub::Generate(MacroAssembler* masm) {
|
| // r4: first instance type.
|
| // r5: second instance type.
|
| __ tst(r4, Operand(kAsciiDataHintMask));
|
| - __ tst(r5, Operand(kAsciiDataHintMask), ne);
|
| __ b(ne, &ascii_data);
|
| - __ eor(r4, r4, Operand(r5));
|
| + __ tst(r5, Operand(kAsciiDataHintMask));
|
| + __ b(ne, &ascii_data);
|
| + __ eor(r4, r4, r5);
|
| STATIC_ASSERT(kAsciiStringTag != 0 && kAsciiDataHintTag != 0);
|
| - __ and_(r4, r4, Operand(kAsciiStringTag | kAsciiDataHintTag));
|
| + __ land(r4, r4, Operand(kAsciiStringTag | kAsciiDataHintTag));
|
| __ cmp(r4, Operand(kAsciiStringTag | kAsciiDataHintTag));
|
| __ b(eq, &ascii_data);
|
|
|
| @@ -6524,19 +6422,20 @@ void StringAddStub::Generate(MacroAssembler* masm) {
|
| }
|
|
|
| // Check whether both strings have same encoding
|
| - __ eor(r7, r4, Operand(r5));
|
| + Label skip;
|
| + __ eor(r7, r4, r5);
|
| __ tst(r7, Operand(kStringEncodingMask));
|
| __ b(ne, &call_runtime);
|
|
|
| STATIC_ASSERT(kSeqStringTag == 0);
|
| __ tst(r4, Operand(kStringRepresentationMask));
|
| + __ bf(&skip);
|
| STATIC_ASSERT(SeqAsciiString::kHeaderSize == SeqTwoByteString::kHeaderSize);
|
| __ add(r7,
|
| r0,
|
| - Operand(SeqAsciiString::kHeaderSize - kHeapObjectTag),
|
| - LeaveCC,
|
| - eq);
|
| - __ b(eq, &first_prepared);
|
| + Operand(SeqAsciiString::kHeaderSize - kHeapObjectTag));
|
| + __ b(&first_prepared);
|
| + __ bind(&skip);
|
| // External string: rule out short external string and load string resource.
|
| STATIC_ASSERT(kShortExternalStringTag != 0);
|
| __ tst(r4, Operand(kShortExternalStringMask));
|
| @@ -6545,14 +6444,15 @@ void StringAddStub::Generate(MacroAssembler* masm) {
|
| __ bind(&first_prepared);
|
|
|
| STATIC_ASSERT(kSeqStringTag == 0);
|
| + Label skip2;
|
| __ tst(r5, Operand(kStringRepresentationMask));
|
| + __ bf(&skip2);
|
| STATIC_ASSERT(SeqAsciiString::kHeaderSize == SeqTwoByteString::kHeaderSize);
|
| __ add(r1,
|
| r1,
|
| - Operand(SeqAsciiString::kHeaderSize - kHeapObjectTag),
|
| - LeaveCC,
|
| - eq);
|
| - __ b(eq, &second_prepared);
|
| + Operand(SeqAsciiString::kHeaderSize - kHeapObjectTag));
|
| + __ b(&second_prepared);
|
| + __ bind(&skip2);
|
| // External string: rule out short external string and load string resource.
|
| STATIC_ASSERT(kShortExternalStringTag != 0);
|
| __ tst(r5, Operand(kShortExternalStringMask));
|
| @@ -6624,8 +6524,8 @@ void StringAddStub::GenerateConvertArgument(MacroAssembler* masm,
|
| // First check if the argument is already a string.
|
| Label not_string, done;
|
| __ JumpIfSmi(arg, ¬_string);
|
| - __ CompareObjectType(arg, scratch1, scratch1, FIRST_NONSTRING_TYPE);
|
| - __ b(lt, &done);
|
| + __ CompareObjectType(arg, scratch1, scratch1, FIRST_NONSTRING_TYPE, ge);
|
| + __ bf(&done);
|
|
|
| // Check the number to string cache.
|
| Label not_cached;
|
| @@ -6647,10 +6547,10 @@ void StringAddStub::GenerateConvertArgument(MacroAssembler* masm,
|
| __ bind(¬_cached);
|
| __ JumpIfSmi(arg, slow);
|
| __ CompareObjectType(
|
| - arg, scratch1, scratch2, JS_VALUE_TYPE); // map -> scratch1.
|
| + arg, scratch1, scratch2, JS_VALUE_TYPE, eq); // map -> scratch1.
|
| __ b(ne, slow);
|
| __ ldrb(scratch2, FieldMemOperand(scratch1, Map::kBitField2Offset));
|
| - __ and_(scratch2,
|
| + __ land(scratch2,
|
| scratch2, Operand(1 << Map::kStringWrapperSafeForDefaultValueOf));
|
| __ cmp(scratch2,
|
| Operand(1 << Map::kStringWrapperSafeForDefaultValueOf));
|
| @@ -6670,11 +6570,13 @@ void ICCompareStub::GenerateSmis(MacroAssembler* masm) {
|
|
|
| if (GetCondition() == eq) {
|
| // For equality we do not care about the sign of the result.
|
| - __ sub(r0, r0, r1, SetCC);
|
| + __ sub(r0, r0, r1);
|
| + __ tst(r0, r0); // TODO(stm): is setting the condition flags here actually needed?
|
| } else {
|
| // Untag before subtracting to avoid handling overflow.
|
| __ SmiUntag(r1);
|
| - __ sub(r0, r1, SmiUntagOperand(r0));
|
| + __ SmiUntag(r0);
|
| + __ sub(r0, r1, r0);
|
| }
|
| __ Ret();
|
|
|
| @@ -6689,39 +6591,50 @@ void ICCompareStub::GenerateHeapNumbers(MacroAssembler* masm) {
|
| Label generic_stub;
|
| Label unordered, maybe_undefined1, maybe_undefined2;
|
| Label miss;
|
| - __ and_(r2, r1, Operand(r0));
|
| - __ JumpIfSmi(r2, &generic_stub);
|
| + __ land(r2, r1, r0);
|
| + __ JumpIfSmi(r2, &generic_stub, Label::kNear);
|
|
|
| - __ CompareObjectType(r0, r2, r2, HEAP_NUMBER_TYPE);
|
| - __ b(ne, &maybe_undefined1);
|
| - __ CompareObjectType(r1, r2, r2, HEAP_NUMBER_TYPE);
|
| - __ b(ne, &maybe_undefined2);
|
| + __ CompareObjectType(r0, r2, r2, HEAP_NUMBER_TYPE, eq);
|
| + __ b(ne, &maybe_undefined1, Label::kNear);
|
| + __ CompareObjectType(r1, r2, r2, HEAP_NUMBER_TYPE, eq);
|
| + __ b(ne, &maybe_undefined2, Label::kNear);
|
|
|
| // Inlining the double comparison and falling back to the general compare
|
| - // stub if NaN is involved or VFP3 is unsupported.
|
| - if (CpuFeatures::IsSupported(VFP2)) {
|
| - CpuFeatures::Scope scope(VFP2);
|
| -
|
| + // stub if NaN is involved or FPU is unsupported.
|
| + if (CpuFeatures::IsSupported(FPU)) {
|
| // Load left and right operand
|
| - __ sub(r2, r1, Operand(kHeapObjectTag));
|
| - __ vldr(d0, r2, HeapNumber::kValueOffset);
|
| - __ sub(r2, r0, Operand(kHeapObjectTag));
|
| - __ vldr(d1, r2, HeapNumber::kValueOffset);
|
| + __ sub(r2, r1, Operand(kHeapObjectTag - HeapNumber::kValueOffset));
|
| + __ dldr(dr0, MemOperand(r2, 0), r2);
|
| + __ sub(r2, r0, Operand(kHeapObjectTag - HeapNumber::kValueOffset));
|
| + __ dldr(dr2, MemOperand(r2, 0), r2);
|
| +
|
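| + // A NaN never compares equal to itself; fall back to the generic stub for unordered operands.
|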
| + __ dcmpeq(dr0, dr0);
|
| + __ bf_near(&unordered);
|
| + __ dcmpeq(dr2, dr2);
|
| + __ bf_near(&unordered);
|
| +
|
| + // Test for eq, lt and gt
|
| + Label equal, greater;
|
| + __ dcmpeq(dr0, dr2);
|
| + __ bt_near(&equal);
|
| + __ dcmpgt(dr0, dr2);
|
| + __ bt_near(&greater);
|
|
|
| - // Compare operands
|
| - __ VFPCompareAndSetFlags(d0, d1);
|
| + __ mov(r0, Operand(LESS));
|
| + __ rts();
|
|
|
| - // Don't base result on status bits when a NaN is involved.
|
| - __ b(vs, &unordered);
|
| + __ bind(&equal);
|
| + __ mov(r0, Operand(EQUAL));
|
| + __ rts();
|
|
|
| - // Return a result of -1, 0, or 1, based on status bits.
|
| - __ mov(r0, Operand(EQUAL), LeaveCC, eq);
|
| - __ mov(r0, Operand(LESS), LeaveCC, lt);
|
| - __ mov(r0, Operand(GREATER), LeaveCC, gt);
|
| - __ Ret();
|
| + __ bind(&greater);
|
| + __ mov(r0, Operand(GREATER));
|
| + __ rts();
|
| +
|
| }
|
|
|
| __ bind(&unordered);
|
| CompareStub stub(GetCondition(), strict(), NO_COMPARE_FLAGS, r1, r0);
|
| __ bind(&generic_stub);
|
| __ Jump(stub.GetCode(), RelocInfo::CODE_TARGET);
|
| @@ -6730,7 +6643,7 @@ void ICCompareStub::GenerateHeapNumbers(MacroAssembler* masm) {
|
| if (Token::IsOrderedRelationalCompareOp(op_)) {
|
| __ CompareRoot(r0, Heap::kUndefinedValueRootIndex);
|
| __ b(ne, &miss);
|
| - __ CompareObjectType(r1, r2, r2, HEAP_NUMBER_TYPE);
|
| + __ CompareObjectType(r1, r2, r2, HEAP_NUMBER_TYPE, eq);
|
| __ b(ne, &maybe_undefined2);
|
| __ jmp(&unordered);
|
| }
|
| @@ -6757,7 +6670,7 @@ void ICCompareStub::GenerateSymbols(MacroAssembler* masm) {
|
| Register tmp2 = r3;
|
|
|
| // Check that both operands are heap objects.
|
| - __ JumpIfEitherSmi(left, right, &miss);
|
| + __ JumpIfEitherSmi(left, right, &miss, Label::kNear);
|
|
|
| // Check that both operands are symbols.
|
| __ ldr(tmp1, FieldMemOperand(left, HeapObject::kMapOffset));
|
| @@ -6765,9 +6678,9 @@ void ICCompareStub::GenerateSymbols(MacroAssembler* masm) {
|
| __ ldrb(tmp1, FieldMemOperand(tmp1, Map::kInstanceTypeOffset));
|
| __ ldrb(tmp2, FieldMemOperand(tmp2, Map::kInstanceTypeOffset));
|
| STATIC_ASSERT(kSymbolTag != 0);
|
| - __ and_(tmp1, tmp1, Operand(tmp2));
|
| + __ land(tmp1, tmp1, tmp2);
|
| __ tst(tmp1, Operand(kIsSymbolMask));
|
| - __ b(eq, &miss);
|
| + __ b(eq, &miss, Label::kNear);
|
|
|
| // Symbols are compared by identity.
|
| __ cmp(left, right);
|
| @@ -6776,7 +6689,7 @@ void ICCompareStub::GenerateSymbols(MacroAssembler* masm) {
|
| ASSERT(right.is(r0));
|
| STATIC_ASSERT(EQUAL == 0);
|
| STATIC_ASSERT(kSmiTag == 0);
|
| - __ mov(r0, Operand(Smi::FromInt(EQUAL)), LeaveCC, eq);
|
| + __ mov(r0, Operand(Smi::FromInt(EQUAL)), eq);
|
| __ Ret();
|
|
|
| __ bind(&miss);
|
| @@ -6816,7 +6729,7 @@ void ICCompareStub::GenerateStrings(MacroAssembler* masm) {
|
| __ cmp(left, right);
|
| STATIC_ASSERT(EQUAL == 0);
|
| STATIC_ASSERT(kSmiTag == 0);
|
| - __ mov(r0, Operand(Smi::FromInt(EQUAL)), LeaveCC, eq);
|
| + __ mov(r0, Operand(Smi::FromInt(EQUAL)), eq);
|
| __ Ret(eq);
|
|
|
| // Handle not identical strings.
|
| @@ -6826,7 +6739,7 @@ void ICCompareStub::GenerateStrings(MacroAssembler* masm) {
|
| if (equality) {
|
| ASSERT(GetCondition() == eq);
|
| STATIC_ASSERT(kSymbolTag != 0);
|
| - __ and_(tmp3, tmp1, Operand(tmp2));
|
| + __ land(tmp3, tmp1, tmp2);
|
| __ tst(tmp3, Operand(kIsSymbolMask));
|
| // Make sure r0 is non-zero. At this point input operands are
|
| // guaranteed to be non-zero.
|
| @@ -6865,16 +6778,16 @@ void ICCompareStub::GenerateStrings(MacroAssembler* masm) {
|
| void ICCompareStub::GenerateObjects(MacroAssembler* masm) {
|
| ASSERT(state_ == CompareIC::OBJECTS);
|
| Label miss;
|
| - __ and_(r2, r1, Operand(r0));
|
| - __ JumpIfSmi(r2, &miss);
|
| + __ land(r2, r1, r0);
|
| + __ JumpIfSmi(r2, &miss, Label::kNear);
|
|
|
| - __ CompareObjectType(r0, r2, r2, JS_OBJECT_TYPE);
|
| - __ b(ne, &miss);
|
| - __ CompareObjectType(r1, r2, r2, JS_OBJECT_TYPE);
|
| - __ b(ne, &miss);
|
| + __ CompareObjectType(r0, r2, r2, JS_OBJECT_TYPE, eq);
|
| + __ b(ne, &miss, Label::kNear);
|
| + __ CompareObjectType(r1, r2, r2, JS_OBJECT_TYPE, eq);
|
| + __ b(ne, &miss, Label::kNear);
|
|
|
| ASSERT(GetCondition() == eq);
|
| - __ sub(r0, r0, Operand(r1));
|
| + __ sub(r0, r0, r1);
|
| __ Ret();
|
|
|
| __ bind(&miss);
|
| @@ -6884,7 +6797,7 @@ void ICCompareStub::GenerateObjects(MacroAssembler* masm) {
|
|
|
| void ICCompareStub::GenerateKnownObjects(MacroAssembler* masm) {
|
| Label miss;
|
| - __ and_(r2, r1, Operand(r0));
|
| + __ land(r2, r1, r0);
|
| __ JumpIfSmi(r2, &miss);
|
| __ ldr(r2, FieldMemOperand(r0, HeapObject::kMapOffset));
|
| __ ldr(r3, FieldMemOperand(r1, HeapObject::kMapOffset));
|
| @@ -6893,7 +6806,7 @@ void ICCompareStub::GenerateKnownObjects(MacroAssembler* masm) {
|
| __ cmp(r3, Operand(known_map_));
|
| __ b(ne, &miss);
|
|
|
| - __ sub(r0, r0, Operand(r1));
|
| + __ sub(r0, r0, r1);
|
| __ Ret();
|
|
|
| __ bind(&miss);
|
| @@ -6901,7 +6814,6 @@ void ICCompareStub::GenerateKnownObjects(MacroAssembler* masm) {
|
| }
|
|
|
|
|
| -
|
| void ICCompareStub::GenerateMiss(MacroAssembler* masm) {
|
| {
|
| // Call the runtime system in a fresh internal frame.
|
| @@ -6910,7 +6822,7 @@ void ICCompareStub::GenerateMiss(MacroAssembler* masm) {
|
|
|
| FrameScope scope(masm, StackFrame::INTERNAL);
|
| __ Push(r1, r0);
|
| - __ push(lr);
|
| + __ push(pr);
|
| __ Push(r1, r0);
|
| __ mov(ip, Operand(Smi::FromInt(op_)));
|
| __ push(ip);
|
| @@ -6918,43 +6830,50 @@ void ICCompareStub::GenerateMiss(MacroAssembler* masm) {
|
| // Compute the entry point of the rewritten stub.
|
| __ add(r2, r0, Operand(Code::kHeaderSize - kHeapObjectTag));
|
| // Restore registers.
|
| - __ pop(lr);
|
| + __ pop(pr);
|
| __ pop(r0);
|
| __ pop(r1);
|
| }
|
|
|
| - __ Jump(r2);
|
| + __ jmp(r2);
|
| }
|
|
|
|
|
| void DirectCEntryStub::Generate(MacroAssembler* masm) {
|
| - __ ldr(pc, MemOperand(sp, 0));
|
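| + // SH4 cannot load directly into the pc, so go through a scratch register and jump.
|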
| + __ ldr(scratch_, MemOperand(sp, 0), scratch_);
|
| + __ jmp(scratch_);
|
| }
|
|
|
|
|
| void DirectCEntryStub::GenerateCall(MacroAssembler* masm,
|
| - ExternalReference function) {
|
| - __ mov(r2, Operand(function));
|
| - GenerateCall(masm, r2);
|
| + ExternalReference function,
|
| + Register scratch1,
|
| + Register scratch2) {
|
| + __ mov(scratch1, Operand(function));
|
| + GenerateCall(masm, scratch1, scratch2);
|
| }
|
|
|
|
|
| void DirectCEntryStub::GenerateCall(MacroAssembler* masm,
|
| - Register target) {
|
| - __ mov(lr, Operand(reinterpret_cast<intptr_t>(GetCode().location()),
|
| - RelocInfo::CODE_TARGET));
|
| -
|
| - // Prevent literal pool emission during calculation of return address.
|
| - Assembler::BlockConstPoolScope block_const_pool(masm);
|
| -
|
| - // Push return address (accessible to GC through exit frame pc).
|
| - // Note that using pc with str is deprecated.
|
| + Register target,
|
| + Register scratch1) {
|
| + ASSERT(!target.is(scratch_));
|
| + ASSERT(!target.is(scratch1));
|
| + ASSERT(!scratch1.is(scratch_));
|
| + // Load the address of DirectCEntryStub::Generate (the value pr must hold) into scratch1.
|
| + // pr cannot be set directly here because the addpc below clobbers it.
|
| + __ mov(scratch1, Operand(reinterpret_cast<intptr_t>(GetCode().location()),
|
| + RelocInfo::CODE_TARGET));
|
| + int return_address_offset = 4 * Assembler::kInstrSize;
|
| Label start;
|
| + // Push return address (accessible to GC through exit frame pc).
|
| + __ addpc(scratch_, return_address_offset, pr);
|
| __ bind(&start);
|
| - __ add(ip, pc, Operand(Assembler::kInstrSize));
|
| - __ str(ip, MemOperand(sp, 0));
|
| - __ Jump(target); // Call the C++ function.
|
| - ASSERT_EQ(Assembler::kInstrSize + Assembler::kPcLoadDelta,
|
| + // Restore pr so that it points back to DirectCEntryStub::Generate.
|
| + __ mov(pr, scratch1);
|
| + __ str(scratch_, MemOperand(sp, 0), no_reg);
|
| + __ jmp(target); // Call the C++ function.
|
| + ASSERT_EQ(return_address_offset,
|
| masm->SizeOfCodeGeneratedSince(&start));
|
| }
|
|
|
| @@ -6966,6 +6885,7 @@ void StringDictionaryLookupStub::GenerateNegativeLookup(MacroAssembler* masm,
|
| Register properties,
|
| Handle<String> name,
|
| Register scratch0) {
|
| + ASSERT(!scratch0.is(ip));
|
| // If names of slots in range from 1 to kProbes - 1 for the hash value are
|
| // not equal to the name and kProbes-th slot is not used (its name is the
|
| // undefined value), it guarantees the hash table doesn't contain the
|
| @@ -6978,18 +6898,21 @@ void StringDictionaryLookupStub::GenerateNegativeLookup(MacroAssembler* masm,
|
| // Capacity is smi 2^n.
|
| __ ldr(index, FieldMemOperand(properties, kCapacityOffset));
|
| __ sub(index, index, Operand(1));
|
| - __ and_(index, index, Operand(
|
| + __ land(index, index, Operand(
|
| Smi::FromInt(name->Hash() + StringDictionary::GetProbeOffset(i))));
|
|
|
| // Scale the index by multiplying by the entry size.
|
| ASSERT(StringDictionary::kEntrySize == 3);
|
| - __ add(index, index, Operand(index, LSL, 1)); // index *= 3.
|
| + __ lsl(ip, index, Operand(1));
|
| + __ add(index, index, ip); // index *= 3.
|
|
|
| Register entity_name = scratch0;
|
| // Having undefined at this place means the name is not contained.
|
| ASSERT_EQ(kSmiTagSize, 1);
|
| Register tmp = properties;
|
| - __ add(tmp, properties, Operand(index, LSL, 1));
|
| + /* Use entity_name as a scratch register; it is defined just below. */
|
| + __ lsl(entity_name, index, Operand(1));
|
| + __ add(tmp, properties, entity_name);
|
| __ ldr(entity_name, FieldMemOperand(tmp, kElementsStartOffset));
|
|
|
| ASSERT(!tmp.is(entity_name));
|
| @@ -7025,16 +6948,18 @@ void StringDictionaryLookupStub::GenerateNegativeLookup(MacroAssembler* masm,
|
| }
|
|
|
| const int spill_mask =
|
| - (lr.bit() | r6.bit() | r5.bit() | r4.bit() | r3.bit() |
|
| + (r6.bit() | r5.bit() | r4.bit() | r3.bit() |
|
| r2.bit() | r1.bit() | r0.bit());
|
|
|
| - __ stm(db_w, sp, spill_mask);
|
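| + // pr is not part of the general register file on SH4, so it is saved separately from the pushm mask.
|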
| + __ push(pr);
|
| + __ pushm(spill_mask);
|
| __ ldr(r0, FieldMemOperand(receiver, JSObject::kPropertiesOffset));
|
| __ mov(r1, Operand(Handle<String>(name)));
|
| StringDictionaryLookupStub stub(NEGATIVE_LOOKUP);
|
| __ CallStub(&stub);
|
| __ cmp(r0, Operand(0));
|
| - __ ldm(ia_w, sp, spill_mask);
|
| + __ popm(spill_mask);
|
| + __ pop(pr);
|
|
|
| __ b(eq, done);
|
| __ b(ne, miss);
|
| @@ -7057,11 +6982,12 @@ void StringDictionaryLookupStub::GeneratePositiveLookup(MacroAssembler* masm,
|
| ASSERT(!name.is(scratch1));
|
| ASSERT(!name.is(scratch2));
|
|
|
| - __ AssertString(name);
|
| + // Assert that name contains a string.
|
| + if (FLAG_debug_code) __ AbortIfNotString(name);
|
|
|
| // Compute the capacity mask.
|
| __ ldr(scratch1, FieldMemOperand(elements, kCapacityOffset));
|
| - __ mov(scratch1, Operand(scratch1, ASR, kSmiTagSize)); // convert smi to int
|
| + __ asr(scratch1, scratch1, Operand(kSmiTagSize)); // convert smi to int
|
| __ sub(scratch1, scratch1, Operand(1));
|
|
|
| // Generate an unrolled loop that performs a few probes before
|
| @@ -7079,26 +7005,30 @@ void StringDictionaryLookupStub::GeneratePositiveLookup(MacroAssembler* masm,
|
| __ add(scratch2, scratch2, Operand(
|
| StringDictionary::GetProbeOffset(i) << String::kHashShift));
|
| }
|
| - __ and_(scratch2, scratch1, Operand(scratch2, LSR, String::kHashShift));
|
| + __ lsr(scratch2, scratch2, Operand(String::kHashShift));
|
| + __ land(scratch2, scratch1, scratch2);
|
|
|
| // Scale the index by multiplying by the element size.
|
| ASSERT(StringDictionary::kEntrySize == 3);
|
| // scratch2 = scratch2 * 3.
|
| - __ add(scratch2, scratch2, Operand(scratch2, LSL, 1));
|
| + __ lsl(ip, scratch2, Operand(1));
|
| + __ add(scratch2, scratch2, ip);
|
|
|
| // Check if the key is identical to the name.
|
| - __ add(scratch2, elements, Operand(scratch2, LSL, 2));
|
| + __ lsl(scratch2, scratch2, Operand(2));
|
| + __ add(scratch2, elements, scratch2);
|
| __ ldr(ip, FieldMemOperand(scratch2, kElementsStartOffset));
|
| - __ cmp(name, Operand(ip));
|
| + __ cmp(name, ip);
|
| __ b(eq, done);
|
| }
|
|
|
| const int spill_mask =
|
| - (lr.bit() | r6.bit() | r5.bit() | r4.bit() |
|
| + (r6.bit() | r5.bit() | r4.bit() |
|
| r3.bit() | r2.bit() | r1.bit() | r0.bit()) &
|
| ~(scratch1.bit() | scratch2.bit());
|
|
|
| - __ stm(db_w, sp, spill_mask);
|
| + __ push(pr);
|
| + __ pushm(spill_mask);
|
| if (name.is(r0)) {
|
| ASSERT(!elements.is(r1));
|
| __ Move(r1, name);
|
| @@ -7110,8 +7040,9 @@ void StringDictionaryLookupStub::GeneratePositiveLookup(MacroAssembler* masm,
|
| StringDictionaryLookupStub stub(POSITIVE_LOOKUP);
|
| __ CallStub(&stub);
|
| __ cmp(r0, Operand(0));
|
| - __ mov(scratch2, Operand(r2));
|
| - __ ldm(ia_w, sp, spill_mask);
|
| + __ mov(scratch2, r2);
|
| + __ popm(spill_mask);
|
| + __ pop(pr);
|
|
|
| __ b(ne, done);
|
| __ b(eq, miss);
|
| @@ -7142,7 +7073,7 @@ void StringDictionaryLookupStub::Generate(MacroAssembler* masm) {
|
| Label in_dictionary, maybe_in_dictionary, not_in_dictionary;
|
|
|
| __ ldr(mask, FieldMemOperand(dictionary, kCapacityOffset));
|
| - __ mov(mask, Operand(mask, ASR, kSmiTagSize));
|
| + __ asr(mask, mask, Operand(kSmiTagSize));
|
| __ sub(mask, mask, Operand(1));
|
|
|
| __ ldr(hash, FieldMemOperand(key, String::kHashFieldOffset));
|
| @@ -7161,24 +7092,27 @@ void StringDictionaryLookupStub::Generate(MacroAssembler* masm) {
|
| __ add(index, hash, Operand(
|
| StringDictionary::GetProbeOffset(i) << String::kHashShift));
|
| } else {
|
| - __ mov(index, Operand(hash));
|
| + __ mov(index, hash);
|
| }
|
| - __ and_(index, mask, Operand(index, LSR, String::kHashShift));
|
| + __ lsr(index, index, Operand(String::kHashShift));
|
| + __ land(index, mask, index);
|
|
|
| // Scale the index by multiplying by the entry size.
|
| ASSERT(StringDictionary::kEntrySize == 3);
|
| - __ add(index, index, Operand(index, LSL, 1)); // index *= 3.
|
| + __ lsl(ip, index, Operand(1));
|
| + __ add(index, index, ip); // index *= 3.
|
|
|
| ASSERT_EQ(kSmiTagSize, 1);
|
| - __ add(index, dictionary, Operand(index, LSL, 2));
|
| + __ lsl(index, index, Operand(2));
|
| + __ add(index, dictionary, index);
|
| __ ldr(entry_key, FieldMemOperand(index, kElementsStartOffset));
|
|
|
| // Having undefined at this place means the name is not contained.
|
| - __ cmp(entry_key, Operand(undefined));
|
| + __ cmp(entry_key, undefined);
|
| __ b(eq, ¬_in_dictionary);
|
|
|
| // Stop if found the property.
|
| - __ cmp(entry_key, Operand(key));
|
| + __ cmp(entry_key, key);
|
| __ b(eq, &in_dictionary);
|
|
|
| if (i != kTotalProbes - 1 && mode_ == NEGATIVE_LOOKUP) {
|
| @@ -7205,7 +7139,7 @@ void StringDictionaryLookupStub::Generate(MacroAssembler* masm) {
|
| __ Ret();
|
|
|
| __ bind(¬_in_dictionary);
|
| - __ mov(result, Operand::Zero());
|
| + __ mov(result, Operand(0));
|
| __ Ret();
|
| }
|
|
|
| @@ -7297,7 +7231,7 @@ void RecordWriteStub::GenerateFixedRegStubsAheadOfTime() {
|
|
|
|
|
| bool CodeStub::CanUseFPRegisters() {
|
| - return CpuFeatures::IsSupported(VFP2);
|
| + return CpuFeatures::IsSupported(FPU);
|
| }
|
|
|
|
|
| @@ -7317,7 +7251,6 @@ void RecordWriteStub::Generate(MacroAssembler* masm) {
|
| {
|
| // Block literal pool emission, as the position of these two instructions
|
| // is assumed by the patching code.
|
| - Assembler::BlockConstPoolScope block_const_pool(masm);
|
| __ b(&skip_to_incremental_noncompacting);
|
| __ b(&skip_to_incremental_compacting);
|
| }
|
| @@ -7339,10 +7272,11 @@ void RecordWriteStub::Generate(MacroAssembler* masm) {
|
|
|
| // Initial mode of the stub is expected to be STORE_BUFFER_ONLY.
|
| // Will be checked in IncrementalMarking::ActivateGeneratedStub.
|
| - ASSERT(Assembler::GetBranchOffset(masm->instr_at(0)) < (1 << 12));
|
| - ASSERT(Assembler::GetBranchOffset(masm->instr_at(4)) < (1 << 12));
|
| - PatchBranchIntoNop(masm, 0);
|
| - PatchBranchIntoNop(masm, Assembler::kInstrSize);
|
| +// TODO(STM): check this soon.
|
| +// ASSERT(Assembler::GetBranchOffset(masm->instr_at(0)) < (1 << 12));
|
| +// ASSERT(Assembler::GetBranchOffset(masm->instr_at(4)) < (1 << 12));
|
| +// PatchBranchIntoNop(masm, 0);
|
| +// PatchBranchIntoNop(masm, Assembler::kInstrSize);
|
| }
|
|
|
|
|
| @@ -7387,37 +7321,7 @@ void RecordWriteStub::GenerateIncremental(MacroAssembler* masm, Mode mode) {
|
|
|
|
|
| void RecordWriteStub::InformIncrementalMarker(MacroAssembler* masm, Mode mode) {
|
| - regs_.SaveCallerSaveRegisters(masm, save_fp_regs_mode_);
|
| - int argument_count = 3;
|
| - __ PrepareCallCFunction(argument_count, regs_.scratch0());
|
| - Register address =
|
| - r0.is(regs_.address()) ? regs_.scratch0() : regs_.address();
|
| - ASSERT(!address.is(regs_.object()));
|
| - ASSERT(!address.is(r0));
|
| - __ Move(address, regs_.address());
|
| - __ Move(r0, regs_.object());
|
| - if (mode == INCREMENTAL_COMPACTION) {
|
| - __ Move(r1, address);
|
| - } else {
|
| - ASSERT(mode == INCREMENTAL);
|
| - __ ldr(r1, MemOperand(address, 0));
|
| - }
|
| - __ mov(r2, Operand(ExternalReference::isolate_address()));
|
| -
|
| - AllowExternalCallThatCantCauseGC scope(masm);
|
| - if (mode == INCREMENTAL_COMPACTION) {
|
| - __ CallCFunction(
|
| - ExternalReference::incremental_evacuation_record_write_function(
|
| - masm->isolate()),
|
| - argument_count);
|
| - } else {
|
| - ASSERT(mode == INCREMENTAL);
|
| - __ CallCFunction(
|
| - ExternalReference::incremental_marking_record_write_function(
|
| - masm->isolate()),
|
| - argument_count);
|
| - }
|
| - regs_.RestoreCallerSaveRegisters(masm, save_fp_regs_mode_);
|
| + __ UNIMPLEMENTED_BREAK();
|
| }
|
|
|
|
|
| @@ -7429,15 +7333,16 @@ void RecordWriteStub::CheckNeedsToInformIncrementalMarker(
|
| Label need_incremental;
|
| Label need_incremental_pop_scratch;
|
|
|
| - __ and_(regs_.scratch0(), regs_.object(), Operand(~Page::kPageAlignmentMask));
|
| + __ land(regs_.scratch0(), regs_.object(), Operand(~Page::kPageAlignmentMask));
|
| __ ldr(regs_.scratch1(),
|
| MemOperand(regs_.scratch0(),
|
| MemoryChunk::kWriteBarrierCounterOffset));
|
| - __ sub(regs_.scratch1(), regs_.scratch1(), Operand(1), SetCC);
|
| + __ sub(regs_.scratch1(), regs_.scratch1(), Operand(1));
|
| + __ cmpge(regs_.scratch1(), Operand(0));
|
| __ str(regs_.scratch1(),
|
| MemOperand(regs_.scratch0(),
|
| MemoryChunk::kWriteBarrierCounterOffset));
|
| - __ b(mi, &need_incremental);
|
| + __ bf(&need_incremental);
|
|
|
| // Let's look at the color of the object: If it is not black we don't have
|
| // to inform the incremental marker.
|
| @@ -7540,7 +7445,8 @@ void StoreArrayLiteralElementStub::Generate(MacroAssembler* masm) {
|
| // Array literal has ElementsKind of FAST_*_ELEMENTS and value is an object.
|
| __ bind(&fast_elements);
|
| __ ldr(r5, FieldMemOperand(r1, JSObject::kElementsOffset));
|
| - __ add(r6, r5, Operand(r3, LSL, kPointerSizeLog2 - kSmiTagSize));
|
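| + // No shifted-operand form on SH4: compute the scaled index explicitly, then add.
|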
| + __ lsl(r6, r3, Operand(kPointerSizeLog2 - kSmiTagSize));
|
| + __ add(r6, r5, r6);
|
| __ add(r6, r6, Operand(FixedArray::kHeaderSize - kHeapObjectTag));
|
| __ str(r0, MemOperand(r6, 0));
|
| // Update the write barrier for the array store.
|
| @@ -7552,7 +7458,8 @@ void StoreArrayLiteralElementStub::Generate(MacroAssembler* masm) {
|
| // and value is Smi.
|
| __ bind(&smi_element);
|
| __ ldr(r5, FieldMemOperand(r1, JSObject::kElementsOffset));
|
| - __ add(r6, r5, Operand(r3, LSL, kPointerSizeLog2 - kSmiTagSize));
|
| + __ lsl(r6, r3, Operand(kPointerSizeLog2 - kSmiTagSize));
|
| + __ add(r6, r5, r6);
|
| __ str(r0, FieldMemOperand(r6, FixedArray::kHeaderSize));
|
| __ Ret();
|
|
|
| @@ -7569,7 +7476,6 @@ void StoreArrayLiteralElementStub::Generate(MacroAssembler* masm) {
|
|
|
| void ProfileEntryHookStub::MaybeCallEntryHook(MacroAssembler* masm) {
|
| if (entry_hook_ != NULL) {
|
| - PredictableCodeSizeScope predictable(masm);
|
| ProfileEntryHookStub stub;
|
| __ push(lr);
|
| __ CallStub(&stub);
|
| @@ -7581,48 +7487,38 @@ void ProfileEntryHookStub::MaybeCallEntryHook(MacroAssembler* masm) {
|
| void ProfileEntryHookStub::Generate(MacroAssembler* masm) {
|
| // The entry hook is a "push lr" instruction, followed by a call.
|
| const int32_t kReturnAddressDistanceFromFunctionStart =
|
| - 3 * Assembler::kInstrSize;
|
| + Assembler::kCallTargetAddressOffset + Assembler::kInstrSize;
|
|
|
| // Save live volatile registers.
|
| - __ Push(lr, r5, r1);
|
| + __ Push(pr, r5, r1);
|
| const int32_t kNumSavedRegs = 3;
|
|
|
| // Compute the function's address for the first argument.
|
| - __ sub(r0, lr, Operand(kReturnAddressDistanceFromFunctionStart));
|
| + __ sub(r0, pr, Operand(kReturnAddressDistanceFromFunctionStart));
|
|
|
| // The caller's return address is above the saved temporaries.
|
| // Grab that for the second argument to the hook.
|
| __ add(r1, sp, Operand(kNumSavedRegs * kPointerSize));
|
|
|
| // Align the stack if necessary.
|
| - int frame_alignment = masm->ActivationFrameAlignment();
|
| + int frame_alignment = OS::ActivationFrameAlignment();
|
| if (frame_alignment > kPointerSize) {
|
| __ mov(r5, sp);
|
| ASSERT(IsPowerOf2(frame_alignment));
|
| - __ and_(sp, sp, Operand(-frame_alignment));
|
| + __ land(sp, sp, Operand(-frame_alignment));
|
| }
|
|
|
| -#if defined(V8_HOST_ARCH_ARM)
|
| __ mov(ip, Operand(reinterpret_cast<int32_t>(&entry_hook_)));
|
| __ ldr(ip, MemOperand(ip));
|
| -#else
|
| - // Under the simulator we need to indirect the entry hook through a
|
| - // trampoline function at a known address.
|
| - Address trampoline_address = reinterpret_cast<Address>(
|
| - reinterpret_cast<intptr_t>(EntryHookTrampoline));
|
| - ApiFunction dispatcher(trampoline_address);
|
| - __ mov(ip, Operand(ExternalReference(&dispatcher,
|
| - ExternalReference::BUILTIN_CALL,
|
| - masm->isolate())));
|
| -#endif
|
| - __ Call(ip);
|
| +
|
| + __ jsr(ip);
|
|
|
| // Restore the stack pointer if needed.
|
| if (frame_alignment > kPointerSize) {
|
| __ mov(sp, r5);
|
| }
|
|
|
| - __ Pop(lr, r5, r1);
|
| + __ Pop(pr, r5, r1);
|
| __ Ret();
|
| }
|
|
|
| @@ -7630,4 +7526,4 @@ void ProfileEntryHookStub::Generate(MacroAssembler* masm) {
|
|
|
| } } // namespace v8::internal
|
|
|
| -#endif // V8_TARGET_ARCH_ARM
|
| +#endif // V8_TARGET_ARCH_SH4
|
|
|