Index: src/sh4/stub-cache-sh4.cc |
diff --git a/src/arm/stub-cache-arm.cc b/src/sh4/stub-cache-sh4.cc |
similarity index 89% |
copy from src/arm/stub-cache-arm.cc |
copy to src/sh4/stub-cache-sh4.cc |
index d3b58624c8b24058eda3cdec1742bc7ccc36c739..9f41198a2f1e88e81cea7b3dd610dbd08fd05682 100644 |
--- a/src/arm/stub-cache-arm.cc |
+++ b/src/sh4/stub-cache-sh4.cc |
@@ -1,4 +1,4 @@ |
-// Copyright 2012 the V8 project authors. All rights reserved. |
+// Copyright 2011-2012 the V8 project authors. All rights reserved. |
// Redistribution and use in source and binary forms, with or without |
// modification, are permitted provided that the following conditions are |
// met: |
@@ -27,7 +27,7 @@ |
#include "v8.h" |
-#if defined(V8_TARGET_ARCH_ARM) |
+#if defined(V8_TARGET_ARCH_SH4) |
#include "ic-inl.h" |
#include "codegen.h" |
@@ -38,6 +38,7 @@ namespace internal { |
#define __ ACCESS_MASM(masm) |
+#include "map-sh4.h" // Define register map |
static void ProbeTable(Isolate* isolate, |
MacroAssembler* masm, |
@@ -66,16 +67,22 @@ static void ProbeTable(Isolate* isolate, |
ASSERT((map_off_addr - key_off_addr) % 4 == 0); |
ASSERT((map_off_addr - key_off_addr) < (256 * 4)); |
+ // Check that ip is not used. |
+ ASSERT(!name.is(ip) && !offset.is(ip) && !scratch.is(ip) && !scratch2.is(ip)); |
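+ // (ip appears to serve as the port's dedicated scratch register, hence these alias checks.) |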
+ |
Label miss; |
Register base_addr = scratch; |
scratch = no_reg; |
// Multiply by 3 because there are 3 fields per entry (name, code, map). |
- __ add(offset_scratch, offset, Operand(offset, LSL, 1)); |
+ __ lsl(offset_scratch, offset, Operand(1)); |
+ __ add(offset_scratch, offset, offset_scratch); |
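+ // (SH4 has no ARM-style shifted-operand form, so the shift takes a separate lsl.) |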
// Calculate the base address of the entry. |
__ mov(base_addr, Operand(key_offset)); |
- __ add(base_addr, base_addr, Operand(offset_scratch, LSL, kPointerSizeLog2)); |
+ __ lsl(offset_scratch, offset_scratch, Operand(kPointerSizeLog2)); |
+ __ add(base_addr, base_addr, offset_scratch); |
// Check that the key in the entry matches the name. |
__ ldr(ip, MemOperand(base_addr, 0)); |
@@ -98,28 +105,13 @@ static void ProbeTable(Isolate* isolate, |
base_addr = no_reg; |
__ ldr(flags_reg, FieldMemOperand(code, Code::kFlagsOffset)); |
// It's a nice optimization if this constant is encodable in the bic insn. |
- |
- uint32_t mask = Code::kFlagsNotUsedInLookup; |
- ASSERT(__ ImmediateFitsAddrMode1Instruction(mask)); |
- __ bic(flags_reg, flags_reg, Operand(mask)); |
- // Using cmn and the negative instead of cmp means we can use movw. |
- if (flags < 0) { |
- __ cmn(flags_reg, Operand(-flags)); |
- } else { |
- __ cmp(flags_reg, Operand(flags)); |
- } |
+ // TODO(STM): check soon; the ARM version first masks off Code::kFlagsNotUsedInLookup with bic. |
+ __ cmp(flags_reg, Operand(flags)); |
__ b(ne, &miss); |
-#ifdef DEBUG |
- if (FLAG_test_secondary_stub_cache && table == StubCache::kPrimary) { |
- __ jmp(&miss); |
- } else if (FLAG_test_primary_stub_cache && table == StubCache::kSecondary) { |
- __ jmp(&miss); |
- } |
-#endif |
- |
// Jump to the first instruction in the code stub. |
- __ add(pc, code, Operand(Code::kHeaderSize - kHeapObjectTag)); |
+ __ add(offset, offset, Operand(Code::kHeaderSize - kHeapObjectTag)); |
+ __ jmp(offset); |
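+ // (SH4 has no 'add pc, ...' form: the target address is computed in a register and jumped through.) |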
// Miss: fall through. |
__ bind(&miss); |
@@ -156,8 +148,8 @@ static void GenerateDictionaryNegativeLookup(MacroAssembler* masm, |
// Check that receiver is a JSObject. |
__ ldrb(scratch0, FieldMemOperand(map, Map::kInstanceTypeOffset)); |
- __ cmp(scratch0, Operand(FIRST_SPEC_OBJECT_TYPE)); |
- __ b(lt, miss_label); |
+ __ cmpge(scratch0, Operand(FIRST_SPEC_OBJECT_TYPE)); |
+ __ bf(miss_label); |
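+ // (SH4 idiom: cmpge sets the T bit; bf branches when T is clear, i.e. when scratch0 < FIRST_SPEC_OBJECT_TYPE.) |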
// Load properties array. |
Register properties = scratch0; |
@@ -220,6 +212,10 @@ void StubCache::GenerateProbe(MacroAssembler* masm, |
ASSERT(!extra2.is(no_reg)); |
ASSERT(!extra3.is(no_reg)); |
+ // Check that ip is not used. |
+ ASSERT(!receiver.is(ip) && !name.is(ip) && !scratch.is(ip) && |
+ !extra.is(ip) && !extra2.is(ip)); |
+ |
Counters* counters = masm->isolate()->counters(); |
__ IncrementCounter(counters->megamorphic_stub_cache_probes(), 1, |
extra2, extra3); |
@@ -230,16 +226,14 @@ void StubCache::GenerateProbe(MacroAssembler* masm, |
// Get the map of the receiver and compute the hash. |
__ ldr(scratch, FieldMemOperand(name, String::kHashFieldOffset)); |
__ ldr(ip, FieldMemOperand(receiver, HeapObject::kMapOffset)); |
- __ add(scratch, scratch, Operand(ip)); |
+ __ add(scratch, scratch, ip); |
uint32_t mask = kPrimaryTableSize - 1; |
// We shift out the last two bits because they are not part of the hash and |
// they are always 01 for maps. |
- __ mov(scratch, Operand(scratch, LSR, kHeapObjectTagSize)); |
- // Mask down the eor argument to the minimum to keep the immediate |
- // ARM-encodable. |
+ __ lsr(scratch, scratch, Operand(kHeapObjectTagSize)); |
+ // Mask down the eor argument to the minimum to keep the immediate small. |
__ eor(scratch, scratch, Operand((flags >> kHeapObjectTagSize) & mask)); |
- // Prefer and_ to ubfx here because ubfx takes 2 cycles. |
- __ and_(scratch, scratch, Operand(mask)); |
+ __ land(scratch, scratch, Operand(mask)); |
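+ // ('land' is this port's name for bitwise AND, presumably because 'and' is reserved in C++.) |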
// Probe the primary table. |
ProbeTable(isolate, |
@@ -254,10 +248,11 @@ void StubCache::GenerateProbe(MacroAssembler* masm, |
extra3); |
// Primary miss: Compute hash for secondary probe. |
- __ sub(scratch, scratch, Operand(name, LSR, kHeapObjectTagSize)); |
+ __ lsr(extra3, name, Operand(kHeapObjectTagSize)); |
+ __ sub(scratch, scratch, extra3); |
uint32_t mask2 = kSecondaryTableSize - 1; |
__ add(scratch, scratch, Operand((flags >> kHeapObjectTagSize) & mask2)); |
- __ and_(scratch, scratch, Operand(mask2)); |
+ __ land(scratch, scratch, Operand(mask2)); |
// Probe the secondary table. |
ProbeTable(isolate, |
@@ -351,7 +346,7 @@ void StubCompiler::GenerateLoadArrayLength(MacroAssembler* masm, |
__ JumpIfSmi(receiver, miss_label); |
// Check that the object is a JS array. |
- __ CompareObjectType(receiver, scratch, scratch, JS_ARRAY_TYPE); |
+ __ CompareObjectType(receiver, scratch, scratch, JS_ARRAY_TYPE, eq); |
__ b(ne, miss_label); |
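+ // (The extra condition argument appears to tell the SH4 CompareObjectType which relation to leave in the T bit.) |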
// Load length directly from the JS array. |
@@ -369,13 +364,15 @@ static void GenerateStringCheck(MacroAssembler* masm, |
Register scratch2, |
Label* smi, |
Label* non_string_object) { |
+ ASSERT(!receiver.is(ip) && !scratch1.is(ip) && !scratch2.is(ip)); |
+ |
// Check that the receiver isn't a smi. |
__ JumpIfSmi(receiver, smi); |
// Check that the object is a string. |
__ ldr(scratch1, FieldMemOperand(receiver, HeapObject::kMapOffset)); |
__ ldrb(scratch1, FieldMemOperand(scratch1, Map::kInstanceTypeOffset)); |
- __ and_(scratch2, scratch1, Operand(kIsNotStringMask)); |
+ __ land(scratch2, scratch1, Operand(kIsNotStringMask)); |
// The cast is to resolve the overload for the argument of 0x0. |
__ cmp(scratch2, Operand(static_cast<int32_t>(kStringTag))); |
__ b(ne, non_string_object); |
@@ -392,6 +389,7 @@ void StubCompiler::GenerateLoadStringLength(MacroAssembler* masm, |
Register scratch2, |
Label* miss, |
bool support_wrappers) { |
+ ASSERT(!receiver.is(ip) && !scratch1.is(ip) && !scratch2.is(ip)); |
Label check_wrapper; |
// Check if the object is a string leaving the instance type in the |
@@ -599,7 +597,7 @@ static void GenerateCallFunction(MacroAssembler* masm, |
// Check that the function really is a function. |
__ JumpIfSmi(r1, miss); |
- __ CompareObjectType(r1, r3, r3, JS_FUNCTION_TYPE); |
+ __ CompareObjectType(r1, r3, r3, JS_FUNCTION_TYPE, eq); |
__ b(ne, miss); |
// Patch the receiver on the stack with the global proxy if |
@@ -705,8 +703,10 @@ static void GenerateFastApiDirectCall(MacroAssembler* masm, |
__ Move(r6, call_data); |
} |
__ mov(r7, Operand(ExternalReference::isolate_address())); |
- // Store JS function, call data and isolate. |
- __ stm(ib, sp, r5.bit() | r6.bit() | r7.bit()); |
+ // Store JS function, call data and isolate. |
+ __ str(r5, MemOperand(sp, 4)); |
+ __ str(r6, MemOperand(sp, 8)); |
+ __ str(r7, MemOperand(sp, 12)); |
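+ // (SH4 has no stm store-multiple, so the three slots are written with individual str instructions.) |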
// Prepare arguments. |
__ add(r2, sp, Operand(3 * kPointerSize)); |
@@ -980,7 +980,7 @@ static void GenerateCheckPropertyCells(MacroAssembler* masm, |
// Convert and store int passed in register ival to IEEE 754 single precision |
// floating point value at memory location (dst + 4 * wordoffset) |
-// If VFP3 is available use it for conversion. |
+// If the FPU is available, use it for conversion. |
static void StoreIntAsFloat(MacroAssembler* masm, |
Register dst, |
Register wordoffset, |
@@ -988,12 +988,12 @@ static void StoreIntAsFloat(MacroAssembler* masm, |
Register fval, |
Register scratch1, |
Register scratch2) { |
- if (CpuFeatures::IsSupported(VFP2)) { |
- CpuFeatures::Scope scope(VFP2); |
- __ vmov(s0, ival); |
- __ add(scratch1, dst, Operand(wordoffset, LSL, 2)); |
- __ vcvt_f32_s32(s0, s0); |
- __ vstr(s0, scratch1, 0); |
+ if (CpuFeatures::IsSupported(FPU)) { |
+ __ dfloat(dr0, ival); |
+ __ fcnvds(fr0, dr0); |
+ __ lsl(scratch1, wordoffset, Operand(2)); |
+ __ add(scratch1, dst, scratch1); |
+ __ fstr(fr0, MemOperand(scratch1, 0)); |
} else { |
Label not_special, done; |
// Move sign bit from source to destination. This works because the sign |
@@ -1001,21 +1001,25 @@ static void StoreIntAsFloat(MacroAssembler* masm, |
// as the 2's complement sign bit in a Smi. |
ASSERT(kBinary32SignMask == 0x80000000u); |
- __ and_(fval, ival, Operand(kBinary32SignMask), SetCC); |
+ __ land(fval, ival, Operand(kBinary32SignMask)); |
+ __ cmp(fval, Operand(0)); |
// Negate value if it is negative. |
- __ rsb(ival, ival, Operand(0, RelocInfo::NONE), LeaveCC, ne); |
+ __ rsb(ip, ival, Operand(0, RelocInfo::NONE)); |
+ __ mov(ival, ip, ne); |
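+ // (SH4 ALU ops cannot be predicated: compute the result into ip, then conditionally move it.) |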
// We have -1, 0 or 1, which we treat specially. Register ival contains |
// absolute value: it is either equal to 1 (special case of -1 and 1), |
// greater than 1 (not a special case) or less than 1 (special case of 0). |
- __ cmp(ival, Operand(1)); |
- __ b(gt, ¬_special); |
+ __ cmpgt(ival, Operand(1)); |
+ __ b(t, ¬_special); |
// For 1 or -1 we need to or in the 0 exponent (biased). |
static const uint32_t exponent_word_for_1 = |
kBinary32ExponentBias << kBinary32ExponentShift; |
- __ orr(fval, fval, Operand(exponent_word_for_1), LeaveCC, eq); |
+ __ cmp(ival, Operand(1)); |
+ __ orr(ip, fval, Operand(exponent_word_for_1)); |
+ __ mov(fval, ip, eq); |
__ b(&done); |
__ bind(¬_special); |
@@ -1029,21 +1033,24 @@ static void StoreIntAsFloat(MacroAssembler* masm, |
zeros, |
Operand((kBitsPerInt - 1) + kBinary32ExponentBias)); |
+ __ lsl(ip, scratch1, Operand(kBinary32ExponentShift)); |
__ orr(fval, |
fval, |
- Operand(scratch1, LSL, kBinary32ExponentShift)); |
+ ip); |
// Shift up the source chopping the top bit off. |
__ add(zeros, zeros, Operand(1)); |
// This wouldn't work for 1 and -1 as the shift would be 32 which means 0. |
- __ mov(ival, Operand(ival, LSL, zeros)); |
+ __ lsl(ival, ival, zeros); |
// And the top (top 20 bits). |
+ __ lsr(ip, ival, Operand(kBitsPerInt - kBinary32MantissaBits)); |
__ orr(fval, |
fval, |
- Operand(ival, LSR, kBitsPerInt - kBinary32MantissaBits)); |
+ ip); |
__ bind(&done); |
- __ str(fval, MemOperand(dst, wordoffset, LSL, 2)); |
+ __ lsl(ip, wordoffset, Operand(2)); |
+ __ str(fval, MemOperand(dst, ip)); |
} |
} |
@@ -1058,6 +1065,7 @@ static void GenerateUInt2Double(MacroAssembler* masm, |
Register loword, |
Register scratch, |
int leading_zeroes) { |
+ ASSERT(!scratch.is(hiword)); |
const int meaningful_bits = kBitsPerInt - leading_zeroes - 1; |
const int biased_exponent = HeapNumber::kExponentBias + meaningful_bits; |
@@ -1069,11 +1077,13 @@ static void GenerateUInt2Double(MacroAssembler* masm, |
__ mov(scratch, Operand(biased_exponent << HeapNumber::kExponentShift)); |
if (mantissa_shift_for_hi_word > 0) { |
- __ mov(loword, Operand(hiword, LSL, mantissa_shift_for_lo_word)); |
- __ orr(hiword, scratch, Operand(hiword, LSR, mantissa_shift_for_hi_word)); |
+ __ lsl(loword, hiword, Operand(mantissa_shift_for_lo_word)); |
+ __ lsr(hiword, hiword, Operand(mantissa_shift_for_hi_word)); |
+ __ orr(hiword, scratch, hiword); |
} else { |
__ mov(loword, Operand(0, RelocInfo::NONE)); |
- __ orr(hiword, scratch, Operand(hiword, LSL, mantissa_shift_for_hi_word)); |
+ __ lsl(hiword, hiword, Operand(mantissa_shift_for_hi_word)); |
+ __ orr(hiword, scratch, hiword); |
} |
// If least significant bit of biased exponent was not 1 it was corrupted |
@@ -1399,7 +1409,7 @@ void StubCompiler::GenerateLoadInterceptor(Handle<JSObject> object, |
Label interceptor_failed; |
__ LoadRoot(scratch1, Heap::kNoInterceptorResultSentinelRootIndex); |
__ cmp(r0, scratch1); |
- __ b(eq, &interceptor_failed); |
+ __ b(eq, &interceptor_failed, Label::kNear); |
frame_scope.GenerateLeaveFrame(); |
__ Ret(); |
@@ -1520,7 +1530,7 @@ void CallStubCompiler::GenerateLoadFunctionFromCell( |
// function can all use this call IC. Before we load through the |
// function, we have to verify that it still is a function. |
__ JumpIfSmi(r1, miss); |
- __ CompareObjectType(r1, r3, r3, JS_FUNCTION_TYPE); |
+ __ CompareObjectType(r1, r3, r3, JS_FUNCTION_TYPE, eq); |
__ b(ne, miss); |
// Check the shared function info. Make sure it hasn't changed. |
@@ -1643,8 +1653,8 @@ Handle<Code> CallStubCompiler::CompileArrayPushCall( |
__ ldr(r4, FieldMemOperand(elements, FixedArray::kLengthOffset)); |
// Check if we could survive without allocation. |
- __ cmp(r0, r4); |
- __ b(gt, &attempt_to_grow_elements); |
+ __ cmpgt(r0, r4); |
+ __ bt_near(&attempt_to_grow_elements); |
// Check if value is a smi. |
Label with_write_barrier; |
@@ -1657,8 +1667,8 @@ Handle<Code> CallStubCompiler::CompileArrayPushCall( |
// Store the value. |
// We may need a register containing the address end_elements below, |
// so write back the value in end_elements. |
- __ add(end_elements, elements, |
- Operand(r0, LSL, kPointerSizeLog2 - kSmiTagSize)); |
+ __ lsl(ip, r0, Operand(kPointerSizeLog2 - kSmiTagSize)); |
+ __ add(end_elements, elements, ip); |
const int kEndElementsOffset = |
FixedArray::kHeaderSize - kHeapObjectTag - argc * kPointerSize; |
__ str(r4, MemOperand(end_elements, kEndElementsOffset, PreIndex)); |
@@ -1711,8 +1721,8 @@ Handle<Code> CallStubCompiler::CompileArrayPushCall( |
// Store the value. |
// We may need a register containing the address end_elements below, |
// so write back the value in end_elements. |
- __ add(end_elements, elements, |
- Operand(r0, LSL, kPointerSizeLog2 - kSmiTagSize)); |
+ __ lsl(end_elements, r0, Operand(kPointerSizeLog2 - kSmiTagSize)); |
+ __ add(end_elements, elements, end_elements); |
__ str(r4, MemOperand(end_elements, kEndElementsOffset, PreIndex)); |
__ RecordWrite(elements, |
@@ -1750,8 +1760,8 @@ Handle<Code> CallStubCompiler::CompileArrayPushCall( |
const int kAllocationDelta = 4; |
// Load top and check if it is the end of elements. |
- __ add(end_elements, elements, |
- Operand(r0, LSL, kPointerSizeLog2 - kSmiTagSize)); |
+ __ lsl(end_elements, r0, Operand(kPointerSizeLog2 - kSmiTagSize)); |
+ __ add(end_elements, elements, end_elements); |
__ add(end_elements, end_elements, Operand(kEndElementsOffset)); |
__ mov(r7, Operand(new_space_allocation_top)); |
__ ldr(r3, MemOperand(r7)); |
@@ -1761,8 +1771,8 @@ Handle<Code> CallStubCompiler::CompileArrayPushCall( |
__ mov(r9, Operand(new_space_allocation_limit)); |
__ ldr(r9, MemOperand(r9)); |
__ add(r3, r3, Operand(kAllocationDelta * kPointerSize)); |
- __ cmp(r3, r9); |
- __ b(hi, &call_builtin); |
+ __ cmphi(r3, r9); |
+ __ b(eq, &call_builtin); |
// We fit and could grow elements. |
// Update new_space_allocation_top. |
@@ -1844,8 +1854,9 @@ Handle<Code> CallStubCompiler::CompileArrayPopCall( |
// Get the array's length into r4 and calculate new length. |
__ ldr(r4, FieldMemOperand(receiver, JSArray::kLengthOffset)); |
- __ sub(r4, r4, Operand(Smi::FromInt(1)), SetCC); |
- __ b(lt, &return_undefined); |
+ __ cmpge(r4, Operand(Smi::FromInt(1))); // For the bf_near branch below. |
+ __ sub(r4, r4, Operand(Smi::FromInt(1))); |
+ __ bf_near(&return_undefined); |
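+ // (sub does not set the T bit on SH4, so the check is a cmpge against the old value before the subtraction.) |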
// Get the last element. |
__ LoadRoot(r6, Heap::kTheHoleValueRootIndex); |
@@ -1853,8 +1864,9 @@ Handle<Code> CallStubCompiler::CompileArrayPopCall( |
STATIC_ASSERT(kSmiTag == 0); |
// We can't address the last element in one operation. Compute the more |
// expensive shift first, and use an offset later on. |
- __ add(elements, elements, Operand(r4, LSL, kPointerSizeLog2 - kSmiTagSize)); |
- __ ldr(r0, FieldMemOperand(elements, FixedArray::kHeaderSize)); |
+ __ lsl(r0, r4, Operand(kPointerSizeLog2 - kSmiTagSize)); |
+ __ add(elements, elements, r0); |
+ __ ldr(r0, MemOperand(elements, FixedArray::kHeaderSize)); |
__ cmp(r0, r6); |
__ b(eq, &call_builtin); |
@@ -2097,7 +2109,7 @@ Handle<Code> CallStubCompiler::CompileStringFromCharCodeCall( |
__ JumpIfNotSmi(code, &slow); |
// Convert the smi code to uint16. |
- __ and_(code, code, Operand(Smi::FromInt(0xffff))); |
+ __ land(code, code, Operand(Smi::FromInt(0xffff))); |
StringCharFromCodeGenerator generator(code, r0); |
generator.GenerateFast(masm()); |
@@ -2136,135 +2148,11 @@ Handle<Code> CallStubCompiler::CompileMathFloorCall( |
// -- sp[argc * 4] : receiver |
// ----------------------------------- |
- if (!CpuFeatures::IsSupported(VFP2)) { |
+ // TODO(STM): implement this using the FPU. |
+ // if (!CpuFeatures::IsSupported(FPU)) |
+ { |
return Handle<Code>::null(); |
} |
- |
- CpuFeatures::Scope scope_vfp2(VFP2); |
- const int argc = arguments().immediate(); |
- // If the object is not a JSObject or we got an unexpected number of |
- // arguments, bail out to the regular call. |
- if (!object->IsJSObject() || argc != 1) return Handle<Code>::null(); |
- |
- Label miss, slow; |
- GenerateNameCheck(name, &miss); |
- |
- if (cell.is_null()) { |
- __ ldr(r1, MemOperand(sp, 1 * kPointerSize)); |
- STATIC_ASSERT(kSmiTag == 0); |
- __ JumpIfSmi(r1, &miss); |
- CheckPrototypes(Handle<JSObject>::cast(object), r1, holder, r0, r3, r4, |
- name, &miss); |
- } else { |
- ASSERT(cell->value() == *function); |
- GenerateGlobalReceiverCheck(Handle<JSObject>::cast(object), holder, name, |
- &miss); |
- GenerateLoadFunctionFromCell(cell, function, &miss); |
- } |
- |
- // Load the (only) argument into r0. |
- __ ldr(r0, MemOperand(sp, 0 * kPointerSize)); |
- |
- // If the argument is a smi, just return. |
- STATIC_ASSERT(kSmiTag == 0); |
- __ tst(r0, Operand(kSmiTagMask)); |
- __ Drop(argc + 1, eq); |
- __ Ret(eq); |
- |
- __ CheckMap(r0, r1, Heap::kHeapNumberMapRootIndex, &slow, DONT_DO_SMI_CHECK); |
- |
- Label wont_fit_smi, no_vfp_exception, restore_fpscr_and_return; |
- |
- // If vfp3 is enabled, we use the fpu rounding with the RM (round towards |
- // minus infinity) mode. |
- |
- // Load the HeapNumber value. |
- // We will need access to the value in the core registers, so we load it |
- // with ldrd and move it to the fpu. It also spares a sub instruction for |
- // updating the HeapNumber value address, as vldr expects a multiple |
- // of 4 offset. |
- __ Ldrd(r4, r5, FieldMemOperand(r0, HeapNumber::kValueOffset)); |
- __ vmov(d1, r4, r5); |
- |
- // Backup FPSCR. |
- __ vmrs(r3); |
- // Set custom FPCSR: |
- // - Set rounding mode to "Round towards Minus Infinity" |
- // (i.e. bits [23:22] = 0b10). |
- // - Clear vfp cumulative exception flags (bits [3:0]). |
- // - Make sure Flush-to-zero mode control bit is unset (bit 22). |
- __ bic(r9, r3, |
- Operand(kVFPExceptionMask | kVFPRoundingModeMask | kVFPFlushToZeroMask)); |
- __ orr(r9, r9, Operand(kRoundToMinusInf)); |
- __ vmsr(r9); |
- |
- // Convert the argument to an integer. |
- __ vcvt_s32_f64(s0, d1, kFPSCRRounding); |
- |
- // Use vcvt latency to start checking for special cases. |
- // Get the argument exponent and clear the sign bit. |
- __ bic(r6, r5, Operand(HeapNumber::kSignMask)); |
- __ mov(r6, Operand(r6, LSR, HeapNumber::kMantissaBitsInTopWord)); |
- |
- // Retrieve FPSCR and check for vfp exceptions. |
- __ vmrs(r9); |
- __ tst(r9, Operand(kVFPExceptionMask)); |
- __ b(&no_vfp_exception, eq); |
- |
- // Check for NaN, Infinity, and -Infinity. |
- // They are invariant through a Math.Floor call, so just |
- // return the original argument. |
- __ sub(r7, r6, Operand(HeapNumber::kExponentMask |
- >> HeapNumber::kMantissaBitsInTopWord), SetCC); |
- __ b(&restore_fpscr_and_return, eq); |
- // We had an overflow or underflow in the conversion. Check if we |
- // have a big exponent. |
- __ cmp(r7, Operand(HeapNumber::kMantissaBits)); |
- // If greater or equal, the argument is already round and in r0. |
- __ b(&restore_fpscr_and_return, ge); |
- __ b(&wont_fit_smi); |
- |
- __ bind(&no_vfp_exception); |
- // Move the result back to general purpose register r0. |
- __ vmov(r0, s0); |
- // Check if the result fits into a smi. |
- __ add(r1, r0, Operand(0x40000000), SetCC); |
- __ b(&wont_fit_smi, mi); |
- // Tag the result. |
- STATIC_ASSERT(kSmiTag == 0); |
- __ mov(r0, Operand(r0, LSL, kSmiTagSize)); |
- |
- // Check for -0. |
- __ cmp(r0, Operand(0, RelocInfo::NONE)); |
- __ b(&restore_fpscr_and_return, ne); |
- // r5 already holds the HeapNumber exponent. |
- __ tst(r5, Operand(HeapNumber::kSignMask)); |
- // If our HeapNumber is negative it was -0, so load its address and return. |
- // Else r0 is loaded with 0, so we can also just return. |
- __ ldr(r0, MemOperand(sp, 0 * kPointerSize), ne); |
- |
- __ bind(&restore_fpscr_and_return); |
- // Restore FPSCR and return. |
- __ vmsr(r3); |
- __ Drop(argc + 1); |
- __ Ret(); |
- |
- __ bind(&wont_fit_smi); |
- // Restore FPCSR and fall to slow case. |
- __ vmsr(r3); |
- |
- __ bind(&slow); |
- // Tail call the full function. We do not have to patch the receiver |
- // because the function makes no use of it. |
- __ InvokeFunction( |
- function, arguments(), JUMP_FUNCTION, NullCallWrapper(), CALL_AS_METHOD); |
- |
- __ bind(&miss); |
- // r2: function name. |
- GenerateMissBranch(); |
- |
- // Return the generated code. |
- return cell.is_null() ? GetCode(function) : GetCode(Code::NORMAL, name); |
} |
@@ -2312,15 +2200,18 @@ Handle<Code> CallStubCompiler::CompileMathAbsCall( |
// Do bitwise not or do nothing depending on the sign of the |
// argument. |
- __ eor(r1, r0, Operand(r0, ASR, kBitsPerInt - 1)); |
+ __ asr(r1, r0, Operand(kBitsPerInt - 1)); |
+ __ eor(r1, r0, r1); |
// Add 1 or do nothing depending on the sign of the argument. |
- __ sub(r0, r1, Operand(r0, ASR, kBitsPerInt - 1), SetCC); |
+ __ asr(r0, r0, Operand(kBitsPerInt - 1)); |
+ __ sub(r0, r1, r0); |
+ __ cmpge(r0, Operand(0)); |
// If the result is still negative, go to the slow case. |
// This only happens for the most negative smi. |
Label slow; |
- __ b(mi, &slow); |
+ __ b(f, &slow); |
// Smi case done. |
__ Drop(argc + 1); |
@@ -2336,7 +2227,7 @@ Handle<Code> CallStubCompiler::CompileMathAbsCall( |
// just return it. |
Label negative_sign; |
__ tst(r1, Operand(HeapNumber::kSignMask)); |
- __ b(ne, &negative_sign); |
+ __ b(ne, &negative_sign, Label::kNear); |
__ Drop(argc + 1); |
__ Ret(); |
@@ -2470,8 +2361,8 @@ Handle<Code> CallStubCompiler::CompileCallConstant(Handle<Object> object, |
case STRING_CHECK: |
if (function->IsBuiltin() || !function->shared()->is_classic_mode()) { |
// Check that the object is a two-byte string or a symbol. |
- __ CompareObjectType(r1, r3, r3, FIRST_NONSTRING_TYPE); |
- __ b(ge, &miss); |
+ __ CompareObjectType(r1, r3, r3, FIRST_NONSTRING_TYPE, ge); |
+ __ b(eq, &miss); |
// Check that the maps starting from the prototype haven't changed. |
GenerateDirectLoadGlobalFunctionPrototype( |
masm(), Context::STRING_FUNCTION_INDEX, r0, &miss); |
@@ -2490,7 +2381,7 @@ Handle<Code> CallStubCompiler::CompileCallConstant(Handle<Object> object, |
Label fast; |
// Check that the object is a smi or a heap number. |
__ JumpIfSmi(r1, &fast); |
- __ CompareObjectType(r1, r0, r0, HEAP_NUMBER_TYPE); |
+ __ CompareObjectType(r1, r0, r0, HEAP_NUMBER_TYPE, eq); |
__ b(ne, &miss); |
__ bind(&fast); |
// Check that the maps starting from the prototype haven't changed. |
@@ -2621,10 +2512,13 @@ Handle<Code> CallStubCompiler::CompileCallGlobal( |
// Jump to the cached code (tail call). |
Counters* counters = masm()->isolate()->counters(); |
__ IncrementCounter(counters->call_global_inline(), 1, r3, r4); |
+ ASSERT(function->is_compiled()); |
+ Handle<Code> code(function->code()); |
ParameterCount expected(function->shared()->formal_parameter_count()); |
CallKind call_kind = CallICBase::Contextual::decode(extra_state_) |
? CALL_AS_FUNCTION |
: CALL_AS_METHOD; |
+ // TODO(STM): does this work without UseCrankshaft()? |
// We call indirectly through the code field in the function to |
// allow recompilation to take effect without changing any of the |
// call sites. |
@@ -3183,7 +3077,7 @@ Handle<Code> KeyedLoadStubCompiler::CompileLoadInterceptor( |
Label miss; |
// Check the key is the cached one. |
- __ cmp(r0, Operand(name)); |
+ __ cmp(r0, Operand(name), ip); |
__ b(ne, &miss); |
LookupResult lookup(isolate()); |
@@ -3207,7 +3101,7 @@ Handle<Code> KeyedLoadStubCompiler::CompileLoadArrayLength( |
Label miss; |
// Check the key is the cached one. |
- __ cmp(r0, Operand(name)); |
+ __ cmp(r0, Operand(name), ip); |
__ b(ne, &miss); |
GenerateLoadArrayLength(masm(), r1, r2, &miss); |
@@ -3231,7 +3125,7 @@ Handle<Code> KeyedLoadStubCompiler::CompileLoadStringLength( |
__ IncrementCounter(counters->keyed_load_string_length(), 1, r2, r3); |
// Check the key is the cached one. |
- __ cmp(r0, Operand(name)); |
+ __ cmp(r0, Operand(name), ip); |
__ b(ne, &miss); |
GenerateLoadStringLength(masm(), r1, r2, r3, &miss, true); |
@@ -3257,7 +3151,7 @@ Handle<Code> KeyedLoadStubCompiler::CompileLoadFunctionPrototype( |
__ IncrementCounter(counters->keyed_load_function_prototype(), 1, r2, r3); |
// Check the name hasn't changed. |
- __ cmp(r0, Operand(name)); |
+ __ cmp(r0, Operand(name), ip); |
__ b(ne, &miss); |
GenerateLoadFunctionPrototype(masm(), r1, r2, r3, &miss); |
@@ -3303,14 +3197,17 @@ Handle<Code> KeyedLoadStubCompiler::CompileLoadPolymorphic( |
int receiver_count = receiver_maps->length(); |
__ ldr(r2, FieldMemOperand(r1, HeapObject::kMapOffset)); |
for (int current = 0; current < receiver_count; ++current) { |
+ Label skip; |
__ mov(ip, Operand(receiver_maps->at(current))); |
__ cmp(r2, ip); |
- __ Jump(handler_ics->at(current), RelocInfo::CODE_TARGET, eq); |
+ __ bf_near(&skip); |
+ __ Jump(handler_ics->at(current), RelocInfo::CODE_TARGET); |
+ __ bind(&skip); |
} |
__ bind(&miss); |
Handle<Code> miss_ic = isolate()->builtins()->KeyedLoadIC_Miss(); |
- __ Jump(miss_ic, RelocInfo::CODE_TARGET, al); |
+ __ Jump(miss_ic, RelocInfo::CODE_TARGET); |
// Return the generated code. |
return GetCode(Code::NORMAL, factory()->empty_string(), MEGAMORPHIC); |
@@ -3402,19 +3299,22 @@ Handle<Code> KeyedStoreStubCompiler::CompileStorePolymorphic( |
__ mov(ip, Operand(receiver_maps->at(i))); |
__ cmp(r3, ip); |
if (transitioned_maps->at(i).is_null()) { |
- __ Jump(handler_stubs->at(i), RelocInfo::CODE_TARGET, eq); |
+ Label skip; |
+ __ bf(&skip); |
+ __ Jump(handler_stubs->at(i), RelocInfo::CODE_TARGET); |
+ __ bind(&skip); |
} else { |
Label next_map; |
__ b(ne, &next_map); |
__ mov(r3, Operand(transitioned_maps->at(i))); |
- __ Jump(handler_stubs->at(i), RelocInfo::CODE_TARGET, al); |
+ __ Jump(handler_stubs->at(i), RelocInfo::CODE_TARGET); |
__ bind(&next_map); |
} |
} |
__ bind(&miss); |
Handle<Code> miss_ic = isolate()->builtins()->KeyedStoreIC_Miss(); |
- __ Jump(miss_ic, RelocInfo::CODE_TARGET, al); |
+ __ Jump(miss_ic, RelocInfo::CODE_TARGET); |
// Return the generated code. |
return GetCode(Code::NORMAL, factory()->empty_string(), MEGAMORPHIC); |
@@ -3449,8 +3349,8 @@ Handle<Code> ConstructStubCompiler::CompileConstructStub( |
// r7: undefined |
__ ldr(r2, FieldMemOperand(r1, JSFunction::kPrototypeOrInitialMapOffset)); |
__ JumpIfSmi(r2, &generic_stub_call); |
- __ CompareObjectType(r2, r3, r4, MAP_TYPE); |
- __ b(ne, &generic_stub_call); |
+ __ CompareObjectType(r2, r3, r4, MAP_TYPE, eq); |
+ __ b(f, &generic_stub_call); |
#ifdef DEBUG |
// Cannot construct functions this way. |
@@ -3458,7 +3358,7 @@ Handle<Code> ConstructStubCompiler::CompileConstructStub( |
// r1: constructor function |
// r2: initial map |
// r7: undefined |
- __ CompareInstanceType(r2, r3, JS_FUNCTION_TYPE); |
+ __ CompareInstanceType(r2, r3, JS_FUNCTION_TYPE, eq); |
__ Check(ne, "Function constructed by construct stub."); |
#endif |
@@ -3489,7 +3389,8 @@ Handle<Code> ConstructStubCompiler::CompileConstructStub( |
// Calculate the location of the first argument. The stack contains only the |
// argc arguments. |
- __ add(r1, sp, Operand(r0, LSL, kPointerSizeLog2)); |
+ __ lsl(r1, r0, Operand(kPointerSizeLog2)); |
+ __ add(r1, sp, r1); |
// Fill all the in-object properties with undefined. |
// r0: argc |
@@ -3506,12 +3407,12 @@ Handle<Code> ConstructStubCompiler::CompileConstructStub( |
Label not_passed, next; |
// Check if the argument assigned to the property is actually passed. |
int arg_number = shared->GetThisPropertyAssignmentArgument(i); |
- __ cmp(r0, Operand(arg_number)); |
- __ b(le, ¬_passed); |
+ __ cmpgt(r0, Operand(arg_number)); |
+ __ b(f, ¬_passed, Label::kNear); |
// Argument passed - find it on the stack. |
__ ldr(r2, MemOperand(r1, (arg_number + 1) * -kPointerSize)); |
__ str(r2, MemOperand(r5, kPointerSize, PostIndex)); |
- __ b(&next); |
+ __ b_near(&next); |
__ bind(¬_passed); |
// Set the property to undefined. |
__ str(r7, MemOperand(r5, kPointerSize, PostIndex)); |
@@ -3542,12 +3443,13 @@ Handle<Code> ConstructStubCompiler::CompileConstructStub( |
// r0: JSObject |
// r1: argc |
// Remove caller arguments and receiver from the stack and return. |
- __ add(sp, sp, Operand(r1, LSL, kPointerSizeLog2)); |
+ __ lsl(ip, r1, Operand(kPointerSizeLog2)); |
+ __ add(sp, sp, ip); |
__ add(sp, sp, Operand(kPointerSize)); |
Counters* counters = masm()->isolate()->counters(); |
__ IncrementCounter(counters->constructed_objects(), 1, r1, r2); |
__ IncrementCounter(counters->constructed_objects_stub(), 1, r1, r2); |
- __ Jump(lr); |
+ __ Ret(); |
// Jump to the generic stub in case the specialized code cannot handle the |
// construction. |
@@ -3577,7 +3479,7 @@ void KeyedLoadStubCompiler::GenerateLoadDictionaryElement( |
Register receiver = r1; |
__ JumpIfNotSmi(key, &miss_force_generic); |
- __ mov(r2, Operand(key, ASR, kSmiTagSize)); |
+ __ asr(r2, key, Operand(kSmiTagSize)); |
__ ldr(r4, FieldMemOperand(receiver, JSObject::kElementsOffset)); |
__ LoadFromNumberDictionary(&slow, r4, key, r0, r2, r3, r5); |
__ Ret(); |
@@ -3646,33 +3548,12 @@ static void GenerateSmiKeyCheck(MacroAssembler* masm, |
Register scratch0, |
Register scratch1, |
DwVfpRegister double_scratch0, |
- DwVfpRegister double_scratch1, |
Label* fail) { |
- if (CpuFeatures::IsSupported(VFP2)) { |
- CpuFeatures::Scope scope(VFP2); |
- Label key_ok; |
- // Check for smi or a smi inside a heap number. We convert the heap |
- // number and check if the conversion is exact and fits into the smi |
- // range. |
- __ JumpIfSmi(key, &key_ok); |
- __ CheckMap(key, |
- scratch0, |
- Heap::kHeapNumberMapRootIndex, |
- fail, |
- DONT_DO_SMI_CHECK); |
- __ sub(ip, key, Operand(kHeapObjectTag)); |
- __ vldr(double_scratch0, ip, HeapNumber::kValueOffset); |
- __ EmitVFPTruncate(kRoundToZero, |
- scratch0, |
- double_scratch0, |
- scratch1, |
- double_scratch1, |
- kCheckForInexactConversion); |
- __ b(ne, fail); |
- __ TrySmiTag(scratch0, fail, scratch1); |
- __ mov(key, scratch0); |
- __ bind(&key_ok); |
- } else { |
+ // TODO(STM): add FPU support. |
+// if (CpuFeatures::IsSupported(FPU)) { |
+// |
+// } else { |
+ { |
// Check that the key is a smi. |
__ JumpIfNotSmi(key, fail); |
} |
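+ // (Until the FPU path is implemented, heap-number keys take the fail path; only smi keys reach the fast case.) |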
@@ -3696,16 +3577,16 @@ void KeyedLoadStubCompiler::GenerateLoadExternalArray( |
// have been verified by the caller to not be a smi. |
// Check that the key is a smi or a heap number convertible to a smi. |
- GenerateSmiKeyCheck(masm, key, r4, r5, d1, d2, &miss_force_generic); |
+ GenerateSmiKeyCheck(masm, key, r4, r5, dr0, &miss_force_generic); |
__ ldr(r3, FieldMemOperand(receiver, JSObject::kElementsOffset)); |
// r3: elements array |
// Check that the index is in range. |
__ ldr(ip, FieldMemOperand(r3, ExternalArray::kLengthOffset)); |
- __ cmp(key, ip); |
+ __ cmphs(key, ip); |
// Unsigned comparison catches both negative and too-large values. |
- __ b(hs, &miss_force_generic); |
+ __ bt(&miss_force_generic); |
__ ldr(r3, FieldMemOperand(r3, ExternalArray::kExternalPointerOffset)); |
// r3: base pointer of external storage |
@@ -3717,38 +3598,44 @@ void KeyedLoadStubCompiler::GenerateLoadExternalArray( |
Register value = r2; |
switch (elements_kind) { |
case EXTERNAL_BYTE_ELEMENTS: |
- __ ldrsb(value, MemOperand(r3, key, LSR, 1)); |
+ __ lsr(value, key, Operand(1)); |
+ __ ldrsb(value, MemOperand(r3, value)); |
break; |
case EXTERNAL_PIXEL_ELEMENTS: |
case EXTERNAL_UNSIGNED_BYTE_ELEMENTS: |
- __ ldrb(value, MemOperand(r3, key, LSR, 1)); |
+ __ lsr(value, key, Operand(1)); |
+ __ ldrb(value, MemOperand(r3, value)); |
break; |
case EXTERNAL_SHORT_ELEMENTS: |
- __ ldrsh(value, MemOperand(r3, key, LSL, 0)); |
+ __ ldrsh(value, MemOperand(r3, key)); |
break; |
case EXTERNAL_UNSIGNED_SHORT_ELEMENTS: |
- __ ldrh(value, MemOperand(r3, key, LSL, 0)); |
+ __ lsl(value, key, Operand(0)); |
+ __ ldrh(value, MemOperand(r3, value)); |
break; |
case EXTERNAL_INT_ELEMENTS: |
case EXTERNAL_UNSIGNED_INT_ELEMENTS: |
- __ ldr(value, MemOperand(r3, key, LSL, 1)); |
+ __ lsl(value, key, Operand(1)); |
+ __ ldr(value, MemOperand(r3, value)); |
break; |
case EXTERNAL_FLOAT_ELEMENTS: |
- if (CpuFeatures::IsSupported(VFP2)) { |
- CpuFeatures::Scope scope(VFP2); |
- __ add(r2, r3, Operand(key, LSL, 1)); |
- __ vldr(s0, r2, 0); |
+ if (CpuFeatures::IsSupported(FPU)) { |
+ __ lsl(r2, key, Operand(1)); |
+ __ add(r2, r3, r2); |
+ __ fldr(fr0, MemOperand(r2, 0)); |
} else { |
- __ ldr(value, MemOperand(r3, key, LSL, 1)); |
+ __ lsl(value, key, Operand(1)); |
+ __ ldr(value, MemOperand(r3, value)); |
} |
break; |
case EXTERNAL_DOUBLE_ELEMENTS: |
- if (CpuFeatures::IsSupported(VFP2)) { |
- CpuFeatures::Scope scope(VFP2); |
- __ add(r2, r3, Operand(key, LSL, 2)); |
- __ vldr(d0, r2, 0); |
+ if (CpuFeatures::IsSupported(FPU)) { |
+ __ lsl(r2, key, Operand(2)); |
+ __ add(r2, r3, r2); |
+ __ dldr(dr0, MemOperand(r2, 0), r2); |
} else { |
- __ add(r4, r3, Operand(key, LSL, 2)); |
+ __ lsl(r4, key, Operand(2)); |
+ __ add(r4, r3, r4); |
// r4: pointer to the beginning of the double we want to load. |
__ ldr(r2, MemOperand(r4, 0)); |
__ ldr(r3, MemOperand(r4, Register::kSizeInBytes)); |
@@ -3769,46 +3656,41 @@ void KeyedLoadStubCompiler::GenerateLoadExternalArray( |
// For integer array types: |
// r2: value |
// For float array type: |
- // s0: value (if VFP3 is supported) |
- // r2: value (if VFP3 is not supported) |
+ // fr0: value (if FPU is supported) |
+ // r2: value (if FPU is not supported) |
// For double array type: |
- // d0: value (if VFP3 is supported) |
- // r2/r3: value (if VFP3 is not supported) |
+ // dr0: value (if FPU is supported) |
+ // r2/r3: value (if FPU is not supported) |
if (elements_kind == EXTERNAL_INT_ELEMENTS) { |
// For the Int and UnsignedInt array types, we need to see whether |
// the value can be represented in a Smi. If not, we need to convert |
// it to a HeapNumber. |
Label box_int; |
- __ cmp(value, Operand(0xC0000000)); |
- __ b(mi, &box_int); |
+ // TODO(STM): why does this differ from the ARM code? |
+ __ add(r3, value, Operand(0x40000000)); // Out-of-smi-range values give a negative result. |
+ __ cmpge(r3, Operand(0)); |
+ __ bf_near(&box_int); |
// Tag integer as smi and return it. |
- __ mov(r0, Operand(value, LSL, kSmiTagSize)); |
+ __ lsl(r0, value, Operand(kSmiTagSize)); |
__ Ret(); |
__ bind(&box_int); |
- if (CpuFeatures::IsSupported(VFP2)) { |
- CpuFeatures::Scope scope(VFP2); |
- // Allocate a HeapNumber for the result and perform int-to-double |
- // conversion. Don't touch r0 or r1 as they are needed if allocation |
- // fails. |
- __ LoadRoot(r6, Heap::kHeapNumberMapRootIndex); |
- |
- __ AllocateHeapNumber(r5, r3, r4, r6, &slow, DONT_TAG_RESULT); |
- // Now we can use r0 for the result as key is not needed any more. |
- __ add(r0, r5, Operand(kHeapObjectTag)); |
- __ vmov(s0, value); |
- __ vcvt_f64_s32(d0, s0); |
- __ vstr(d0, r5, HeapNumber::kValueOffset); |
+ // Allocate a HeapNumber for the result and perform int-to-double |
+ // conversion. Don't touch r0 or r1 as they are needed if allocation |
+ // fails. |
+ __ LoadRoot(r6, Heap::kHeapNumberMapRootIndex); |
+ __ AllocateHeapNumber(r5, r3, r4, r6, &slow); |
+ // Now we can use r0 for the result as key is not needed any more. |
+ __ mov(r0, r5); |
+ |
+ if (CpuFeatures::IsSupported(FPU)) { |
+ __ dfloat(dr0, value); |
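+ // (SH4 arithmetic immediates are signed 8-bit, hence the is_int8 check on the folded constant below.) |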
+ ASSERT(Operand(kHeapObjectTag - HeapNumber::kValueOffset).is_int8()); |
+ __ sub(r3, r0, Operand(kHeapObjectTag - HeapNumber::kValueOffset)); |
+ __ dstr(dr0, MemOperand(r3, 0), r3); |
__ Ret(); |
} else { |
- // Allocate a HeapNumber for the result and perform int-to-double |
- // conversion. Don't touch r0 or r1 as they are needed if allocation |
- // fails. |
- __ LoadRoot(r6, Heap::kHeapNumberMapRootIndex); |
- __ AllocateHeapNumber(r5, r3, r4, r6, &slow, TAG_RESULT); |
- // Now we can use r0 for the result as key is not needed any more. |
- __ mov(r0, r5); |
Register dst1 = r1; |
Register dst2 = r3; |
FloatingPointHelper::Destination dest = |
@@ -3816,11 +3698,11 @@ void KeyedLoadStubCompiler::GenerateLoadExternalArray( |
FloatingPointHelper::ConvertIntToDouble(masm, |
value, |
dest, |
- d0, |
+ dr0, |
dst1, |
dst2, |
r9, |
- s0); |
+ no_freg); |
__ str(dst1, FieldMemOperand(r0, HeapNumber::kMantissaOffset)); |
__ str(dst2, FieldMemOperand(r0, HeapNumber::kExponentOffset)); |
__ Ret(); |
@@ -3829,37 +3711,37 @@ void KeyedLoadStubCompiler::GenerateLoadExternalArray( |
// The test is different for unsigned int values. Since we need |
// the value to be in the range of a positive smi, we can't |
// handle either of the top two bits being set in the value. |
- if (CpuFeatures::IsSupported(VFP2)) { |
- CpuFeatures::Scope scope(VFP2); |
+ if (CpuFeatures::IsSupported(FPU)) { |
Label box_int, done; |
__ tst(value, Operand(0xC0000000)); |
__ b(ne, &box_int); |
// Tag integer as smi and return it. |
- __ mov(r0, Operand(value, LSL, kSmiTagSize)); |
+ __ lsl(r0, value, Operand(kSmiTagSize)); |
__ Ret(); |
__ bind(&box_int); |
- __ vmov(s0, value); |
+ __ dufloat(dr0, value, dr2, sh4_rtmp); |
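+ // (dufloat appears to be the unsigned int-to-double conversion replacing ARM's vcvt_f64_u32, with extra scratch operands.) |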
// Allocate a HeapNumber for the result and perform int-to-double |
// conversion. Don't use r0 and r1 as AllocateHeapNumber clobbers all |
// registers - also when jumping due to exhausted young space. |
__ LoadRoot(r6, Heap::kHeapNumberMapRootIndex); |
- __ AllocateHeapNumber(r2, r3, r4, r6, &slow, DONT_TAG_RESULT); |
+ __ AllocateHeapNumber(r2, r3, r4, r6, &slow); |
- __ vcvt_f64_u32(d0, s0); |
- __ vstr(d0, r2, HeapNumber::kValueOffset); |
+ __ sub(r1, r2, Operand(kHeapObjectTag)); |
+ __ dstr(dr0, MemOperand(r1, HeapNumber::kValueOffset)); |
- __ add(r0, r2, Operand(kHeapObjectTag)); |
+ __ mov(r0, r2); |
__ Ret(); |
+ |
} else { |
// Check whether unsigned integer fits into smi. |
Label box_int_0, box_int_1, done; |
__ tst(value, Operand(0x80000000)); |
- __ b(ne, &box_int_0); |
+ __ b(ne, &box_int_0, Label::kNear); |
__ tst(value, Operand(0x40000000)); |
__ b(ne, &box_int_1); |
// Tag integer as smi and return it. |
- __ mov(r0, Operand(value, LSL, kSmiTagSize)); |
+ __ lsl(r0, value, Operand(kSmiTagSize)); |
__ Ret(); |
Register hiword = value; // r2. |
@@ -3881,7 +3763,7 @@ void KeyedLoadStubCompiler::GenerateLoadExternalArray( |
// clobbers all registers - also when jumping due to exhausted young |
// space. |
__ LoadRoot(r6, Heap::kHeapNumberMapRootIndex); |
- __ AllocateHeapNumber(r4, r5, r7, r6, &slow, TAG_RESULT); |
+ __ AllocateHeapNumber(r4, r5, r7, r6, &slow); |
__ str(hiword, FieldMemOperand(r4, HeapNumber::kExponentOffset)); |
__ str(loword, FieldMemOperand(r4, HeapNumber::kMantissaOffset)); |
@@ -3892,24 +3774,25 @@ void KeyedLoadStubCompiler::GenerateLoadExternalArray( |
} else if (elements_kind == EXTERNAL_FLOAT_ELEMENTS) { |
// For the floating-point array type, we need to always allocate a |
// HeapNumber. |
- if (CpuFeatures::IsSupported(VFP2)) { |
- CpuFeatures::Scope scope(VFP2); |
+ if (CpuFeatures::IsSupported(FPU)) { |
// Allocate a HeapNumber for the result. Don't use r0 and r1 as |
// AllocateHeapNumber clobbers all registers - also when jumping due to |
// exhausted young space. |
__ LoadRoot(r6, Heap::kHeapNumberMapRootIndex); |
- __ AllocateHeapNumber(r2, r3, r4, r6, &slow, DONT_TAG_RESULT); |
- __ vcvt_f64_f32(d0, s0); |
- __ vstr(d0, r2, HeapNumber::kValueOffset); |
+ __ AllocateHeapNumber(r2, r3, r4, r6, &slow); |
+ __ fcnvsd(dr0, fr0); |
+ ASSERT(Operand(kHeapObjectTag - HeapNumber::kValueOffset).is_int8()); |
+ __ sub(r1, r2, Operand(kHeapObjectTag - HeapNumber::kValueOffset)); |
+ __ dstr(dr0, MemOperand(r1, 0), r1); |
- __ add(r0, r2, Operand(kHeapObjectTag)); |
+ __ mov(r0, r2); |
__ Ret(); |
} else { |
// Allocate a HeapNumber for the result. Don't use r0 and r1 as |
// AllocateHeapNumber clobbers all registers - also when jumping due to |
// exhausted young space. |
__ LoadRoot(r6, Heap::kHeapNumberMapRootIndex); |
- __ AllocateHeapNumber(r3, r4, r5, r6, &slow, TAG_RESULT); |
+ __ AllocateHeapNumber(r3, r4, r5, r6, &slow); |
// VFP is not available, do manual single to double conversion. |
// r2: floating point value (binary32) |
@@ -3917,19 +3800,19 @@ void KeyedLoadStubCompiler::GenerateLoadExternalArray( |
// Extract mantissa to r0. OK to clobber r0 now as there are no jumps to |
// the slow case from here. |
- __ and_(r0, value, Operand(kBinary32MantissaMask)); |
+ __ land(r0, value, Operand(kBinary32MantissaMask)); |
// Extract exponent to r1. OK to clobber r1 now as there are no jumps to |
// the slow case from here. |
- __ mov(r1, Operand(value, LSR, kBinary32MantissaBits)); |
- __ and_(r1, r1, Operand(kBinary32ExponentMask >> kBinary32MantissaBits)); |
+ __ lsr(r1, value, Operand(kBinary32MantissaBits)); |
+ __ land(r1, r1, Operand(kBinary32ExponentMask >> kBinary32MantissaBits)); |
Label exponent_rebiased; |
__ teq(r1, Operand(0x00)); |
__ b(eq, &exponent_rebiased); |
__ teq(r1, Operand(0xff)); |
- __ mov(r1, Operand(0x7ff), LeaveCC, eq); |
+ __ mov(r1, Operand(0x7ff), eq); |
__ b(eq, &exponent_rebiased); |
// Rebias exponent. |
@@ -3938,9 +3821,10 @@ void KeyedLoadStubCompiler::GenerateLoadExternalArray( |
Operand(-kBinary32ExponentBias + HeapNumber::kExponentBias)); |
__ bind(&exponent_rebiased); |
- __ and_(r2, value, Operand(kBinary32SignMask)); |
+ __ land(r2, value, Operand(kBinary32SignMask)); |
value = no_reg; |
- __ orr(r2, r2, Operand(r1, LSL, HeapNumber::kMantissaBitsInTopWord)); |
+ __ lsl(ip, r1, Operand(HeapNumber::kMantissaBitsInTopWord)); |
+ __ orr(r2, r2, ip); |
// Shift mantissa. |
static const int kMantissaShiftForHiWord = |
@@ -3949,8 +3833,9 @@ void KeyedLoadStubCompiler::GenerateLoadExternalArray( |
static const int kMantissaShiftForLoWord = |
kBitsPerInt - kMantissaShiftForHiWord; |
- __ orr(r2, r2, Operand(r0, LSR, kMantissaShiftForHiWord)); |
- __ mov(r0, Operand(r0, LSL, kMantissaShiftForLoWord)); |
+ __ lsr(ip, r0, Operand(kMantissaShiftForHiWord)); |
+ __ orr(r2, r2, ip); |
+ __ lsl(r0, r0, Operand(kMantissaShiftForLoWord)); |
__ str(r2, FieldMemOperand(r3, HeapNumber::kExponentOffset)); |
__ str(r0, FieldMemOperand(r3, HeapNumber::kMantissaOffset)); |
@@ -3959,23 +3844,24 @@ void KeyedLoadStubCompiler::GenerateLoadExternalArray( |
__ Ret(); |
} |
} else if (elements_kind == EXTERNAL_DOUBLE_ELEMENTS) { |
- if (CpuFeatures::IsSupported(VFP2)) { |
- CpuFeatures::Scope scope(VFP2); |
+ if (CpuFeatures::IsSupported(FPU)) { |
// Allocate a HeapNumber for the result. Don't use r0 and r1 as |
// AllocateHeapNumber clobbers all registers - also when jumping due to |
// exhausted young space. |
__ LoadRoot(r6, Heap::kHeapNumberMapRootIndex); |
- __ AllocateHeapNumber(r2, r3, r4, r6, &slow, DONT_TAG_RESULT); |
- __ vstr(d0, r2, HeapNumber::kValueOffset); |
+ __ AllocateHeapNumber(r2, r3, r4, r6, &slow); |
+ ASSERT(Operand(kHeapObjectTag - HeapNumber::kValueOffset).is_int8()); |
+ __ sub(r1, r2, Operand(kHeapObjectTag - HeapNumber::kValueOffset)); |
+ __ dstr(dr0, MemOperand(r1, 0), r1); |
- __ add(r0, r2, Operand(kHeapObjectTag)); |
+ __ mov(r0, r2); |
__ Ret(); |
} else { |
// Allocate a HeapNumber for the result. Don't use r0 and r1 as |
// AllocateHeapNumber clobbers all registers - also when jumping due to |
// exhausted young space. |
__ LoadRoot(r7, Heap::kHeapNumberMapRootIndex); |
- __ AllocateHeapNumber(r4, r5, r6, r7, &slow, TAG_RESULT); |
+ __ AllocateHeapNumber(r4, r5, r6, r7, &slow); |
__ str(r2, FieldMemOperand(r4, HeapNumber::kMantissaOffset)); |
__ str(r3, FieldMemOperand(r4, HeapNumber::kExponentOffset)); |
@@ -3985,7 +3871,7 @@ void KeyedLoadStubCompiler::GenerateLoadExternalArray( |
} else { |
// Tag integer as smi and return it. |
- __ mov(r0, Operand(value, LSL, kSmiTagSize)); |
+ __ lsl(r0, value, Operand(kSmiTagSize)); |
__ Ret(); |
} |
@@ -4033,15 +3919,15 @@ void KeyedStoreStubCompiler::GenerateStoreExternalArray( |
// have been verified by the caller to not be a smi. |
// Check that the key is a smi or a heap number convertible to a smi. |
- GenerateSmiKeyCheck(masm, key, r4, r5, d1, d2, &miss_force_generic); |
+ GenerateSmiKeyCheck(masm, key, r4, r5, dr0, &miss_force_generic); |
__ ldr(r3, FieldMemOperand(receiver, JSObject::kElementsOffset)); |
// Check that the index is in range |
__ ldr(ip, FieldMemOperand(r3, ExternalArray::kLengthOffset)); |
- __ cmp(key, ip); |
+ __ cmphs(key, ip); |
// Unsigned comparison catches both negative and too-large values. |
- __ b(hs, &miss_force_generic); |
+ __ bt(&miss_force_generic); |
// Handle both smis and HeapNumbers in the fast path. Go to the |
// runtime for all other kinds of values. |
@@ -4060,20 +3946,23 @@ void KeyedStoreStubCompiler::GenerateStoreExternalArray( |
switch (elements_kind) { |
case EXTERNAL_PIXEL_ELEMENTS: |
// Clamp the value to [0..255]. |
- __ Usat(r5, 8, Operand(r5)); |
- __ strb(r5, MemOperand(r3, key, LSR, 1)); |
+ __ Usat(r5, 8, r5); |
+ __ lsr(r4, key, Operand(1)); |
+ __ strb(r5, MemOperand(r3, r4)); |
break; |
case EXTERNAL_BYTE_ELEMENTS: |
case EXTERNAL_UNSIGNED_BYTE_ELEMENTS: |
- __ strb(r5, MemOperand(r3, key, LSR, 1)); |
+ __ lsr(r4, key, Operand(1)); |
+ __ strb(r5, MemOperand(r3, r4)); |
break; |
case EXTERNAL_SHORT_ELEMENTS: |
case EXTERNAL_UNSIGNED_SHORT_ELEMENTS: |
- __ strh(r5, MemOperand(r3, key, LSL, 0)); |
+ __ strh(r5, MemOperand(r3, key)); |
break; |
case EXTERNAL_INT_ELEMENTS: |
case EXTERNAL_UNSIGNED_INT_ELEMENTS: |
- __ str(r5, MemOperand(r3, key, LSL, 1)); |
+ __ lsl(r4, key, Operand(1)); |
+ __ str(r5, MemOperand(r3, r4)); |
break; |
case EXTERNAL_FLOAT_ELEMENTS: |
// Perform int-to-float conversion and store to memory. |
@@ -4081,21 +3970,21 @@ void KeyedStoreStubCompiler::GenerateStoreExternalArray( |
StoreIntAsFloat(masm, r3, r4, r5, r6, r7, r9); |
break; |
case EXTERNAL_DOUBLE_ELEMENTS: |
- __ add(r3, r3, Operand(key, LSL, 2)); |
+ __ lsl(r4, key, Operand(2)); |
+ __ add(r3, r3, r4); |
// r3: effective address of the double element |
FloatingPointHelper::Destination destination; |
- if (CpuFeatures::IsSupported(VFP2)) { |
+ if (CpuFeatures::IsSupported(FPU)) { |
destination = FloatingPointHelper::kVFPRegisters; |
} else { |
destination = FloatingPointHelper::kCoreRegisters; |
} |
FloatingPointHelper::ConvertIntToDouble( |
masm, r5, destination, |
- d0, r6, r7, // These are: double_dst, dst1, dst2. |
- r4, s2); // These are: scratch2, single_scratch. |
+ dr0, r6, r7, // These are: double_dst, dst1, dst2. |
+ r4, /*s2*/no_freg); // These are: scratch2, single_scratch. |
if (destination == FloatingPointHelper::kVFPRegisters) { |
- CpuFeatures::Scope scope(VFP2); |
- __ vstr(d0, r3, 0); |
+ __ dstr(dr0, MemOperand(r3, 0)); |
} else { |
__ str(r6, MemOperand(r3, 0)); |
__ str(r7, MemOperand(r3, Register::kSizeInBytes)); |
@@ -4119,7 +4008,7 @@ void KeyedStoreStubCompiler::GenerateStoreExternalArray( |
if (elements_kind != EXTERNAL_PIXEL_ELEMENTS) { |
// r3: external array. |
__ bind(&check_heap_number); |
- __ CompareObjectType(value, r5, r6, HEAP_NUMBER_TYPE); |
+ __ CompareObjectType(value, r5, r6, HEAP_NUMBER_TYPE, eq); |
__ b(ne, &slow); |
__ ldr(r3, FieldMemOperand(r3, ExternalArray::kExternalPointerOffset)); |
@@ -4129,41 +4018,45 @@ void KeyedStoreStubCompiler::GenerateStoreExternalArray( |
// The WebGL specification leaves the behavior of storing NaN and |
// +/-Infinity into integer arrays basically undefined. For more |
// reproducible behavior, convert these to zero. |
- if (CpuFeatures::IsSupported(VFP2)) { |
- CpuFeatures::Scope scope(VFP2); |
- |
+ if (CpuFeatures::IsSupported(FPU)) { |
if (elements_kind == EXTERNAL_FLOAT_ELEMENTS) { |
// vldr requires offset to be a multiple of 4 so we can not |
// include -kHeapObjectTag into it. |
- __ sub(r5, r0, Operand(kHeapObjectTag)); |
- __ vldr(d0, r5, HeapNumber::kValueOffset); |
- __ add(r5, r3, Operand(key, LSL, 1)); |
- __ vcvt_f32_f64(s0, d0); |
- __ vstr(s0, r5, 0); |
+ ASSERT(Operand(kHeapObjectTag - HeapNumber::kValueOffset).is_int8()); |
+ __ sub(r5, r0, Operand(kHeapObjectTag - HeapNumber::kValueOffset)); |
+ __ dldr(dr0, MemOperand(r5, 0), r5); |
+ __ lsl(r5, key, Operand(1)); |
+ __ add(r5, r3, r5); |
+ __ fcnvds(fr0, dr0); |
+ __ fstr(fr0, MemOperand(r5, 0)); |
} else if (elements_kind == EXTERNAL_DOUBLE_ELEMENTS) { |
- __ sub(r5, r0, Operand(kHeapObjectTag)); |
- __ vldr(d0, r5, HeapNumber::kValueOffset); |
- __ add(r5, r3, Operand(key, LSL, 2)); |
- __ vstr(d0, r5, 0); |
+ ASSERT(Operand(kHeapObjectTag - HeapNumber::kValueOffset).is_int8()); |
+ __ sub(r5, r0, Operand(kHeapObjectTag - HeapNumber::kValueOffset)); |
+ __ dldr(dr0, MemOperand(r5, 0), r5); |
+ __ lsl(r5, key, Operand(2)); |
+ __ add(r5, r3, r5); |
+ __ dstr(dr0, MemOperand(r5, 0), r5); |
} else { |
// Hoisted load. vldr requires offset to be a multiple of 4 so we can |
// not include -kHeapObjectTag into it. |
__ sub(r5, value, Operand(kHeapObjectTag)); |
- __ vldr(d0, r5, HeapNumber::kValueOffset); |
- __ EmitECMATruncate(r5, d0, s2, r6, r7, r9); |
+ __ dldr(dr0, MemOperand(r5, HeapNumber::kValueOffset)); |
+ __ EmitECMATruncate(r5, dr0, fr2, r6, r7, r9); |
switch (elements_kind) { |
case EXTERNAL_BYTE_ELEMENTS: |
case EXTERNAL_UNSIGNED_BYTE_ELEMENTS: |
- __ strb(r5, MemOperand(r3, key, LSR, 1)); |
+ __ lsr(ip, key, Operand(1)); |
+ __ strb(r5, MemOperand(r3, ip)); |
break; |
case EXTERNAL_SHORT_ELEMENTS: |
case EXTERNAL_UNSIGNED_SHORT_ELEMENTS: |
- __ strh(r5, MemOperand(r3, key, LSL, 0)); |
+ __ strh(r5, MemOperand(r3, key)); |
break; |
case EXTERNAL_INT_ELEMENTS: |
case EXTERNAL_UNSIGNED_INT_ELEMENTS: |
- __ str(r5, MemOperand(r3, key, LSL, 1)); |
+ __ lsl(ip, key, Operand(1)); |
+ __ str(r5, MemOperand(r3, ip)); |
break; |
case EXTERNAL_PIXEL_ELEMENTS: |
case EXTERNAL_FLOAT_ELEMENTS: |
@@ -4200,49 +4093,62 @@ void KeyedStoreStubCompiler::GenerateStoreExternalArray( |
// Test for all special exponent values: zeros, subnormal numbers, NaNs |
// and infinities. All these should be converted to 0. |
__ mov(r7, Operand(HeapNumber::kExponentMask)); |
- __ and_(r9, r5, Operand(r7), SetCC); |
+ __ land(r9, r5, r7); |
+ __ cmpeq(r9, Operand(0)); |
__ b(eq, &nan_or_infinity_or_zero); |
- __ teq(r9, Operand(r7)); |
- __ mov(r9, Operand(kBinary32ExponentMask), LeaveCC, eq); |
+ __ teq(r9, r7); |
+ __ mov(r9, Operand(kBinary32ExponentMask), eq); |
__ b(eq, &nan_or_infinity_or_zero); |
// Rebias exponent. |
- __ mov(r9, Operand(r9, LSR, HeapNumber::kExponentShift)); |
+ __ lsr(r9, r9, Operand(HeapNumber::kExponentShift)); |
__ add(r9, |
r9, |
Operand(kBinary32ExponentBias - HeapNumber::kExponentBias)); |
- __ cmp(r9, Operand(kBinary32MaxExponent)); |
- __ and_(r5, r5, Operand(HeapNumber::kSignMask), LeaveCC, gt); |
- __ orr(r5, r5, Operand(kBinary32ExponentMask), LeaveCC, gt); |
- __ b(gt, &done); |
+ Label skip1, skip2; |
+ __ cmpgt(r9, Operand(kBinary32MaxExponent)); |
+ __ bf_near(&skip1); |
+ __ land(r5, r5, Operand(HeapNumber::kSignMask)); |
+ __ orr(r5, r5, Operand(kBinary32ExponentMask)); |
+ __ b(&done); |
+ __ bind(&skip1); |
- __ cmp(r9, Operand(kBinary32MinExponent)); |
- __ and_(r5, r5, Operand(HeapNumber::kSignMask), LeaveCC, lt); |
- __ b(lt, &done); |
+ __ cmpge(r9, Operand(kBinary32MinExponent)); |
+ __ bt_near(&skip2); |
+ __ land(r5, r5, Operand(HeapNumber::kSignMask)); |
+ __ b(&done); |
+ __ bind(&skip2); |
- __ and_(r7, r5, Operand(HeapNumber::kSignMask)); |
- __ and_(r5, r5, Operand(HeapNumber::kMantissaMask)); |
- __ orr(r7, r7, Operand(r5, LSL, kMantissaInHiWordShift)); |
- __ orr(r7, r7, Operand(r6, LSR, kMantissaInLoWordShift)); |
- __ orr(r5, r7, Operand(r9, LSL, kBinary32ExponentShift)); |
+ __ land(r7, r5, Operand(HeapNumber::kSignMask)); |
+ __ land(r5, r5, Operand(HeapNumber::kMantissaMask)); |
+ __ lsl(r5, r5, Operand(kMantissaInHiWordShift)); |
+ __ orr(r7, r7, r5); |
+ __ lsr(r5, r6, Operand(kMantissaInLoWordShift)); |
+ __ orr(r7, r7, r5); |
+ __ lsl(r5, r9, Operand(kBinary32ExponentShift)); |
+ __ orr(r5, r7, r5); |
__ bind(&done); |
- __ str(r5, MemOperand(r3, key, LSL, 1)); |
+ __ lsl(ip, key, Operand(1)); |
+ __ str(r5, MemOperand(r3, ip)); |
// Entry registers are intact, r0 holds the value which is the return |
// value. |
__ Ret(); |
__ bind(&nan_or_infinity_or_zero); |
- __ and_(r7, r5, Operand(HeapNumber::kSignMask)); |
- __ and_(r5, r5, Operand(HeapNumber::kMantissaMask)); |
+ __ land(r7, r5, Operand(HeapNumber::kSignMask)); |
+ __ land(r5, r5, Operand(HeapNumber::kMantissaMask)); |
__ orr(r9, r9, r7); |
- __ orr(r9, r9, Operand(r5, LSL, kMantissaInHiWordShift)); |
- __ orr(r5, r9, Operand(r6, LSR, kMantissaInLoWordShift)); |
+ __ lsl(r5, r5, Operand(kMantissaInHiWordShift)); |
+ __ orr(r9, r9, r5); |
+ __ lsr(r5, r6, Operand(kMantissaInLoWordShift)); |
+ __ orr(r5, r9, r5); |
__ b(&done); |
} else if (elements_kind == EXTERNAL_DOUBLE_ELEMENTS) { |
- __ add(r7, r3, Operand(key, LSL, 2)); |
+ __ lsl(r7, key, Operand(2)); |
+ __ add(r7, r3, r7); |
// r7: effective address of destination element. |
__ str(r6, MemOperand(r7, 0)); |
__ str(r5, MemOperand(r7, Register::kSizeInBytes)); |
@@ -4257,56 +4163,65 @@ void KeyedStoreStubCompiler::GenerateStoreExternalArray( |
// Test for all special exponent values: zeros, subnormal numbers, NaNs |
// and infinities. All these should be converted to 0. |
__ mov(r7, Operand(HeapNumber::kExponentMask)); |
- __ and_(r9, r5, Operand(r7), SetCC); |
- __ mov(r5, Operand(0, RelocInfo::NONE), LeaveCC, eq); |
+ __ land(r9, r5, r7); |
+ __ tst(r9, r9); |
+ __ mov(r5, Operand(0, RelocInfo::NONE), eq); |
__ b(eq, &done); |
- __ teq(r9, Operand(r7)); |
- __ mov(r5, Operand(0, RelocInfo::NONE), LeaveCC, eq); |
+ __ teq(r9, r7); |
+ __ mov(r5, Operand(0, RelocInfo::NONE), eq); |
__ b(eq, &done); |
// Unbias exponent. |
- __ mov(r9, Operand(r9, LSR, HeapNumber::kExponentShift)); |
- __ sub(r9, r9, Operand(HeapNumber::kExponentBias), SetCC); |
+ __ lsr(r9, r9, Operand(HeapNumber::kExponentShift)); |
+ __ sub(r9, r9, Operand(HeapNumber::kExponentBias)); |
+ __ cmpge(r9, Operand(0)); |
// If exponent is negative then result is 0. |
- __ mov(r5, Operand(0, RelocInfo::NONE), LeaveCC, mi); |
- __ b(mi, &done); |
+ __ mov(r5, Operand(0, RelocInfo::NONE), f); |
+ __ bf(&done); |
// If exponent is too big then result is minimal value. |
- __ cmp(r9, Operand(meaningfull_bits - 1)); |
- __ mov(r5, Operand(min_value), LeaveCC, ge); |
- __ b(ge, &done); |
+ __ cmpge(r9, Operand(meaningfull_bits - 1)); |
+ __ mov(r5, Operand(min_value), eq); |
+ __ bt(&done); |
- __ and_(r7, r5, Operand(HeapNumber::kSignMask), SetCC); |
- __ and_(r5, r5, Operand(HeapNumber::kMantissaMask)); |
+ __ land(r7, r5, Operand(HeapNumber::kSignMask)); |
+ __ land(r5, r5, Operand(HeapNumber::kMantissaMask)); |
__ orr(r5, r5, Operand(1u << HeapNumber::kMantissaBitsInTopWord)); |
- __ rsb(r9, r9, Operand(HeapNumber::kMantissaBitsInTopWord), SetCC); |
- __ mov(r5, Operand(r5, LSR, r9), LeaveCC, pl); |
- __ b(pl, &sign); |
+ __ rsb(r9, r9, Operand(HeapNumber::kMantissaBitsInTopWord)); |
+ __ cmpge(r9, Operand(0)); |
+ Label skip; |
+ __ bf_near(&skip); |
+ __ lsr(r5, r5, r9); |
+ __ b(&sign); |
+ __ bind(&skip); |
__ rsb(r9, r9, Operand(0, RelocInfo::NONE)); |
- __ mov(r5, Operand(r5, LSL, r9)); |
+ __ lsl(r5, r5, r9); |
__ rsb(r9, r9, Operand(meaningfull_bits)); |
- __ orr(r5, r5, Operand(r6, LSR, r9)); |
+ __ lsr(ip, r6, r9); |
+ __ orr(r5, r5, ip); |
__ bind(&sign); |
__ teq(r7, Operand(0, RelocInfo::NONE)); |
- __ rsb(r5, r5, Operand(0, RelocInfo::NONE), LeaveCC, ne); |
+ __ rsb(r5, r5, Operand(0, RelocInfo::NONE), ne); |
__ bind(&done); |
switch (elements_kind) { |
case EXTERNAL_BYTE_ELEMENTS: |
case EXTERNAL_UNSIGNED_BYTE_ELEMENTS: |
- __ strb(r5, MemOperand(r3, key, LSR, 1)); |
+ __ lsr(ip, key, Operand(1)); |
+ __ strb(r5, MemOperand(r3, ip)); |
break; |
case EXTERNAL_SHORT_ELEMENTS: |
case EXTERNAL_UNSIGNED_SHORT_ELEMENTS: |
- __ strh(r5, MemOperand(r3, key, LSL, 0)); |
+ __ strh(r5, MemOperand(r3, key)); |
break; |
case EXTERNAL_INT_ELEMENTS: |
case EXTERNAL_UNSIGNED_INT_ELEMENTS: |
- __ str(r5, MemOperand(r3, key, LSL, 1)); |
+ __ lsl(ip, key, Operand(1)); |
+ __ str(r5, MemOperand(r3, ip)); |
break; |
case EXTERNAL_PIXEL_ELEMENTS: |
case EXTERNAL_FLOAT_ELEMENTS: |
@@ -4368,7 +4283,7 @@ void KeyedLoadStubCompiler::GenerateLoadFastElement(MacroAssembler* masm) { |
// have been verified by the caller to not be a smi. |
// Check that the key is a smi or a heap number convertible to a smi. |
- GenerateSmiKeyCheck(masm, r0, r4, r5, d1, d2, &miss_force_generic); |
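+ // The SH4 helper takes a single double scratch register (dr0) in place of |
+ // ARM's d1/d2 pair. |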
+ GenerateSmiKeyCheck(masm, r0, r4, r5, dr0, &miss_force_generic); |
// Get the elements array. |
__ ldr(r2, FieldMemOperand(r1, JSObject::kElementsOffset)); |
@@ -4376,14 +4291,14 @@ void KeyedLoadStubCompiler::GenerateLoadFastElement(MacroAssembler* masm) { |
// Check that the key is within bounds. |
__ ldr(r3, FieldMemOperand(r2, FixedArray::kLengthOffset)); |
- __ cmp(r0, Operand(r3)); |
- __ b(hs, &miss_force_generic); |
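+ // cmphs sets T when the unsigned key is >= the length, so bt takes the |
+ // miss path; as on ARM, the unsigned compare also rejects negative keys. |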
+ __ cmphs(r0, r3); |
+ __ bt(&miss_force_generic); |
// Load the result and make sure it's not the hole. |
__ add(r3, r2, Operand(FixedArray::kHeaderSize - kHeapObjectTag)); |
STATIC_ASSERT(kSmiTag == 0 && kSmiTagSize < kPointerSizeLog2); |
- __ ldr(r4, |
- MemOperand(r3, r0, LSL, kPointerSizeLog2 - kSmiTagSize)); |
+ __ lsl(r4, r0, Operand(kPointerSizeLog2 - kSmiTagSize)); |
+ __ ldr(r4, MemOperand(r3, r4)); |
__ LoadRoot(ip, Heap::kTheHoleValueRootIndex); |
__ cmp(r4, ip); |
__ b(eq, &miss_force_generic); |
@@ -4420,7 +4335,7 @@ void KeyedLoadStubCompiler::GenerateLoadFastDoubleElement( |
// have been verified by the caller to not be a smi. |
// Check that the key is a smi or a heap number convertible to a smi. |
- GenerateSmiKeyCheck(masm, key_reg, r4, r5, d1, d2, &miss_force_generic); |
+ GenerateSmiKeyCheck(masm, key_reg, r4, r5, dr0, &miss_force_generic); |
// Get the elements array. |
__ ldr(elements_reg, |
@@ -4428,21 +4343,21 @@ void KeyedLoadStubCompiler::GenerateLoadFastDoubleElement( |
// Check that the key is within bounds. |
__ ldr(scratch, FieldMemOperand(elements_reg, FixedArray::kLengthOffset)); |
- __ cmp(key_reg, Operand(scratch)); |
- __ b(hs, &miss_force_generic); |
+ __ cmphs(key_reg, scratch); |
+ __ bt(&miss_force_generic); |
// Load the upper word of the double in the fixed array and test for NaN. |
- __ add(indexed_double_offset, elements_reg, |
- Operand(key_reg, LSL, kDoubleSizeLog2 - kSmiTagSize)); |
+ __ lsl(scratch, key_reg, Operand(kDoubleSizeLog2 - kSmiTagSize)); |
+ __ add(indexed_double_offset, elements_reg, scratch); |
uint32_t upper_32_offset = FixedArray::kHeaderSize + sizeof(kHoleNanLower32); |
__ ldr(scratch, FieldMemOperand(indexed_double_offset, upper_32_offset)); |
- __ cmp(scratch, Operand(kHoleNanUpper32)); |
- __ b(&miss_force_generic, eq); |
+ __ cmpeq(scratch, Operand(kHoleNanUpper32)); |
+ __ bt(&miss_force_generic); |
// Non-NaN. Allocate a new heap number and copy the double value into it. |
__ LoadRoot(heap_number_map, Heap::kHeapNumberMapRootIndex); |
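+ // This port's AllocateHeapNumber takes no tagging-mode argument; it is |
+ // assumed to always return a tagged result (ARM's TAG_RESULT behaviour). |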
__ AllocateHeapNumber(heap_number_reg, scratch2, scratch3, |
- heap_number_map, &slow_allocate_heapnumber, TAG_RESULT); |
+ heap_number_map, &slow_allocate_heapnumber); |
// Don't need to reload the upper 32 bits of the double, it's already in |
// scratch. |
@@ -4496,7 +4411,7 @@ void KeyedStoreStubCompiler::GenerateStoreFastElement( |
// have been verified by the caller to not be a smi. |
// Check that the key is a smi or a heap number convertible to a smi. |
- GenerateSmiKeyCheck(masm, key_reg, r4, r5, d1, d2, &miss_force_generic); |
+ GenerateSmiKeyCheck(masm, key_reg, r4, r5, dr0, &miss_force_generic); |
if (IsFastSmiElementsKind(elements_kind)) { |
__ JumpIfNotSmi(value_reg, &transition_elements_kind); |
@@ -4511,11 +4426,11 @@ void KeyedStoreStubCompiler::GenerateStoreFastElement( |
__ ldr(scratch, FieldMemOperand(elements_reg, FixedArray::kLengthOffset)); |
} |
// Compare smis. |
- __ cmp(key_reg, scratch); |
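+ // cmphs sets T for key >= length; b(eq, ...) branches on T, equivalent to |
+ // the bt form used elsewhere in this file. |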
+ __ cmphs(key_reg, scratch); |
if (is_js_array && grow_mode == ALLOW_JSARRAY_GROWTH) { |
- __ b(hs, &grow); |
+ __ b(eq, &grow); |
} else { |
- __ b(hs, &miss_force_generic); |
+ __ b(eq, &miss_force_generic); |
} |
// Make sure elements is a fast element array, not 'cow'. |
@@ -4531,9 +4446,8 @@ void KeyedStoreStubCompiler::GenerateStoreFastElement( |
elements_reg, |
Operand(FixedArray::kHeaderSize - kHeapObjectTag)); |
STATIC_ASSERT(kSmiTag == 0 && kSmiTagSize < kPointerSizeLog2); |
- __ add(scratch, |
- scratch, |
- Operand(key_reg, LSL, kPointerSizeLog2 - kSmiTagSize)); |
+ __ lsl(scratch2, key_reg, Operand(kPointerSizeLog2 - kSmiTagSize)); |
+ __ add(scratch, scratch, scratch2); |
__ str(value_reg, MemOperand(scratch)); |
} else { |
ASSERT(IsFastObjectElementsKind(elements_kind)); |
@@ -4541,9 +4455,8 @@ void KeyedStoreStubCompiler::GenerateStoreFastElement( |
elements_reg, |
Operand(FixedArray::kHeaderSize - kHeapObjectTag)); |
STATIC_ASSERT(kSmiTag == 0 && kSmiTagSize < kPointerSizeLog2); |
- __ add(scratch, |
- scratch, |
- Operand(key_reg, LSL, kPointerSizeLog2 - kSmiTagSize)); |
+ __ lsl(scratch2, key_reg, Operand(kPointerSizeLog2 - kSmiTagSize)); |
+ __ add(scratch, scratch, scratch2); |
__ str(value_reg, MemOperand(scratch)); |
__ mov(receiver_reg, value_reg); |
__ RecordWrite(elements_reg, // Object. |
@@ -4619,8 +4532,8 @@ void KeyedStoreStubCompiler::GenerateStoreFastElement( |
DONT_DO_SMI_CHECK); |
__ ldr(scratch, FieldMemOperand(elements_reg, FixedArray::kLengthOffset)); |
- __ cmp(length_reg, scratch); |
- __ b(hs, &slow); |
+ __ cmphs(length_reg, scratch); |
+ __ bt(&slow); |
// Grow the array and finish the store. |
__ add(length_reg, length_reg, Operand(Smi::FromInt(1))); |
@@ -4664,7 +4577,7 @@ void KeyedStoreStubCompiler::GenerateStoreFastDoubleElement( |
// have been verified by the caller to not be a smi. |
// Check that the key is a smi or a heap number convertible to a smi. |
- GenerateSmiKeyCheck(masm, key_reg, r4, r5, d1, d2, &miss_force_generic); |
+ GenerateSmiKeyCheck(masm, key_reg, r4, r5, dr0, &miss_force_generic); |
__ ldr(elements_reg, |
FieldMemOperand(receiver_reg, JSObject::kElementsOffset)); |
@@ -4678,11 +4591,11 @@ void KeyedStoreStubCompiler::GenerateStoreFastDoubleElement( |
} |
// Compare smis, unsigned compare catches both negative and out-of-bound |
// indexes. |
- __ cmp(key_reg, scratch1); |
+ __ cmphs(key_reg, scratch1); |
if (grow_mode == ALLOW_JSARRAY_GROWTH) { |
- __ b(hs, &grow); |
+ __ b(eq, &grow); |
} else { |
- __ b(hs, &miss_force_generic); |
+ __ b(eq, &miss_force_generic); |
} |
__ bind(&finish_store); |
@@ -4764,8 +4677,8 @@ void KeyedStoreStubCompiler::GenerateStoreFastDoubleElement( |
// Make sure that the backing store can hold additional elements. |
__ ldr(scratch1, |
FieldMemOperand(elements_reg, FixedDoubleArray::kLengthOffset)); |
- __ cmp(length_reg, scratch1); |
- __ b(hs, &slow); |
+ __ cmphs(length_reg, scratch1); |
+ __ b(eq, &slow); |
// Grow the array and finish the store. |
__ add(length_reg, length_reg, Operand(Smi::FromInt(1))); |
@@ -4783,4 +4696,4 @@ void KeyedStoreStubCompiler::GenerateStoreFastDoubleElement( |
} } // namespace v8::internal |
-#endif // V8_TARGET_ARCH_ARM |
+#endif // V8_TARGET_ARCH_SH4 |