| Index: src/arm/code-stubs-arm.cc
|
| ===================================================================
|
| --- src/arm/code-stubs-arm.cc (revision 6800)
|
| +++ src/arm/code-stubs-arm.cc (working copy)
|
| @@ -1,4 +1,4 @@
|
| -// Copyright 2010 the V8 project authors. All rights reserved.
|
| +// Copyright 2011 the V8 project authors. All rights reserved.
|
| // Redistribution and use in source and binary forms, with or without
|
| // modification, are permitted provided that the following conditions are
|
| // met:
|
| @@ -41,7 +41,7 @@
|
|
|
| static void EmitIdenticalObjectComparison(MacroAssembler* masm,
|
| Label* slow,
|
| - Condition cc,
|
| + Condition cond,
|
| bool never_nan_nan);
|
| static void EmitSmiNonsmiComparison(MacroAssembler* masm,
|
| Register lhs,
|
| @@ -49,12 +49,32 @@
|
| Label* lhs_not_nan,
|
| Label* slow,
|
| bool strict);
|
| -static void EmitTwoNonNanDoubleComparison(MacroAssembler* masm, Condition cc);
|
| +static void EmitTwoNonNanDoubleComparison(MacroAssembler* masm, Condition cond);
|
| static void EmitStrictTwoHeapObjectCompare(MacroAssembler* masm,
|
| Register lhs,
|
| Register rhs);
|
|
|
|
|
| +void ToNumberStub::Generate(MacroAssembler* masm) {
|
| + // The ToNumber stub takes one argument in r0.
|
| + Label check_heap_number, call_builtin;
|
| + __ tst(r0, Operand(kSmiTagMask));
|
| + __ b(ne, &check_heap_number);
|
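| + // The argument is a smi, and a smi is already a number; return it as-is.
|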
| + __ Ret();
|
| +
|
| + __ bind(&check_heap_number);
|
| + __ ldr(r1, FieldMemOperand(r0, HeapObject::kMapOffset));
|
| + __ LoadRoot(ip, Heap::kHeapNumberMapRootIndex);
|
| + __ cmp(r1, ip);
|
| + __ b(ne, &call_builtin);
|
| + __ Ret();
|
| +
|
| + __ bind(&call_builtin);
|
| + __ push(r0);
|
| + __ InvokeBuiltin(Builtins::TO_NUMBER, JUMP_JS);
|
| +}
|
| +
|
| +
|
| void FastNewClosureStub::Generate(MacroAssembler* masm) {
|
| // Create a new closure from the given function info in new
|
| // space. Set the context to the current context in cp.
|
| @@ -344,6 +364,203 @@
|
| }
|
|
|
|
|
| +class FloatingPointHelper : public AllStatic {
|
| + public:
|
| +
|
| + enum Destination {
|
| + kVFPRegisters,
|
| + kCoreRegisters
|
| + };
|
| +
|
| +
|
| + // Loads smis from r0 and r1 (right and left in binary operations) into
|
| + // floating point registers. Depending on the destination the values end up
|
| + // either in d7 and d6 or in r2/r3 and r0/r1, respectively. If the
|
| + // destination is floating point registers, VFP3 must be supported. If core
|
| + // registers are requested when VFP3 is supported, d6 and d7 will be
|
| + // scratched.
|
| + static void LoadSmis(MacroAssembler* masm,
|
| + Destination destination,
|
| + Register scratch1,
|
| + Register scratch2);
|
| +
|
| + // Loads objects from r0 and r1 (right and left in binary operations) into
|
| + // floating point registers. Depending on the destination the values end up
|
| + // either in d7 and d6 or in r2/r3 and r0/r1, respectively. If the
|
| + // destination is floating point registers, VFP3 must be supported. If core
|
| + // registers are requested when VFP3 is supported, d6 and d7 will still be
|
| + // scratched. If either r0 or r1 is not a number (neither a smi nor a heap
|
| + // number object), the not_number label is jumped to with r0 and r1 intact.
|
| + static void LoadOperands(MacroAssembler* masm,
|
| + FloatingPointHelper::Destination destination,
|
| + Register heap_number_map,
|
| + Register scratch1,
|
| + Register scratch2,
|
| + Label* not_number);
|
| +
|
| + // Loads the number from object into dst as a 32-bit integer if possible. If
|
| + // the object is not a 32-bit integer, control continues at the label
|
| + // not_int32. If VFP is supported, double_scratch is used but scratch2 is not.
|
| + static void LoadNumberAsInteger(MacroAssembler* masm,
|
| + Register object,
|
| + Register dst,
|
| + Register heap_number_map,
|
| + Register scratch1,
|
| + Register scratch2,
|
| + DwVfpRegister double_scratch,
|
| + Label* not_int32);
|
| +
|
| + private:
|
| + static void LoadNumber(MacroAssembler* masm,
|
| + FloatingPointHelper::Destination destination,
|
| + Register object,
|
| + DwVfpRegister dst,
|
| + Register dst1,
|
| + Register dst2,
|
| + Register heap_number_map,
|
| + Register scratch1,
|
| + Register scratch2,
|
| + Label* not_number);
|
| +};
|
| +
|
| +
|
| +void FloatingPointHelper::LoadSmis(MacroAssembler* masm,
|
| + FloatingPointHelper::Destination destination,
|
| + Register scratch1,
|
| + Register scratch2) {
|
| + if (CpuFeatures::IsSupported(VFP3)) {
|
| + CpuFeatures::Scope scope(VFP3);
|
| + __ mov(scratch1, Operand(r0, ASR, kSmiTagSize));
|
| + __ vmov(d7.high(), scratch1);
|
| + __ vcvt_f64_s32(d7, d7.high());
|
| + __ mov(scratch1, Operand(r1, ASR, kSmiTagSize));
|
| + __ vmov(d6.high(), scratch1);
|
| + __ vcvt_f64_s32(d6, d6.high());
|
| + if (destination == kCoreRegisters) {
|
| + __ vmov(r2, r3, d7);
|
| + __ vmov(r0, r1, d6);
|
| + }
|
| + } else {
|
| + ASSERT(destination == kCoreRegisters);
|
| + // Write Smi from r0 to r3 and r2 in double format.
|
| + __ mov(scratch1, Operand(r0));
|
| + ConvertToDoubleStub stub1(r3, r2, scratch1, scratch2);
|
| + __ push(lr);
|
| + __ Call(stub1.GetCode(), RelocInfo::CODE_TARGET);
|
| + // Write Smi from r1 to r1 and r0 in double format.
|
| + __ mov(scratch1, Operand(r1));
|
| + ConvertToDoubleStub stub2(r1, r0, scratch1, scratch2);
|
| + __ Call(stub2.GetCode(), RelocInfo::CODE_TARGET);
|
| + __ pop(lr);
|
| + }
|
| +}
|
| +
|
| +
|
| +void FloatingPointHelper::LoadOperands(
|
| + MacroAssembler* masm,
|
| + FloatingPointHelper::Destination destination,
|
| + Register heap_number_map,
|
| + Register scratch1,
|
| + Register scratch2,
|
| + Label* slow) {
|
| +
|
| + // Load right operand (r0) to d7 or r2/r3.
|
| + LoadNumber(masm, destination,
|
| + r0, d7, r2, r3, heap_number_map, scratch1, scratch2, slow);
|
| +
|
| + // Load left operand (r1) to d6 or r0/r1.
|
| + LoadNumber(masm, destination,
|
| + r1, d6, r0, r1, heap_number_map, scratch1, scratch2, slow);
|
| +}
|
| +
|
| +
|
| +void FloatingPointHelper::LoadNumber(MacroAssembler* masm,
|
| + Destination destination,
|
| + Register object,
|
| + DwVfpRegister dst,
|
| + Register dst1,
|
| + Register dst2,
|
| + Register heap_number_map,
|
| + Register scratch1,
|
| + Register scratch2,
|
| + Label* not_number) {
|
| + if (FLAG_debug_code) {
|
| + __ AbortIfNotRootValue(heap_number_map,
|
| + Heap::kHeapNumberMapRootIndex,
|
| + "HeapNumberMap register clobbered.");
|
| + }
|
| +
|
| + Label is_smi, done;
|
| +
|
| + __ JumpIfSmi(object, &is_smi);
|
| + __ JumpIfNotHeapNumber(object, heap_number_map, scratch1, not_number);
|
| +
|
| + // Handle loading a double from a heap number.
|
| + if (CpuFeatures::IsSupported(VFP3) && destination == kVFPRegisters) {
|
| + CpuFeatures::Scope scope(VFP3);
|
| + // Load the double from tagged HeapNumber to double register.
|
| + __ sub(scratch1, object, Operand(kHeapObjectTag));
|
| + __ vldr(dst, scratch1, HeapNumber::kValueOffset);
|
| + } else {
|
| + ASSERT(destination == kCoreRegisters);
|
| + // Load the double from heap number to dst1 and dst2 in double format.
|
| + __ Ldrd(dst1, dst2, FieldMemOperand(object, HeapNumber::kValueOffset));
|
| + }
|
| + __ jmp(&done);
|
| +
|
| + // Handle loading a double from a smi.
|
| + __ bind(&is_smi);
|
| + if (CpuFeatures::IsSupported(VFP3)) {
|
| + CpuFeatures::Scope scope(VFP3);
|
| + // Convert smi to double using VFP instructions.
|
| + __ SmiUntag(scratch1, object);
|
| + __ vmov(dst.high(), scratch1);
|
| + __ vcvt_f64_s32(dst, dst.high());
|
| + if (destination == kCoreRegisters) {
|
| + // Load the converted smi to dst1 and dst2 in double format.
|
| + __ vmov(dst1, dst2, dst);
|
| + }
|
| + } else {
|
| + ASSERT(destination == kCoreRegisters);
|
| + // Write smi to dst1 and dst2 double format.
|
| + __ mov(scratch1, Operand(object));
|
| + ConvertToDoubleStub stub(dst2, dst1, scratch1, scratch2);
|
| + __ push(lr);
|
| + __ Call(stub.GetCode(), RelocInfo::CODE_TARGET);
|
| + __ pop(lr);
|
| + }
|
| +
|
| + __ bind(&done);
|
| +}
|
| +
|
| +
|
| +void FloatingPointHelper::LoadNumberAsInteger(MacroAssembler* masm,
|
| + Register object,
|
| + Register dst,
|
| + Register heap_number_map,
|
| + Register scratch1,
|
| + Register scratch2,
|
| + DwVfpRegister double_scratch,
|
| + Label* not_int32) {
|
| + if (FLAG_debug_code) {
|
| + __ AbortIfNotRootValue(heap_number_map,
|
| + Heap::kHeapNumberMapRootIndex,
|
| + "HeapNumberMap register clobbered.");
|
| + }
|
| + Label is_smi, done;
|
| + __ JumpIfSmi(object, &is_smi);
|
| + __ ldr(scratch1, FieldMemOperand(object, HeapNumber::kMapOffset));
|
| + __ cmp(scratch1, heap_number_map);
|
| + __ b(ne, not_int32);
|
| + __ ConvertToInt32(
|
| + object, dst, scratch1, scratch2, double_scratch, not_int32);
|
| + __ jmp(&done);
|
| + __ bind(&is_smi);
|
| + __ SmiUntag(dst, object);
|
| + __ bind(&done);
|
| +}
|
| +
|
| +
|
| +
|
| // See comment for class.
|
| void WriteInt32ToHeapNumberStub::Generate(MacroAssembler* masm) {
|
| Label max_negative_int;
|
| @@ -395,7 +612,7 @@
|
| // for "identity and not NaN".
|
| static void EmitIdenticalObjectComparison(MacroAssembler* masm,
|
| Label* slow,
|
| - Condition cc,
|
| + Condition cond,
|
| bool never_nan_nan) {
|
| Label not_identical;
|
| Label heap_number, return_equal;
|
| @@ -404,31 +621,31 @@
|
|
|
| // The two objects are identical. If we know that one of them isn't NaN then
|
| // we now know they test equal.
|
| - if (cc != eq || !never_nan_nan) {
|
| + if (cond != eq || !never_nan_nan) {
|
| // Test for NaN. Sadly, we can't just compare to Factory::nan_value(),
|
| // so we do the second best thing - test it ourselves.
|
| // They are both equal and they are not both Smis so both of them are not
|
| // Smis. If it's not a heap number, then return equal.
|
| - if (cc == lt || cc == gt) {
|
| + if (cond == lt || cond == gt) {
|
| __ CompareObjectType(r0, r4, r4, FIRST_JS_OBJECT_TYPE);
|
| __ b(ge, slow);
|
| } else {
|
| __ CompareObjectType(r0, r4, r4, HEAP_NUMBER_TYPE);
|
| __ b(eq, &heap_number);
|
| // Comparing JS objects with <=, >= is complicated.
|
| - if (cc != eq) {
|
| + if (cond != eq) {
|
| __ cmp(r4, Operand(FIRST_JS_OBJECT_TYPE));
|
| __ b(ge, slow);
|
| // Normally here we fall through to return_equal, but undefined is
|
| // special: (undefined == undefined) == true, but
|
| // (undefined <= undefined) == false! See ECMAScript 11.8.5.
|
| - if (cc == le || cc == ge) {
|
| + if (cond == le || cond == ge) {
|
| __ cmp(r4, Operand(ODDBALL_TYPE));
|
| __ b(ne, &return_equal);
|
| __ LoadRoot(r2, Heap::kUndefinedValueRootIndex);
|
| __ cmp(r0, r2);
|
| __ b(ne, &return_equal);
|
| - if (cc == le) {
|
| + if (cond == le) {
|
| // undefined <= undefined should fail.
|
| __ mov(r0, Operand(GREATER));
|
| } else {
|
| @@ -442,20 +659,20 @@
|
| }
|
|
|
| __ bind(&return_equal);
|
| - if (cc == lt) {
|
| + if (cond == lt) {
|
| __ mov(r0, Operand(GREATER)); // Things aren't less than themselves.
|
| - } else if (cc == gt) {
|
| + } else if (cond == gt) {
|
| __ mov(r0, Operand(LESS)); // Things aren't greater than themselves.
|
| } else {
|
| __ mov(r0, Operand(EQUAL)); // Things are <=, >=, ==, === themselves.
|
| }
|
| __ Ret();
|
|
|
| - if (cc != eq || !never_nan_nan) {
|
| + if (cond != eq || !never_nan_nan) {
|
| // For less and greater we don't have to check for NaN since the result of
|
| // x < x is false regardless. For the others here is some code to check
|
| // for NaN.
|
| - if (cc != lt && cc != gt) {
|
| + if (cond != lt && cond != gt) {
|
| __ bind(&heap_number);
|
| // It is a heap number, so return non-equal if it's NaN and equal if it's
|
| // not NaN.
|
| @@ -479,10 +696,10 @@
|
| // if all bits in mantissa are zero (it's an Infinity) and non-zero if
|
| // not (it's a NaN). For <= and >= we need to load r0 with the failing
|
| // value if it's a NaN.
|
| - if (cc != eq) {
|
| + if (cond != eq) {
|
| // All-zero means Infinity means equal.
|
| __ Ret(eq);
|
| - if (cc == le) {
|
| + if (cond == le) {
|
| __ mov(r0, Operand(GREATER)); // NaN <= NaN should fail.
|
| } else {
|
| __ mov(r0, Operand(LESS)); // NaN >= NaN should fail.
|
| @@ -589,7 +806,7 @@
|
| }
|
|
|
|
|
| -void EmitNanCheck(MacroAssembler* masm, Label* lhs_not_nan, Condition cc) {
|
| +void EmitNanCheck(MacroAssembler* masm, Label* lhs_not_nan, Condition cond) {
|
| bool exp_first = (HeapNumber::kExponentOffset == HeapNumber::kValueOffset);
|
| Register rhs_exponent = exp_first ? r0 : r1;
|
| Register lhs_exponent = exp_first ? r2 : r3;
|
| @@ -629,7 +846,7 @@
|
| __ bind(&one_is_nan);
|
| // NaN comparisons always fail.
|
| // Load whatever we need in r0 to make the comparison fail.
|
| - if (cc == lt || cc == le) {
|
| + if (cond == lt || cond == le) {
|
| __ mov(r0, Operand(GREATER));
|
| } else {
|
| __ mov(r0, Operand(LESS));
|
| @@ -641,7 +858,8 @@
|
|
|
|
|
| // See comment at call site.
|
| -static void EmitTwoNonNanDoubleComparison(MacroAssembler* masm, Condition cc) {
|
| +static void EmitTwoNonNanDoubleComparison(MacroAssembler* masm,
|
| + Condition cond) {
|
| bool exp_first = (HeapNumber::kExponentOffset == HeapNumber::kValueOffset);
|
| Register rhs_exponent = exp_first ? r0 : r1;
|
| Register lhs_exponent = exp_first ? r2 : r3;
|
| @@ -649,7 +867,7 @@
|
| Register lhs_mantissa = exp_first ? r3 : r2;
|
|
|
| // r0, r1, r2, r3 have the two doubles. Neither is a NaN.
|
| - if (cc == eq) {
|
| + if (cond == eq) {
|
| // Doubles are not equal unless they have the same bit pattern.
|
| // Exception: 0 and -0.
|
| __ cmp(rhs_mantissa, Operand(lhs_mantissa));
|
| @@ -835,7 +1053,7 @@
|
| Label is_smi;
|
| Label load_result_from_cache;
|
| if (!object_is_smi) {
|
| - __ BranchOnSmi(object, &is_smi);
|
| + __ JumpIfSmi(object, &is_smi);
|
| if (CpuFeatures::IsSupported(VFP3)) {
|
| CpuFeatures::Scope scope(VFP3);
|
| __ CheckMap(object,
|
| @@ -861,13 +1079,12 @@
|
| Register probe = mask;
|
| __ ldr(probe,
|
| FieldMemOperand(scratch1, FixedArray::kHeaderSize));
|
| - __ BranchOnSmi(probe, not_found);
|
| + __ JumpIfSmi(probe, not_found);
|
| __ sub(scratch2, object, Operand(kHeapObjectTag));
|
| __ vldr(d0, scratch2, HeapNumber::kValueOffset);
|
| __ sub(probe, probe, Operand(kHeapObjectTag));
|
| __ vldr(d1, probe, HeapNumber::kValueOffset);
|
| - __ vcmp(d0, d1);
|
| - __ vmrs(pc);
|
| + __ VFPCompareAndSetFlags(d0, d1);
|
| __ b(ne, not_found); // The cache did not contain this value.
|
| __ b(&load_result_from_cache);
|
| } else {
|
| @@ -939,7 +1156,7 @@
|
| } else if (FLAG_debug_code) {
|
| __ orr(r2, r1, r0);
|
| __ tst(r2, Operand(kSmiTagMask));
|
| - __ Assert(nz, "CompareStub: unexpected smi operands.");
|
| + __ Assert(ne, "CompareStub: unexpected smi operands.");
|
| }
|
|
|
| // NOTICE! This code is only reached after a smi-fast-case check, so
|
| @@ -975,8 +1192,7 @@
|
| CpuFeatures::Scope scope(VFP3);
|
| Label no_nan;
|
| // ARMv7 VFP3 instructions to implement double precision comparison.
|
| - __ vcmp(d7, d6);
|
| - __ vmrs(pc); // Move vector status bits to normal status bits.
|
| + __ VFPCompareAndSetFlags(d7, d6);
|
| Label nan;
|
| __ b(vs, &nan);
|
| __ mov(r0, Operand(EQUAL), LeaveCC, eq);
|
| @@ -1082,7 +1298,7 @@
|
| void ToBooleanStub::Generate(MacroAssembler* masm) {
|
| Label false_result;
|
| Label not_heap_number;
|
| - Register scratch = r7;
|
| + Register scratch = r9.is(tos_) ? r7 : r9;
|
|
|
| __ LoadRoot(ip, Heap::kNullValueRootIndex);
|
| __ cmp(tos_, ip);
|
| @@ -1096,8 +1312,7 @@
|
|
|
| __ sub(ip, tos_, Operand(kHeapObjectTag));
|
| __ vldr(d1, ip, HeapNumber::kValueOffset);
|
| - __ vcmp(d1, 0.0);
|
| - __ vmrs(pc);
|
| + __ VFPCompareAndSetFlags(d1, 0.0);
|
| // "tos_" is a register, and contains a non zero value by default.
|
| // Hence we only need to overwrite "tos_" with zero to return false for
|
| // FP_ZERO or FP_NAN cases. Otherwise, by default it returns true.
|
| @@ -1378,7 +1593,7 @@
|
| __ sub(r0, r5, Operand(kHeapObjectTag));
|
| __ vstr(d5, r0, HeapNumber::kValueOffset);
|
| __ add(r0, r0, Operand(kHeapObjectTag));
|
| - __ mov(pc, lr);
|
| + __ Ret();
|
| } else {
|
| // If we did not inline the operation, then the arguments are in:
|
| // r0: Left value (least significant part of mantissa).
|
| @@ -1508,7 +1723,7 @@
|
| __ ldr(r4, FieldMemOperand(lhs, HeapNumber::kMapOffset));
|
| __ cmp(r4, heap_number_map);
|
| __ b(ne, &slow);
|
| - __ ConvertToInt32(lhs, r3, r5, r4, &slow);
|
| + __ ConvertToInt32(lhs, r3, r5, r4, d0, &slow);
|
| __ jmp(&done_checking_lhs);
|
| __ bind(&lhs_is_smi);
|
| __ mov(r3, Operand(lhs, ASR, 1));
|
| @@ -1519,7 +1734,7 @@
|
| __ ldr(r4, FieldMemOperand(rhs, HeapNumber::kMapOffset));
|
| __ cmp(r4, heap_number_map);
|
| __ b(ne, &slow);
|
| - __ ConvertToInt32(rhs, r2, r5, r4, &slow);
|
| + __ ConvertToInt32(rhs, r2, r5, r4, d0, &slow);
|
| __ jmp(&done_checking_rhs);
|
| __ bind(&rhs_is_smi);
|
| __ mov(r2, Operand(rhs, ASR, 1));
|
| @@ -1963,7 +2178,7 @@
|
| Label not_smi;
|
| if (ShouldGenerateSmiCode() && specialized_on_rhs_) {
|
| Label lhs_is_unsuitable;
|
| - __ BranchOnNotSmi(lhs, ¬_smi);
|
| + __ JumpIfNotSmi(lhs, ¬_smi);
|
| if (IsPowerOf2(constant_rhs_)) {
|
| if (op_ == Token::MOD) {
|
| __ and_(rhs,
|
| @@ -2210,11 +2425,641 @@
|
| Handle<Code> GetTypeRecordingBinaryOpStub(int key,
|
| TRBinaryOpIC::TypeInfo type_info,
|
| TRBinaryOpIC::TypeInfo result_type_info) {
|
| + TypeRecordingBinaryOpStub stub(key, type_info, result_type_info);
|
| + return stub.GetCode();
|
| +}
|
| +
|
| +
|
| +void TypeRecordingBinaryOpStub::GenerateTypeTransition(MacroAssembler* masm) {
|
| + Label get_result;
|
| +
|
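| + // Push the two operands, then the stub's minor key, operation and operand
|
| + // type as smis; the patch routine below takes all five as arguments.
|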
| + __ Push(r1, r0);
|
| +
|
| + __ mov(r2, Operand(Smi::FromInt(MinorKey())));
|
| + __ mov(r1, Operand(Smi::FromInt(op_)));
|
| + __ mov(r0, Operand(Smi::FromInt(operands_type_)));
|
| + __ Push(r2, r1, r0);
|
| +
|
| + __ TailCallExternalReference(
|
| + ExternalReference(IC_Utility(IC::kTypeRecordingBinaryOp_Patch)),
|
| + 5,
|
| + 1);
|
| +}
|
| +
|
| +
|
| +void TypeRecordingBinaryOpStub::GenerateTypeTransitionWithSavedArgs(
|
| + MacroAssembler* masm) {
|
| UNIMPLEMENTED();
|
| - return Handle<Code>::null();
|
| }
|
|
|
|
|
| +void TypeRecordingBinaryOpStub::Generate(MacroAssembler* masm) {
|
| + switch (operands_type_) {
|
| + case TRBinaryOpIC::UNINITIALIZED:
|
| + GenerateTypeTransition(masm);
|
| + break;
|
| + case TRBinaryOpIC::SMI:
|
| + GenerateSmiStub(masm);
|
| + break;
|
| + case TRBinaryOpIC::INT32:
|
| + GenerateInt32Stub(masm);
|
| + break;
|
| + case TRBinaryOpIC::HEAP_NUMBER:
|
| + GenerateHeapNumberStub(masm);
|
| + break;
|
| + case TRBinaryOpIC::STRING:
|
| + GenerateStringStub(masm);
|
| + break;
|
| + case TRBinaryOpIC::GENERIC:
|
| + GenerateGeneric(masm);
|
| + break;
|
| + default:
|
| + UNREACHABLE();
|
| + }
|
| +}
|
| +
|
| +
|
| +const char* TypeRecordingBinaryOpStub::GetName() {
|
| + if (name_ != NULL) return name_;
|
| + const int kMaxNameLength = 100;
|
| + name_ = Bootstrapper::AllocateAutoDeletedArray(kMaxNameLength);
|
| + if (name_ == NULL) return "OOM";
|
| + const char* op_name = Token::Name(op_);
|
| + const char* overwrite_name;
|
| + switch (mode_) {
|
| + case NO_OVERWRITE: overwrite_name = "Alloc"; break;
|
| + case OVERWRITE_RIGHT: overwrite_name = "OverwriteRight"; break;
|
| + case OVERWRITE_LEFT: overwrite_name = "OverwriteLeft"; break;
|
| + default: overwrite_name = "UnknownOverwrite"; break;
|
| + }
|
| +
|
| + OS::SNPrintF(Vector<char>(name_, kMaxNameLength),
|
| + "TypeRecordingBinaryOpStub_%s_%s_%s",
|
| + op_name,
|
| + overwrite_name,
|
| + TRBinaryOpIC::GetName(operands_type_));
|
| + return name_;
|
| +}
|
| +
|
| +
|
| +void TypeRecordingBinaryOpStub::GenerateSmiSmiOperation(
|
| + MacroAssembler* masm) {
|
| + Register left = r1;
|
| + Register right = r0;
|
| + Register scratch1 = r7;
|
| + Register scratch2 = r9;
|
| +
|
| + ASSERT(right.is(r0));
|
| + STATIC_ASSERT(kSmiTag == 0);
|
| +
|
| + Label not_smi_result;
|
| + switch (op_) {
|
| + case Token::ADD:
|
| + __ add(right, left, Operand(right), SetCC); // Add optimistically.
|
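| + // Return the result if the addition did not overflow (V flag clear).
|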
| + __ Ret(vc);
|
| + __ sub(right, right, Operand(left)); // Revert optimistic add.
|
| + break;
|
| + case Token::SUB:
|
| + __ sub(right, left, Operand(right), SetCC); // Subtract optimistically.
|
| + __ Ret(vc);
|
| + __ sub(right, left, Operand(right)); // Revert optimistic subtract.
|
| + break;
|
| + case Token::MUL:
|
| + // Remove tag from one of the operands. This way the multiplication result
|
| + // will be a smi if it fits the smi range.
|
| + __ SmiUntag(ip, right);
|
| + // Do multiplication
|
| + // scratch1 = lower 32 bits of ip * left.
|
| + // scratch2 = higher 32 bits of ip * left.
|
| + __ smull(scratch1, scratch2, left, ip);
|
| + // Check for overflowing the smi range - no overflow if higher 33 bits of
|
| + // the result are identical.
|
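| + // ip is the sign extension of the low word; it equals the high word exactly
|
| + // when the 64-bit product fits in 32 signed bits.
|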
| + __ mov(ip, Operand(scratch1, ASR, 31));
|
| + __ cmp(ip, Operand(scratch2));
|
| + __ b(ne, ¬_smi_result);
|
| + // Go slow on zero result to handle -0.
|
| + __ tst(scratch1, Operand(scratch1));
|
| + __ mov(right, Operand(scratch1), LeaveCC, ne);
|
| + __ Ret(ne);
|
| + // We need -0 if we were multiplying a negative number with 0 to get 0.
|
| + // We know one of them was zero.
|
| + __ add(scratch2, right, Operand(left), SetCC);
|
| + __ mov(right, Operand(Smi::FromInt(0)), LeaveCC, pl);
|
| + __ Ret(pl); // Return smi 0 if the non-zero one was positive.
|
| + // We fall through here if we multiplied a negative number with 0, because
|
| + // that would mean we should produce -0.
|
| + break;
|
| + case Token::DIV:
|
| + // Check for power of two on the right hand side.
|
| + __ JumpIfNotPowerOfTwoOrZero(right, scratch1, ¬_smi_result);
|
| + // Check for positive and no remainder (scratch1 contains right - 1).
|
| + __ orr(scratch2, scratch1, Operand(0x80000000u));
|
| + __ tst(left, scratch2);
|
| + __ b(ne, ¬_smi_result);
|
| +
|
| + // Perform division by shifting.
|
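| + // scratch1 holds right - 1, so 31 - CLZ(scratch1) is log2 of the untagged
|
| + // divisor. Logically shifting the tagged dividend by that amount yields the
|
| + // correctly tagged quotient; exactness was verified above.
|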
| + __ CountLeadingZeros(scratch1, scratch1, scratch2);
|
| + __ rsb(scratch1, scratch1, Operand(31));
|
| + __ mov(right, Operand(left, LSR, scratch1));
|
| + __ Ret();
|
| + break;
|
| + case Token::MOD:
|
| + // Check for two positive smis.
|
| + __ orr(scratch1, left, Operand(right));
|
| + __ tst(scratch1, Operand(0x80000000u | kSmiTagMask));
|
| + __ b(ne, ¬_smi_result);
|
| +
|
| + // Check for power of two on the right hand side.
|
| + __ JumpIfNotPowerOfTwoOrZero(right, scratch1, ¬_smi_result);
|
| +
|
| + // Perform modulus by masking.
|
| + __ and_(right, left, Operand(scratch1));
|
| + __ Ret();
|
| + break;
|
| + case Token::BIT_OR:
|
| + __ orr(right, left, Operand(right));
|
| + __ Ret();
|
| + break;
|
| + case Token::BIT_AND:
|
| + __ and_(right, left, Operand(right));
|
| + __ Ret();
|
| + break;
|
| + case Token::BIT_XOR:
|
| + __ eor(right, left, Operand(right));
|
| + __ Ret();
|
| + break;
|
| + case Token::SAR:
|
| + // Remove tags from right operand.
|
| + __ GetLeastBitsFromSmi(scratch1, right, 5);
|
| + __ mov(right, Operand(left, ASR, scratch1));
|
| + // Smi tag result.
|
| + __ bic(right, right, Operand(kSmiTagMask));
|
| + __ Ret();
|
| + break;
|
| + case Token::SHR:
|
| + // Remove tags from operands. We can't do this on a 31 bit number
|
| + // because then the 0s get shifted into bit 30 instead of bit 31.
|
| + __ SmiUntag(scratch1, left);
|
| + __ GetLeastBitsFromSmi(scratch2, right, 5);
|
| + __ mov(scratch1, Operand(scratch1, LSR, scratch2));
|
| + // Unsigned shift is not allowed to produce a negative number, so
|
| + // check the sign bit and the sign bit after Smi tagging.
|
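| + // Both top bits must be clear: bit 31 so the unsigned result is
|
| + // non-negative, and bit 30 so it still fits in a smi once tagged.
|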
| + __ tst(scratch1, Operand(0xc0000000));
|
| + __ b(ne, ¬_smi_result);
|
| + // Smi tag result.
|
| + __ SmiTag(right, scratch1);
|
| + __ Ret();
|
| + break;
|
| + case Token::SHL:
|
| + // Remove tags from operands.
|
| + __ SmiUntag(scratch1, left);
|
| + __ GetLeastBitsFromSmi(scratch2, right, 5);
|
| + __ mov(scratch1, Operand(scratch1, LSL, scratch2));
|
| + // Check that the signed result fits in a Smi.
|
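| + // Adding 0x40000000 sets the sign flag exactly when the value lies outside
|
| + // the smi range [-2^30, 2^30 - 1].
|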
| + __ add(scratch2, scratch1, Operand(0x40000000), SetCC);
|
| + __ b(mi, ¬_smi_result);
|
| + __ SmiTag(right, scratch1);
|
| + __ Ret();
|
| + break;
|
| + default:
|
| + UNREACHABLE();
|
| + }
|
| + __ bind(¬_smi_result);
|
| +}
|
| +
|
| +
|
| +void TypeRecordingBinaryOpStub::GenerateFPOperation(MacroAssembler* masm,
|
| + bool smi_operands,
|
| + Label* not_numbers,
|
| + Label* gc_required) {
|
| + Register left = r1;
|
| + Register right = r0;
|
| + Register scratch1 = r7;
|
| + Register scratch2 = r9;
|
| +
|
| + ASSERT(smi_operands || (not_numbers != NULL));
|
| + if (smi_operands && FLAG_debug_code) {
|
| + __ AbortIfNotSmi(left);
|
| + __ AbortIfNotSmi(right);
|
| + }
|
| +
|
| + Register heap_number_map = r6;
|
| + __ LoadRoot(heap_number_map, Heap::kHeapNumberMapRootIndex);
|
| +
|
| + switch (op_) {
|
| + case Token::ADD:
|
| + case Token::SUB:
|
| + case Token::MUL:
|
| + case Token::DIV:
|
| + case Token::MOD: {
|
| + // Load left and right operands into d6 and d7 or r0/r1 and r2/r3
|
| + // depending on whether VFP3 is available or not.
|
| + FloatingPointHelper::Destination destination =
|
| + CpuFeatures::IsSupported(VFP3) && op_ != Token::MOD ?
|
| + FloatingPointHelper::kVFPRegisters :
|
| + FloatingPointHelper::kCoreRegisters;
|
| +
|
| + // Allocate new heap number for result.
|
| + Register result = r5;
|
| + __ AllocateHeapNumber(
|
| + result, scratch1, scratch2, heap_number_map, gc_required);
|
| +
|
| + // Load the operands.
|
| + if (smi_operands) {
|
| + FloatingPointHelper::LoadSmis(masm, destination, scratch1, scratch2);
|
| + } else {
|
| + FloatingPointHelper::LoadOperands(masm,
|
| + destination,
|
| + heap_number_map,
|
| + scratch1,
|
| + scratch2,
|
| + not_numbers);
|
| + }
|
| +
|
| + // Calculate the result.
|
| + if (destination == FloatingPointHelper::kVFPRegisters) {
|
| + // Using VFP registers:
|
| + // d6: Left value
|
| + // d7: Right value
|
| + CpuFeatures::Scope scope(VFP3);
|
| + switch (op_) {
|
| + case Token::ADD:
|
| + __ vadd(d5, d6, d7);
|
| + break;
|
| + case Token::SUB:
|
| + __ vsub(d5, d6, d7);
|
| + break;
|
| + case Token::MUL:
|
| + __ vmul(d5, d6, d7);
|
| + break;
|
| + case Token::DIV:
|
| + __ vdiv(d5, d6, d7);
|
| + break;
|
| + default:
|
| + UNREACHABLE();
|
| + }
|
| +
|
| + __ sub(r0, result, Operand(kHeapObjectTag));
|
| + __ vstr(d5, r0, HeapNumber::kValueOffset);
|
| + __ add(r0, r0, Operand(kHeapObjectTag));
|
| + __ Ret();
|
| + } else {
|
| + // Using core registers:
|
| + // r0: Left value (least significant part of mantissa).
|
| + // r1: Left value (sign, exponent, top of mantissa).
|
| + // r2: Right value (least significant part of mantissa).
|
| + // r3: Right value (sign, exponent, top of mantissa).
|
| +
|
| + // Push the current return address before the C call. Return will be
|
| + // through pop(pc) below.
|
| + __ push(lr);
|
| + __ PrepareCallCFunction(4, scratch1); // Two doubles are 4 arguments.
|
| + // Call C routine that may not cause GC or other trouble. r5 is
|
| + // callee-saved.
|
| + __ CallCFunction(ExternalReference::double_fp_operation(op_), 4);
|
| + // Store answer in the overwritable heap number.
|
| +#if !defined(USE_ARM_EABI)
|
| + // Double returned in fp coprocessor registers 0 and 1, encoded as
|
| + // register cr8. Offsets must be divisible by 4 for the coprocessor, so we
|
| + // need to subtract the tag from r5.
|
| + __ sub(scratch1, result, Operand(kHeapObjectTag));
|
| + __ stc(p1, cr8, MemOperand(scratch1, HeapNumber::kValueOffset));
|
| +#else
|
| + // Double returned in registers 0 and 1.
|
| + __ Strd(r0, r1, FieldMemOperand(result, HeapNumber::kValueOffset));
|
| +#endif
|
| + // Place result in r0 and return to the pushed return address.
|
| + __ mov(r0, Operand(result));
|
| + __ pop(pc);
|
| + }
|
| + break;
|
| + }
|
| + case Token::BIT_OR:
|
| + case Token::BIT_XOR:
|
| + case Token::BIT_AND:
|
| + case Token::SAR:
|
| + case Token::SHR:
|
| + case Token::SHL: {
|
| + if (smi_operands) {
|
| + __ SmiUntag(r3, left);
|
| + __ SmiUntag(r2, right);
|
| + } else {
|
| + // Convert operands to 32-bit integers. Right in r2 and left in r3.
|
| + FloatingPointHelper::LoadNumberAsInteger(masm,
|
| + left,
|
| + r3,
|
| + heap_number_map,
|
| + scratch1,
|
| + scratch2,
|
| + d0,
|
| + not_numbers);
|
| + FloatingPointHelper::LoadNumberAsInteger(masm,
|
| + right,
|
| + r2,
|
| + heap_number_map,
|
| + scratch1,
|
| + scratch2,
|
| + d0,
|
| + not_numbers);
|
| + }
|
| +
|
| + Label result_not_a_smi;
|
| + switch (op_) {
|
| + case Token::BIT_OR:
|
| + __ orr(r2, r3, Operand(r2));
|
| + break;
|
| + case Token::BIT_XOR:
|
| + __ eor(r2, r3, Operand(r2));
|
| + break;
|
| + case Token::BIT_AND:
|
| + __ and_(r2, r3, Operand(r2));
|
| + break;
|
| + case Token::SAR:
|
| + // Use only the 5 least significant bits of the shift count.
|
| + __ GetLeastBitsFromInt32(r2, r2, 5);
|
| + __ mov(r2, Operand(r3, ASR, r2));
|
| + break;
|
| + case Token::SHR:
|
| + // Use only the 5 least significant bits of the shift count.
|
| + __ GetLeastBitsFromInt32(r2, r2, 5);
|
| + __ mov(r2, Operand(r3, LSR, r2), SetCC);
|
| + // SHR is special because it is required to produce a positive answer.
|
| + // The code below for writing into heap numbers isn't capable of
|
| + // writing the register as an unsigned int so we go to slow case if we
|
| + // hit this case.
|
| + if (CpuFeatures::IsSupported(VFP3)) {
|
| + __ b(mi, &result_not_a_smi);
|
| + } else {
|
| + __ b(mi, not_numbers);
|
| + }
|
| + break;
|
| + case Token::SHL:
|
| + // Use only the 5 least significant bits of the shift count.
|
| + __ GetLeastBitsFromInt32(r2, r2, 5);
|
| + __ mov(r2, Operand(r3, LSL, r2));
|
| + break;
|
| + default:
|
| + UNREACHABLE();
|
| + }
|
| +
|
| + // Check that the *signed* result fits in a smi.
|
| + __ add(r3, r2, Operand(0x40000000), SetCC);
|
| + __ b(mi, &result_not_a_smi);
|
| + __ SmiTag(r0, r2);
|
| + __ Ret();
|
| +
|
| + // Allocate new heap number for result.
|
| + __ bind(&result_not_a_smi);
|
| + __ AllocateHeapNumber(
|
| + r5, scratch1, scratch2, heap_number_map, gc_required);
|
| +
|
| + // r2: Answer as signed int32.
|
| + // r5: Heap number to write answer into.
|
| +
|
| + // Nothing can go wrong now, so move the heap number to r0, which is the
|
| + // result.
|
| + __ mov(r0, Operand(r5));
|
| +
|
| + if (CpuFeatures::IsSupported(VFP3)) {
|
| + // Convert the int32 in r2 to the heap number in r0. r3 is corrupted. As
|
| + // mentioned above, SHR always needs to produce a positive result.
|
| + CpuFeatures::Scope scope(VFP3);
|
| + __ vmov(s0, r2);
|
| + if (op_ == Token::SHR) {
|
| + __ vcvt_f64_u32(d0, s0);
|
| + } else {
|
| + __ vcvt_f64_s32(d0, s0);
|
| + }
|
| + __ sub(r3, r0, Operand(kHeapObjectTag));
|
| + __ vstr(d0, r3, HeapNumber::kValueOffset);
|
| + __ Ret();
|
| + } else {
|
| + // Tail call that writes the int32 in r2 to the heap number in r0, using
|
| + // r3 as scratch. r0 is preserved and returned.
|
| + WriteInt32ToHeapNumberStub stub(r2, r0, r3);
|
| + __ TailCallStub(&stub);
|
| + }
|
| + break;
|
| + }
|
| + default:
|
| + UNREACHABLE();
|
| + }
|
| +}
|
| +
|
| +
|
| +// Generate the smi code. If the operation on smis is successful, a return is
|
| +// generated. If the result is not a smi and heap number allocation is not
|
| +// requested, the code falls through. If number allocation is requested but a
|
| +// heap number cannot be allocated, the code jumps to the label gc_required.
|
| +void TypeRecordingBinaryOpStub::GenerateSmiCode(MacroAssembler* masm,
|
| + Label* gc_required,
|
| + SmiCodeGenerateHeapNumberResults allow_heapnumber_results) {
|
| + Label not_smis;
|
| +
|
| + Register left = r1;
|
| + Register right = r0;
|
| + Register scratch1 = r7;
|
| + Register scratch2 = r9;
|
| +
|
| + // Perform combined smi check on both operands.
|
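| + // Since the smi tag is 0, the tag bit of left | right is clear only if both
|
| + // operands are smis.
|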
| + __ orr(scratch1, left, Operand(right));
|
| + STATIC_ASSERT(kSmiTag == 0);
|
| + __ tst(scratch1, Operand(kSmiTagMask));
|
| + __ b(ne, ¬_smis);
|
| +
|
| + // If the smi-smi operation results in a smi, a return is generated.
|
| + GenerateSmiSmiOperation(masm);
|
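| + // Choose a scratch register that is distinct from the input register tos_.
|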
| +
|
| + // If heap number results are possible, generate the result in an allocated
|
| + // heap number.
|
| + if (allow_heapnumber_results == ALLOW_HEAPNUMBER_RESULTS) {
|
| + GenerateFPOperation(masm, true, NULL, gc_required);
|
| + }
|
| + __ bind(¬_smis);
|
| +}
|
| +
|
| +
|
| +void TypeRecordingBinaryOpStub::GenerateSmiStub(MacroAssembler* masm) {
|
| + Label not_smis, call_runtime;
|
| +
|
| + if (result_type_ == TRBinaryOpIC::UNINITIALIZED ||
|
| + result_type_ == TRBinaryOpIC::SMI) {
|
| + // Only allow smi results.
|
| + GenerateSmiCode(masm, NULL, NO_HEAPNUMBER_RESULTS);
|
| + } else {
|
| + // Allow heap number result and don't make a transition if a heap number
|
| + // cannot be allocated.
|
| + GenerateSmiCode(masm, &call_runtime, ALLOW_HEAPNUMBER_RESULTS);
|
| + }
|
| +
|
| + // Code falls through if the result is not returned as either a smi or heap
|
| + // number.
|
| + GenerateTypeTransition(masm);
|
| +
|
| + __ bind(&call_runtime);
|
| + GenerateCallRuntime(masm);
|
| +}
|
| +
|
| +
|
| +void TypeRecordingBinaryOpStub::GenerateStringStub(MacroAssembler* masm) {
|
| + ASSERT(operands_type_ == TRBinaryOpIC::STRING);
|
| + ASSERT(op_ == Token::ADD);
|
| + // Try to add the arguments as strings; otherwise, transition to the generic
|
| + // TRBinaryOpIC type.
|
| + GenerateAddStrings(masm);
|
| + GenerateTypeTransition(masm);
|
| +}
|
| +
|
| +
|
| +void TypeRecordingBinaryOpStub::GenerateInt32Stub(MacroAssembler* masm) {
|
| + ASSERT(operands_type_ == TRBinaryOpIC::INT32);
|
| +
|
| + GenerateTypeTransition(masm);
|
| +}
|
| +
|
| +
|
| +void TypeRecordingBinaryOpStub::GenerateHeapNumberStub(MacroAssembler* masm) {
|
| + Label not_numbers, call_runtime;
|
| + ASSERT(operands_type_ == TRBinaryOpIC::HEAP_NUMBER);
|
| +
|
| + GenerateFPOperation(masm, false, ¬_numbers, &call_runtime);
|
| +
|
| + __ bind(¬_numbers);
|
| + GenerateTypeTransition(masm);
|
| +
|
| + __ bind(&call_runtime);
|
| + GenerateCallRuntime(masm);
|
| +}
|
| +
|
| +
|
| +void TypeRecordingBinaryOpStub::GenerateGeneric(MacroAssembler* masm) {
|
| + Label call_runtime;
|
| +
|
| + GenerateSmiCode(masm, &call_runtime, ALLOW_HEAPNUMBER_RESULTS);
|
| +
|
| + // If all else fails, use the runtime system to get the correct
|
| + // result.
|
| + __ bind(&call_runtime);
|
| +
|
| + // Try to add strings before calling runtime.
|
| + if (op_ == Token::ADD) {
|
| + GenerateAddStrings(masm);
|
| + }
|
| +
|
| + GenericBinaryOpStub stub(op_, mode_, r1, r0);
|
| + __ TailCallStub(&stub);
|
| +}
|
| +
|
| +
|
| +void TypeRecordingBinaryOpStub::GenerateAddStrings(MacroAssembler* masm) {
|
| + ASSERT(op_ == Token::ADD);
|
| +
|
| + Register left = r1;
|
| + Register right = r0;
|
| + Label call_runtime;
|
| +
|
| + // Check if first argument is a string.
|
| + __ JumpIfSmi(left, &call_runtime);
|
| + __ CompareObjectType(left, r2, r2, FIRST_NONSTRING_TYPE);
|
| + __ b(ge, &call_runtime);
|
| +
|
| + // First argument is a string; test the second.
|
| + __ JumpIfSmi(right, &call_runtime);
|
| + __ CompareObjectType(right, r2, r2, FIRST_NONSTRING_TYPE);
|
| + __ b(ge, &call_runtime);
|
| +
|
| + // First and second argument are strings.
|
| + StringAddStub string_add_stub(NO_STRING_CHECK_IN_STUB);
|
| + GenerateRegisterArgsPush(masm);
|
| + __ TailCallStub(&string_add_stub);
|
| +
|
| + // At least one argument is not a string.
|
| + __ bind(&call_runtime);
|
| +}
|
| +
|
| +
|
| +void TypeRecordingBinaryOpStub::GenerateCallRuntime(MacroAssembler* masm) {
|
| + GenerateRegisterArgsPush(masm);
|
| + switch (op_) {
|
| + case Token::ADD:
|
| + __ InvokeBuiltin(Builtins::ADD, JUMP_JS);
|
| + break;
|
| + case Token::SUB:
|
| + __ InvokeBuiltin(Builtins::SUB, JUMP_JS);
|
| + break;
|
| + case Token::MUL:
|
| + __ InvokeBuiltin(Builtins::MUL, JUMP_JS);
|
| + break;
|
| + case Token::DIV:
|
| + __ InvokeBuiltin(Builtins::DIV, JUMP_JS);
|
| + break;
|
| + case Token::MOD:
|
| + __ InvokeBuiltin(Builtins::MOD, JUMP_JS);
|
| + break;
|
| + case Token::BIT_OR:
|
| + __ InvokeBuiltin(Builtins::BIT_OR, JUMP_JS);
|
| + break;
|
| + case Token::BIT_AND:
|
| + __ InvokeBuiltin(Builtins::BIT_AND, JUMP_JS);
|
| + break;
|
| + case Token::BIT_XOR:
|
| + __ InvokeBuiltin(Builtins::BIT_XOR, JUMP_JS);
|
| + break;
|
| + case Token::SAR:
|
| + __ InvokeBuiltin(Builtins::SAR, JUMP_JS);
|
| + break;
|
| + case Token::SHR:
|
| + __ InvokeBuiltin(Builtins::SHR, JUMP_JS);
|
| + break;
|
| + case Token::SHL:
|
| + __ InvokeBuiltin(Builtins::SHL, JUMP_JS);
|
| + break;
|
| + default:
|
| + UNREACHABLE();
|
| + }
|
| +}
|
| +
|
| +
|
| +void TypeRecordingBinaryOpStub::GenerateHeapResultAllocation(
|
| + MacroAssembler* masm,
|
| + Register result,
|
| + Register heap_number_map,
|
| + Register scratch1,
|
| + Register scratch2,
|
| + Label* gc_required) {
|
| +
|
| + // Code below will scratch result if allocation fails. To keep both arguments
|
| + // intact for the runtime call, result cannot be one of them.
|
| + ASSERT(!result.is(r0) && !result.is(r1));
|
| +
|
| + if (mode_ == OVERWRITE_LEFT || mode_ == OVERWRITE_RIGHT) {
|
| + Label skip_allocation, allocated;
|
| + Register overwritable_operand = mode_ == OVERWRITE_LEFT ? r1 : r0;
|
| + // If the overwritable operand is already an object, we skip the
|
| + // allocation of a heap number.
|
| + __ JumpIfNotSmi(overwritable_operand, &skip_allocation);
|
| + // Allocate a heap number for the result.
|
| + __ AllocateHeapNumber(
|
| + result, scratch1, scratch2, heap_number_map, gc_required);
|
| + __ b(&allocated);
|
| + __ bind(&skip_allocation);
|
| + // Use object holding the overwritable operand for result.
|
| + __ mov(result, Operand(overwritable_operand));
|
| + __ bind(&allocated);
|
| + } else {
|
| + ASSERT(mode_ == NO_OVERWRITE);
|
| + __ AllocateHeapNumber(
|
| + result, scratch1, scratch2, heap_number_map, gc_required);
|
| + }
|
| +}
|
| +
|
| +
|
| +void TypeRecordingBinaryOpStub::GenerateRegisterArgsPush(MacroAssembler* masm) {
|
| + __ Push(r1, r0);
|
| +}
|
| +
|
| +
|
| void TranscendentalCacheStub::Generate(MacroAssembler* masm) {
|
| // Argument is a number and is on stack and in r0.
|
| Label runtime_call;
|
| @@ -2223,7 +3068,7 @@
|
|
|
| if (CpuFeatures::IsSupported(VFP3)) {
|
| // Load argument and check if it is a smi.
|
| - __ BranchOnNotSmi(r0, &input_not_smi);
|
| + __ JumpIfNotSmi(r0, &input_not_smi);
|
|
|
| CpuFeatures::Scope scope(VFP3);
|
| // Input is a smi. Convert to double and load the low and high words
|
| @@ -2377,7 +3222,7 @@
|
| } else if (op_ == Token::BIT_NOT) {
|
| if (include_smi_code_) {
|
| Label non_smi;
|
| - __ BranchOnNotSmi(r0, &non_smi);
|
| + __ JumpIfNotSmi(r0, &non_smi);
|
| __ mvn(r0, Operand(r0));
|
| // Bit-clear inverted smi-tag.
|
| __ bic(r0, r0, Operand(kSmiTagMask));
|
| @@ -2395,7 +3240,7 @@
|
| __ b(ne, &slow);
|
|
|
| // Convert the heap number in r0 to an untagged integer in r1.
|
| - __ ConvertToInt32(r0, r1, r2, r3, &slow);
|
| + __ ConvertToInt32(r0, r1, r2, r3, d0, &slow);
|
|
|
| // Do the bitwise operation (move negated) and check if the result
|
| // fits in a smi.
|
| @@ -2454,105 +3299,13 @@
|
|
|
|
|
| void CEntryStub::GenerateThrowTOS(MacroAssembler* masm) {
|
| - // r0 holds the exception.
|
| -
|
| - // Adjust this code if not the case.
|
| - STATIC_ASSERT(StackHandlerConstants::kSize == 4 * kPointerSize);
|
| -
|
| - // Drop the sp to the top of the handler.
|
| - __ mov(r3, Operand(ExternalReference(Top::k_handler_address)));
|
| - __ ldr(sp, MemOperand(r3));
|
| -
|
| - // Restore the next handler and frame pointer, discard handler state.
|
| - STATIC_ASSERT(StackHandlerConstants::kNextOffset == 0);
|
| - __ pop(r2);
|
| - __ str(r2, MemOperand(r3));
|
| - STATIC_ASSERT(StackHandlerConstants::kFPOffset == 2 * kPointerSize);
|
| - __ ldm(ia_w, sp, r3.bit() | fp.bit()); // r3: discarded state.
|
| -
|
| - // Before returning we restore the context from the frame pointer if
|
| - // not NULL. The frame pointer is NULL in the exception handler of a
|
| - // JS entry frame.
|
| - __ cmp(fp, Operand(0, RelocInfo::NONE));
|
| - // Set cp to NULL if fp is NULL.
|
| - __ mov(cp, Operand(0, RelocInfo::NONE), LeaveCC, eq);
|
| - // Restore cp otherwise.
|
| - __ ldr(cp, MemOperand(fp, StandardFrameConstants::kContextOffset), ne);
|
| -#ifdef DEBUG
|
| - if (FLAG_debug_code) {
|
| - __ mov(lr, Operand(pc));
|
| - }
|
| -#endif
|
| - STATIC_ASSERT(StackHandlerConstants::kPCOffset == 3 * kPointerSize);
|
| - __ pop(pc);
|
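| + // r0 holds the exception; the macro assembler now encapsulates the handler
|
| + // unwinding that was previously emitted inline above.
|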
| + __ Throw(r0);
|
| }
|
|
|
|
|
| void CEntryStub::GenerateThrowUncatchable(MacroAssembler* masm,
|
| UncatchableExceptionType type) {
|
| - // Adjust this code if not the case.
|
| - STATIC_ASSERT(StackHandlerConstants::kSize == 4 * kPointerSize);
|
| -
|
| - // Drop sp to the top stack handler.
|
| - __ mov(r3, Operand(ExternalReference(Top::k_handler_address)));
|
| - __ ldr(sp, MemOperand(r3));
|
| -
|
| - // Unwind the handlers until the ENTRY handler is found.
|
| - Label loop, done;
|
| - __ bind(&loop);
|
| - // Load the type of the current stack handler.
|
| - const int kStateOffset = StackHandlerConstants::kStateOffset;
|
| - __ ldr(r2, MemOperand(sp, kStateOffset));
|
| - __ cmp(r2, Operand(StackHandler::ENTRY));
|
| - __ b(eq, &done);
|
| - // Fetch the next handler in the list.
|
| - const int kNextOffset = StackHandlerConstants::kNextOffset;
|
| - __ ldr(sp, MemOperand(sp, kNextOffset));
|
| - __ jmp(&loop);
|
| - __ bind(&done);
|
| -
|
| - // Set the top handler address to next handler past the current ENTRY handler.
|
| - STATIC_ASSERT(StackHandlerConstants::kNextOffset == 0);
|
| - __ pop(r2);
|
| - __ str(r2, MemOperand(r3));
|
| -
|
| - if (type == OUT_OF_MEMORY) {
|
| - // Set external caught exception to false.
|
| - ExternalReference external_caught(Top::k_external_caught_exception_address);
|
| - __ mov(r0, Operand(false));
|
| - __ mov(r2, Operand(external_caught));
|
| - __ str(r0, MemOperand(r2));
|
| -
|
| - // Set pending exception and r0 to out of memory exception.
|
| - Failure* out_of_memory = Failure::OutOfMemoryException();
|
| - __ mov(r0, Operand(reinterpret_cast<int32_t>(out_of_memory)));
|
| - __ mov(r2, Operand(ExternalReference(Top::k_pending_exception_address)));
|
| - __ str(r0, MemOperand(r2));
|
| - }
|
| -
|
| - // Stack layout at this point. See also StackHandlerConstants.
|
| - // sp -> state (ENTRY)
|
| - // fp
|
| - // lr
|
| -
|
| - // Discard handler state (r2 is not used) and restore frame pointer.
|
| - STATIC_ASSERT(StackHandlerConstants::kFPOffset == 2 * kPointerSize);
|
| - __ ldm(ia_w, sp, r2.bit() | fp.bit()); // r2: discarded state.
|
| - // Before returning we restore the context from the frame pointer if
|
| - // not NULL. The frame pointer is NULL in the exception handler of a
|
| - // JS entry frame.
|
| - __ cmp(fp, Operand(0, RelocInfo::NONE));
|
| - // Set cp to NULL if fp is NULL.
|
| - __ mov(cp, Operand(0, RelocInfo::NONE), LeaveCC, eq);
|
| - // Restore cp otherwise.
|
| - __ ldr(cp, MemOperand(fp, StandardFrameConstants::kContextOffset), ne);
|
| -#ifdef DEBUG
|
| - if (FLAG_debug_code) {
|
| - __ mov(lr, Operand(pc));
|
| - }
|
| -#endif
|
| - STATIC_ASSERT(StackHandlerConstants::kPCOffset == 3 * kPointerSize);
|
| - __ pop(pc);
|
| + __ ThrowUncatchable(type, r0);
|
| }
|
|
|
|
|
| @@ -2561,8 +3314,7 @@
|
| Label* throw_termination_exception,
|
| Label* throw_out_of_memory_exception,
|
| bool do_gc,
|
| - bool always_allocate,
|
| - int frame_alignment_skew) {
|
| + bool always_allocate) {
|
| // r0: result parameter for PerformGC, if any
|
| // r4: number of arguments including receiver (C callee-saved)
|
| // r5: pointer to builtin function (C callee-saved)
|
| @@ -2588,15 +3340,14 @@
|
| __ mov(r0, Operand(r4));
|
| __ mov(r1, Operand(r6));
|
|
|
| +#if defined(V8_HOST_ARCH_ARM)
|
| int frame_alignment = MacroAssembler::ActivationFrameAlignment();
|
| int frame_alignment_mask = frame_alignment - 1;
|
| -#if defined(V8_HOST_ARCH_ARM)
|
| if (FLAG_debug_code) {
|
| if (frame_alignment > kPointerSize) {
|
| Label alignment_as_expected;
|
| ASSERT(IsPowerOf2(frame_alignment));
|
| - __ sub(r2, sp, Operand(frame_alignment_skew));
|
| - __ tst(r2, Operand(frame_alignment_mask));
|
| + __ tst(sp, Operand(frame_alignment_mask));
|
| __ b(eq, &alignment_as_expected);
|
| // Don't use Check here, as it will call Runtime_Abort re-entering here.
|
| __ stop("Unexpected alignment");
|
| @@ -2605,36 +3356,21 @@
|
| }
|
| #endif
|
|
|
| - // Just before the call (jump) below lr is pushed, so the actual alignment is
|
| - // adding one to the current skew.
|
| - int alignment_before_call =
|
| - (frame_alignment_skew + kPointerSize) & frame_alignment_mask;
|
| - if (alignment_before_call > 0) {
|
| - // Push until the alignment before the call is met.
|
| - __ mov(r2, Operand(0, RelocInfo::NONE));
|
| - for (int i = alignment_before_call;
|
| - (i & frame_alignment_mask) != 0;
|
| - i += kPointerSize) {
|
| - __ push(r2);
|
| - }
|
| - }
|
| -
|
| // TODO(1242173): To let the GC traverse the return address of the exit
|
| // frames, we need to know where the return address is. Right now,
|
| - // we push it on the stack to be able to find it again, but we never
|
| + // we store it on the stack to be able to find it again, but we never
|
| // restore from it in case of changes, which makes it impossible to
|
| // support moving the C entry code stub. This should be fixed, but currently
|
| // this is OK because the CEntryStub gets generated so early in the V8 boot
|
| // sequence that it is not moving ever.
|
| - masm->add(lr, pc, Operand(4)); // Compute return address: (pc + 8) + 4
|
| - masm->push(lr);
|
| +
|
| + // Compute the return address in lr to return to after the jump below. pc
|
| + // already reads as '+ 8' from the current instruction, but the return point
|
| + // is three instructions later, so add another 4 to pc.
|
| + masm->add(lr, pc, Operand(4));
|
| + __ str(lr, MemOperand(sp, 0));
|
| masm->Jump(r5);
|
|
|
| - // Restore sp back to before aligning the stack.
|
| - if (alignment_before_call > 0) {
|
| - __ add(sp, sp, Operand(alignment_before_call));
|
| - }
|
| -
|
| if (always_allocate) {
|
| // It's okay to clobber r2 and r3 here. Don't mess with r0 and r1
|
| // though (contain the result).
|
| @@ -2656,7 +3392,9 @@
|
| // r0:r1: result
|
| // sp: stack pointer
|
| // fp: frame pointer
|
| - __ LeaveExitFrame(save_doubles_);
|
| + // Callee-saved register r4 still holds argc.
|
| + __ LeaveExitFrame(save_doubles_, r4);
|
| + __ mov(pc, lr);
|
|
|
| // check if we should retry or throw exception
|
| Label retry;
|
| @@ -2704,9 +3442,17 @@
|
| // this by performing a garbage collection and retrying the
|
| // builtin once.
|
|
|
| + // Compute the argv pointer in a callee-saved register.
|
| + __ add(r6, sp, Operand(r0, LSL, kPointerSizeLog2));
|
| + __ sub(r6, r6, Operand(kPointerSize));
|
| +
|
| // Enter the exit frame that transitions from JavaScript to C++.
|
| __ EnterExitFrame(save_doubles_);
|
|
|
| + // Set up argc and the builtin function in callee-saved registers.
|
| + __ mov(r4, Operand(r0));
|
| + __ mov(r5, Operand(r1));
|
| +
|
| // r4: number of arguments (C callee-saved)
|
| // r5: pointer to builtin function (C callee-saved)
|
| // r6: pointer to first argument (C callee-saved)
|
| @@ -2721,8 +3467,7 @@
|
| &throw_termination_exception,
|
| &throw_out_of_memory_exception,
|
| false,
|
| - false,
|
| - -kPointerSize);
|
| + false);
|
|
|
| // Do space-specific GC and retry runtime call.
|
| GenerateCore(masm,
|
| @@ -2730,8 +3475,7 @@
|
| &throw_termination_exception,
|
| &throw_out_of_memory_exception,
|
| true,
|
| - false,
|
| - 0);
|
| + false);
|
|
|
| // Do full GC and retry runtime call one final time.
|
| Failure* failure = Failure::InternalError();
|
| @@ -2741,8 +3485,7 @@
|
| &throw_termination_exception,
|
| &throw_out_of_memory_exception,
|
| true,
|
| - true,
|
| - kPointerSize);
|
| + true);
|
|
|
| __ bind(&throw_out_of_memory_exception);
|
| GenerateThrowUncatchable(masm, OUT_OF_MEMORY);
|
| @@ -2893,108 +3636,190 @@
|
| }
|
|
|
|
|
| -// Uses registers r0 to r4. Expected input is
|
| -// object in r0 (or at sp+1*kPointerSize) and function in
|
| -// r1 (or at sp), depending on whether or not
|
| -// args_in_registers() is true.
|
| +// Uses registers r0 to r4.
|
| +// Expected input (depending on whether args are in registers or on the stack):
|
| +// * object: r0 or at sp + 1 * kPointerSize.
|
| +// * function: r1 or at sp.
|
| +//
|
| +// An inlined call site may have been generated before calling this stub.
|
| +// In this case the offset to the inline site to patch is passed on the stack,
|
| +// in the safepoint slot for register r4.
|
| +// (See LCodeGen::DoInstanceOfKnownGlobal)
|
| void InstanceofStub::Generate(MacroAssembler* masm) {
|
| + // Call site inlining and patching implies arguments in registers.
|
| + ASSERT(HasArgsInRegisters() || !HasCallSiteInlineCheck());
|
| + // ReturnTrueFalse is only implemented for inlined call sites.
|
| + ASSERT(!ReturnTrueFalseObject() || HasCallSiteInlineCheck());
|
| +
|
| // Fixed register usage throughout the stub:
|
| const Register object = r0; // Object (lhs).
|
| - const Register map = r3; // Map of the object.
|
| + Register map = r3; // Map of the object.
|
| const Register function = r1; // Function (rhs).
|
| const Register prototype = r4; // Prototype of the function.
|
| + const Register inline_site = r9;
|
| const Register scratch = r2;
|
| +
|
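| + // Distance from the patched map load at the inlined call site to the
|
| + // instruction that materializes the boolean result (see the patching code
|
| + // below).
|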
| + const int32_t kDeltaToLoadBoolResult = 3 * kPointerSize;
|
| +
|
| Label slow, loop, is_instance, is_not_instance, not_js_object;
|
| - if (!args_in_registers()) {
|
| +
|
| + if (!HasArgsInRegisters()) {
|
| __ ldr(object, MemOperand(sp, 1 * kPointerSize));
|
| __ ldr(function, MemOperand(sp, 0));
|
| }
|
|
|
| // Check that the left hand is a JS object and load map.
|
| - __ BranchOnSmi(object, ¬_js_object);
|
| + __ JumpIfSmi(object, ¬_js_object);
|
| __ IsObjectJSObjectType(object, map, scratch, ¬_js_object);
|
|
|
| - // Look up the function and the map in the instanceof cache.
|
| - Label miss;
|
| - __ LoadRoot(ip, Heap::kInstanceofCacheFunctionRootIndex);
|
| - __ cmp(function, ip);
|
| - __ b(ne, &miss);
|
| - __ LoadRoot(ip, Heap::kInstanceofCacheMapRootIndex);
|
| - __ cmp(map, ip);
|
| - __ b(ne, &miss);
|
| - __ LoadRoot(r0, Heap::kInstanceofCacheAnswerRootIndex);
|
| - __ Ret(args_in_registers() ? 0 : 2);
|
| + // If there is a call site cache don't look in the global cache, but do the
|
| + // real lookup and update the call site cache.
|
| + if (!HasCallSiteInlineCheck()) {
|
| + Label miss;
|
| + __ LoadRoot(ip, Heap::kInstanceofCacheFunctionRootIndex);
|
| + __ cmp(function, ip);
|
| + __ b(ne, &miss);
|
| + __ LoadRoot(ip, Heap::kInstanceofCacheMapRootIndex);
|
| + __ cmp(map, ip);
|
| + __ b(ne, &miss);
|
| + __ LoadRoot(r0, Heap::kInstanceofCacheAnswerRootIndex);
|
| + __ Ret(HasArgsInRegisters() ? 0 : 2);
|
|
|
| - __ bind(&miss);
|
| + __ bind(&miss);
|
| + }
|
| +
|
| + // Get the prototype of the function.
|
| __ TryGetFunctionPrototype(function, prototype, scratch, &slow);
|
|
|
| // Check that the function prototype is a JS object.
|
| - __ BranchOnSmi(prototype, &slow);
|
| + __ JumpIfSmi(prototype, &slow);
|
| __ IsObjectJSObjectType(prototype, scratch, scratch, &slow);
|
|
|
| - __ StoreRoot(function, Heap::kInstanceofCacheFunctionRootIndex);
|
| - __ StoreRoot(map, Heap::kInstanceofCacheMapRootIndex);
|
| + // Update the global instanceof or call site inlined cache with the current
|
| + // map and function. The cached answer will be set when it is known below.
|
| + if (!HasCallSiteInlineCheck()) {
|
| + __ StoreRoot(function, Heap::kInstanceofCacheFunctionRootIndex);
|
| + __ StoreRoot(map, Heap::kInstanceofCacheMapRootIndex);
|
| + } else {
|
| + ASSERT(HasArgsInRegisters());
|
| + // Patch the (relocated) inlined map check.
|
|
|
| + // The offset was stored in the r4 safepoint slot.
|
| + // (See LCodeGen::DoDeferredLInstanceOfKnownGlobal)
|
| + __ ldr(scratch, MacroAssembler::SafepointRegisterSlot(r4));
|
| + __ sub(inline_site, lr, scratch);
|
| + // Get the map location in scratch and patch it.
|
| + __ GetRelocatedValueLocation(inline_site, scratch);
|
| + __ str(map, MemOperand(scratch));
|
| + }
|
| +
|
| // Register mapping: r3 is object map and r4 is function prototype.
|
| // Get prototype of object into r2.
|
| __ ldr(scratch, FieldMemOperand(map, Map::kPrototypeOffset));
|
|
|
| + // We don't need map any more. Use it as a scratch register.
|
| + Register scratch2 = map;
|
| + map = no_reg;
|
| +
|
| // Loop through the prototype chain looking for the function prototype.
|
| + __ LoadRoot(scratch2, Heap::kNullValueRootIndex);
|
| __ bind(&loop);
|
| __ cmp(scratch, Operand(prototype));
|
| __ b(eq, &is_instance);
|
| - __ LoadRoot(ip, Heap::kNullValueRootIndex);
|
| - __ cmp(scratch, ip);
|
| + __ cmp(scratch, scratch2);
|
| __ b(eq, &is_not_instance);
|
| __ ldr(scratch, FieldMemOperand(scratch, HeapObject::kMapOffset));
|
| __ ldr(scratch, FieldMemOperand(scratch, Map::kPrototypeOffset));
|
| __ jmp(&loop);
|
|
|
| __ bind(&is_instance);
|
| - __ mov(r0, Operand(Smi::FromInt(0)));
|
| - __ StoreRoot(r0, Heap::kInstanceofCacheAnswerRootIndex);
|
| - __ Ret(args_in_registers() ? 0 : 2);
|
| + if (!HasCallSiteInlineCheck()) {
|
| + __ mov(r0, Operand(Smi::FromInt(0)));
|
| + __ StoreRoot(r0, Heap::kInstanceofCacheAnswerRootIndex);
|
| + } else {
|
| + // Patch the call site to return true.
|
| + __ LoadRoot(r0, Heap::kTrueValueRootIndex);
|
| + __ add(inline_site, inline_site, Operand(kDeltaToLoadBoolResult));
|
| + // Get the boolean result location in scratch and patch it.
|
| + __ GetRelocatedValueLocation(inline_site, scratch);
|
| + __ str(r0, MemOperand(scratch));
|
|
|
| + if (!ReturnTrueFalseObject()) {
|
| + __ mov(r0, Operand(Smi::FromInt(0)));
|
| + }
|
| + }
|
| + __ Ret(HasArgsInRegisters() ? 0 : 2);
|
| +
|
| __ bind(&is_not_instance);
|
| - __ mov(r0, Operand(Smi::FromInt(1)));
|
| - __ StoreRoot(r0, Heap::kInstanceofCacheAnswerRootIndex);
|
| - __ Ret(args_in_registers() ? 0 : 2);
|
| + if (!HasCallSiteInlineCheck()) {
|
| + __ mov(r0, Operand(Smi::FromInt(1)));
|
| + __ StoreRoot(r0, Heap::kInstanceofCacheAnswerRootIndex);
|
| + } else {
|
| + // Patch the call site to return false.
|
| + __ LoadRoot(r0, Heap::kFalseValueRootIndex);
|
| + __ add(inline_site, inline_site, Operand(kDeltaToLoadBoolResult));
|
| + // Get the boolean result location in scratch and patch it.
|
| + __ GetRelocatedValueLocation(inline_site, scratch);
|
| + __ str(r0, MemOperand(scratch));
|
|
|
| + if (!ReturnTrueFalseObject()) {
|
| + __ mov(r0, Operand(Smi::FromInt(1)));
|
| + }
|
| + }
|
| + __ Ret(HasArgsInRegisters() ? 0 : 2);
|
| +
|
| Label object_not_null, object_not_null_or_smi;
|
| __ bind(¬_js_object);
|
| // Before null, smi and string value checks, check that the rhs is a function
|
| // as for a non-function rhs an exception needs to be thrown.
|
| - __ BranchOnSmi(function, &slow);
|
| - __ CompareObjectType(function, map, scratch, JS_FUNCTION_TYPE);
|
| + __ JumpIfSmi(function, &slow);
|
| + __ CompareObjectType(function, scratch2, scratch, JS_FUNCTION_TYPE);
|
| __ b(ne, &slow);
|
|
|
| // Null is not instance of anything.
|
| __ cmp(scratch, Operand(Factory::null_value()));
|
| __ b(ne, &object_not_null);
|
| __ mov(r0, Operand(Smi::FromInt(1)));
|
| - __ Ret(args_in_registers() ? 0 : 2);
|
| + __ Ret(HasArgsInRegisters() ? 0 : 2);
|
|
|
| __ bind(&object_not_null);
|
| // Smi values are not instances of anything.
|
| - __ BranchOnNotSmi(object, &object_not_null_or_smi);
|
| + __ JumpIfNotSmi(object, &object_not_null_or_smi);
|
| __ mov(r0, Operand(Smi::FromInt(1)));
|
| - __ Ret(args_in_registers() ? 0 : 2);
|
| + __ Ret(HasArgsInRegisters() ? 0 : 2);
|
|
|
| __ bind(&object_not_null_or_smi);
|
| // String values are not instances of anything.
|
| __ IsObjectJSStringType(object, scratch, &slow);
|
| __ mov(r0, Operand(Smi::FromInt(1)));
|
| - __ Ret(args_in_registers() ? 0 : 2);
|
| + __ Ret(HasArgsInRegisters() ? 0 : 2);
|
|
|
| // Slow-case. Tail call builtin.
|
| - if (args_in_registers()) {
|
| + __ bind(&slow);
|
| + if (!ReturnTrueFalseObject()) {
|
| + if (HasArgsInRegisters()) {
|
| + __ Push(r0, r1);
|
| + }
|
| + __ InvokeBuiltin(Builtins::INSTANCE_OF, JUMP_JS);
|
| + } else {
|
| + __ EnterInternalFrame();
|
| __ Push(r0, r1);
|
| + __ InvokeBuiltin(Builtins::INSTANCE_OF, CALL_JS);
|
| + __ LeaveInternalFrame();
|
| + __ cmp(r0, Operand(0));
|
| + __ LoadRoot(r0, Heap::kTrueValueRootIndex, eq);
|
| + __ LoadRoot(r0, Heap::kFalseValueRootIndex, ne);
|
| + __ Ret(HasArgsInRegisters() ? 0 : 2);
|
| }
|
| - __ bind(&slow);
|
| - __ InvokeBuiltin(Builtins::INSTANCE_OF, JUMP_JS);
|
| }
|
|
|
|
|
| +Register InstanceofStub::left() { return r0; }
|
| +
|
| +
|
| +Register InstanceofStub::right() { return r1; }
|
| +
|
| +
|
| void ArgumentsAccessStub::GenerateReadElement(MacroAssembler* masm) {
|
| // The displacement is the offset of the last parameter (if any)
|
| // relative to the frame pointer.
|
| @@ -3003,7 +3828,7 @@
|
|
|
| // Check that the key is a smi.
|
| Label slow;
|
| - __ BranchOnNotSmi(r1, &slow);
|
| + __ JumpIfNotSmi(r1, &slow);
|
|
|
| // Check if the calling frame is an arguments adaptor frame.
|
| Label adaptor;
|
| @@ -3016,7 +3841,7 @@
|
| // through register r0. Use unsigned comparison to get negative
|
| // check for free.
|
| __ cmp(r1, r0);
|
| - __ b(cs, &slow);
|
| + __ b(hs, &slow);
|
|
|
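| The hs branch above relies on a standard trick worth spelling out: a negative
|
| smi, reinterpreted as an unsigned word, becomes a huge value, so one unsigned
|
| comparison covers both the "negative" and the "too large" case. A sketch:
|
|     #include <stdint.h>
|
|     // One unsigned compare implements index < 0 || index >= bound.
|
|     static bool InRange(int32_t index, uint32_t bound) {
|
|       return static_cast<uint32_t>(index) < bound;
|
|     }
|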
| // Read the argument from the stack and return it.
|
| __ sub(r3, r0, r1);
|
| @@ -3207,7 +4032,7 @@
|
| __ ldr(regexp_data, FieldMemOperand(r0, JSRegExp::kDataOffset));
|
| if (FLAG_debug_code) {
|
| __ tst(regexp_data, Operand(kSmiTagMask));
|
| - __ Check(nz, "Unexpected type for RegExp data, FixedArray expected");
|
| + __ Check(ne, "Unexpected type for RegExp data, FixedArray expected");
|
| __ CompareObjectType(regexp_data, r0, r0, FIXED_ARRAY_TYPE);
|
| __ Check(eq, "Unexpected type for RegExp data, FixedArray expected");
|
| }
|
| @@ -3310,7 +4135,7 @@
|
| // Is first part a flat string?
|
| STATIC_ASSERT(kSeqStringTag == 0);
|
| __ tst(r0, Operand(kStringRepresentationMask));
|
| - __ b(nz, &runtime);
|
| + __ b(ne, &runtime);
|
|
|
| __ bind(&seq_string);
|
| // subject: Subject string
|
| @@ -3348,24 +4173,27 @@
|
| __ IncrementCounter(&Counters::regexp_entry_native, 1, r0, r2);
|
|
|
| static const int kRegExpExecuteArguments = 7;
|
| - __ push(lr);
|
| - __ PrepareCallCFunction(kRegExpExecuteArguments, r0);
|
| + static const int kParameterRegisters = 4;
|
| + __ EnterExitFrame(false, kRegExpExecuteArguments - kParameterRegisters);
|
|
|
| - // Argument 7 (sp[8]): Indicate that this is a direct call from JavaScript.
|
| + // The stack pointer now points to the cell where the return address will
|
| + // be written. The remaining arguments are above that cell on the stack or
|
| + // in registers.
|
| +
|
| + // Argument 7 (sp[12]): Indicate that this is a direct call from JavaScript.
|
| __ mov(r0, Operand(1));
|
| - __ str(r0, MemOperand(sp, 2 * kPointerSize));
|
| + __ str(r0, MemOperand(sp, 3 * kPointerSize));
|
|
|
| - // Argument 6 (sp[4]): Start (high end) of backtracking stack memory area.
|
| + // Argument 6 (sp[8]): Start (high end) of backtracking stack memory area.
|
| __ mov(r0, Operand(address_of_regexp_stack_memory_address));
|
| __ ldr(r0, MemOperand(r0, 0));
|
| __ mov(r2, Operand(address_of_regexp_stack_memory_size));
|
| __ ldr(r2, MemOperand(r2, 0));
|
| __ add(r0, r0, Operand(r2));
|
| - __ str(r0, MemOperand(sp, 1 * kPointerSize));
|
| + __ str(r0, MemOperand(sp, 2 * kPointerSize));
|
|
|
| - // Argument 5 (sp[0]): static offsets vector buffer.
|
| + // Argument 5 (sp[4]): static offsets vector buffer.
|
| __ mov(r0, Operand(ExternalReference::address_of_static_offsets_vector()));
|
| - __ str(r0, MemOperand(sp, 0 * kPointerSize));
|
| + __ str(r0, MemOperand(sp, 1 * kPointerSize));
|
|
|
| // For arguments 4 and 3 get string length, calculate start of string data and
|
| // calculate the shift of the index (0 for ASCII and 1 for two byte).
|
| @@ -3387,9 +4215,11 @@
|
|
|
| // Locate the code entry and call it.
|
| __ add(r7, r7, Operand(Code::kHeaderSize - kHeapObjectTag));
|
| - __ CallCFunction(r7, kRegExpExecuteArguments);
|
| - __ pop(lr);
|
| + DirectCEntryStub stub;
|
| + stub.GenerateCall(masm, r7);
|
|
|
| + __ LeaveExitFrame(false, no_reg);
|
| +
|
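| For orientation, the stack slots written above follow the ARM calling
|
| convention (the first four arguments travel in r0-r3, the rest on the stack),
|
| plus the one extra word reserved by EnterExitFrame. Assumed layout while the
|
| RegExp code runs:
|
|     // sp[0]  : return-address slot (filled by DirectCEntryStub)
|
|     // sp[4]  : argument 5 - static offsets vector buffer
|
|     // sp[8]  : argument 6 - end (high address) of the backtrack stack
|
|     // sp[12] : argument 7 - direct-call-from-JavaScript flag (1)
|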
| // r0: result
|
| // subject: subject string (callee saved)
|
| // regexp_data: RegExp data (callee saved)
|
| @@ -3397,6 +4227,7 @@
|
|
|
| // Check the result.
|
| Label success;
|
| +
|
| __ cmp(r0, Operand(NativeRegExpMacroAssembler::SUCCESS));
|
| __ b(eq, &success);
|
| Label failure;
|
| @@ -3409,12 +4240,26 @@
|
| // stack overflow (on the backtrack stack) was detected in RegExp code but
|
| // haven't created the exception yet. Handle that in the runtime system.
|
| // TODO(592): Rerunning the RegExp to get the stack overflow exception.
|
| - __ mov(r0, Operand(ExternalReference::the_hole_value_location()));
|
| - __ ldr(r0, MemOperand(r0, 0));
|
| - __ mov(r1, Operand(ExternalReference(Top::k_pending_exception_address)));
|
| + __ mov(r1, Operand(ExternalReference::the_hole_value_location()));
|
| __ ldr(r1, MemOperand(r1, 0));
|
| + __ mov(r2, Operand(ExternalReference(Top::k_pending_exception_address)));
|
| + __ ldr(r0, MemOperand(r2, 0));
|
| __ cmp(r0, r1);
|
| __ b(eq, &runtime);
|
| +
|
| + __ str(r1, MemOperand(r2, 0)); // Clear pending exception.
|
| +
|
| + // Check if the exception is a termination. If so, throw as uncatchable.
|
| + __ LoadRoot(ip, Heap::kTerminationExceptionRootIndex);
|
| + __ cmp(r0, ip);
|
| + Label termination_exception;
|
| + __ b(eq, &termination_exception);
|
| +
|
| + __ Throw(r0); // Expects thrown value in r0.
|
| +
|
| + __ bind(&termination_exception);
|
| + __ ThrowUncatchable(TERMINATION, r0); // Expects thrown value in r0.
|
| +
|
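| Restated in C-like pseudocode (a sketch; the hole value is V8's sentinel for
|
| "no pending exception"):
|
|     // if (pending_exception == the_hole) goto runtime;  // nothing pending
|
|     // pending_exception = the_hole;                     // clear it
|
|     // if (exception == termination) ThrowUncatchable(exception);
|
|     // else Throw(exception);
|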
| __ bind(&failure);
|
| // For failure and exception return null.
|
| __ mov(r0, Operand(Factory::null_value()));
|
| @@ -3589,7 +4434,7 @@
|
| __ ldr(r1, MemOperand(sp, argc_ * kPointerSize));
|
|
|
| // Check if receiver is a smi (which is a number value).
|
| - __ BranchOnSmi(r1, &receiver_is_value);
|
| + __ JumpIfSmi(r1, &receiver_is_value);
|
|
|
| // Check if the receiver is a valid JS object.
|
| __ CompareObjectType(r1, r2, r2, FIRST_JS_OBJECT_TYPE);
|
| @@ -3612,7 +4457,7 @@
|
|
|
| // Check that the function is really a JavaScript function.
|
| // r1: pushed function (to be verified)
|
| - __ BranchOnSmi(r1, &slow);
|
| + __ JumpIfSmi(r1, &slow);
|
| // Get the map of the function object.
|
| __ CompareObjectType(r1, r2, r2, JS_FUNCTION_TYPE);
|
| __ b(ne, &slow);
|
| @@ -3710,14 +4555,13 @@
|
|
|
|
|
| // StringCharCodeAtGenerator
|
| -
|
| void StringCharCodeAtGenerator::GenerateFast(MacroAssembler* masm) {
|
| Label flat_string;
|
| Label ascii_string;
|
| Label got_char_code;
|
|
|
| // If the receiver is a smi trigger the non-string case.
|
| - __ BranchOnSmi(object_, receiver_not_string_);
|
| + __ JumpIfSmi(object_, receiver_not_string_);
|
|
|
| // Fetch the instance type of the receiver into result register.
|
| __ ldr(result_, FieldMemOperand(object_, HeapObject::kMapOffset));
|
| @@ -3727,7 +4571,7 @@
|
| __ b(ne, receiver_not_string_);
|
|
|
| // If the index is non-smi trigger the non-smi case.
|
| - __ BranchOnNotSmi(index_, &index_not_smi_);
|
| + __ JumpIfNotSmi(index_, &index_not_smi_);
|
|
|
| // Put smi-tagged index into scratch register.
|
| __ mov(scratch_, index_);
|
| @@ -3763,13 +4607,13 @@
|
| // If the first cons component is also non-flat, then go to runtime.
|
| STATIC_ASSERT(kSeqStringTag == 0);
|
| __ tst(result_, Operand(kStringRepresentationMask));
|
| - __ b(nz, &call_runtime_);
|
| + __ b(ne, &call_runtime_);
|
|
|
| // Check for 1-byte or 2-byte string.
|
| __ bind(&flat_string);
|
| STATIC_ASSERT(kAsciiStringTag != 0);
|
| __ tst(result_, Operand(kStringEncodingMask));
|
| - __ b(nz, &ascii_string);
|
| + __ b(ne, &ascii_string);
|
|
|
| // 2-byte string.
|
| // Load the 2-byte character code into the result register. We can
|
| @@ -3824,7 +4668,7 @@
|
| __ ldrb(result_, FieldMemOperand(result_, Map::kInstanceTypeOffset));
|
| call_helper.AfterCall(masm);
|
| // If index is still not a smi, it must be out of range.
|
| - __ BranchOnNotSmi(scratch_, index_out_of_range_);
|
| + __ JumpIfNotSmi(scratch_, index_out_of_range_);
|
| // Otherwise, return to the fast path.
|
| __ jmp(&got_smi_index_);
|
|
|
| @@ -3854,7 +4698,7 @@
|
| __ tst(code_,
|
| Operand(kSmiTagMask |
|
| ((~String::kMaxAsciiCharCode) << kSmiTagSize)));
|
| - __ b(nz, &slow_case_);
|
| + __ b(ne, &slow_case_);
|
|
|
| __ LoadRoot(result_, Heap::kSingleCharacterStringCacheRootIndex);
|
| // At this point code register contains smi tagged ascii char code.
|
| @@ -4301,7 +5145,7 @@
|
| __ add(hash, hash, Operand(hash, LSL, 15), SetCC);
|
|
|
| // if (hash == 0) hash = 27;
|
| - __ mov(hash, Operand(27), LeaveCC, nz);
|
| + __ mov(hash, Operand(27), LeaveCC, eq);
|
| }
|
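| The two instructions above are the tail of the string-hash computation (note
|
| the condition must be eq: zero is reserved to mean "hash not yet computed",
|
| hence the substitute value 27). In plain C++ the step reads:
|
|     #include <stdint.h>
|
|     static uint32_t FinishStringHash(uint32_t hash) {
|
|       hash += hash << 15;
|
|       if (hash == 0) hash = 27;
|
|       return hash;
|
|     }
|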
|
|
|
|
| @@ -4869,6 +5713,56 @@
|
| }
|
|
|
|
|
| +void StringCharAtStub::Generate(MacroAssembler* masm) {
|
| + // Expects two arguments (object, index) on the stack:
|
| + // lr: return address
|
| + // sp[0]: index
|
| + // sp[4]: object
|
| + Register object = r1;
|
| + Register index = r0;
|
| + Register scratch1 = r2;
|
| + Register scratch2 = r3;
|
| + Register result = r0;
|
| +
|
| + // Get object and index from the stack.
|
| + __ pop(index);
|
| + __ pop(object);
|
| +
|
| + Label need_conversion;
|
| + Label index_out_of_range;
|
| + Label done;
|
| + StringCharAtGenerator generator(object,
|
| + index,
|
| + scratch1,
|
| + scratch2,
|
| + result,
|
| + &need_conversion,
|
| + &need_conversion,
|
| + &index_out_of_range,
|
| + STRING_INDEX_IS_NUMBER);
|
| + generator.GenerateFast(masm);
|
| + __ b(&done);
|
| +
|
| + __ bind(&index_out_of_range);
|
| + // When the index is out of range, the spec requires us to return
|
| + // the empty string.
|
| + __ LoadRoot(result, Heap::kEmptyStringRootIndex);
|
| + __ jmp(&done);
|
| +
|
| + __ bind(&need_conversion);
|
| + // Move smi zero into the result register, which will trigger
|
| + // conversion.
|
| + __ mov(result, Operand(Smi::FromInt(0)));
|
| + __ b(&done);
|
| +
|
| + StubRuntimeCallHelper call_helper;
|
| + generator.GenerateSlow(masm, call_helper);
|
| +
|
| + __ bind(&done);
|
| + __ Ret();
|
| +}
|
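| The stub's observable behavior is that of String.prototype.charAt, including
|
| the spec's edge cases; the smi-zero store above merely signals the slow path
|
| that the index still needs conversion. A C++ sketch of the semantics:
|
|     #include <string>
|
|     static std::string CharAt(const std::string& subject, long index) {
|
|       if (index < 0 || index >= static_cast<long>(subject.size()))
|
|         return std::string();  // out of range: the empty string
|
|       return std::string(1, subject[index]);
|
|     }
|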
| +
|
| +
|
| void ICCompareStub::GenerateSmis(MacroAssembler* masm) {
|
| ASSERT(state_ == CompareIC::SMIS);
|
| Label miss;
|
| @@ -4880,10 +5774,9 @@
|
| // For equality we do not care about the sign of the result.
|
| __ sub(r0, r0, r1, SetCC);
|
| } else {
|
| - __ sub(r1, r1, r0, SetCC);
|
| - // Correct sign of result in case of overflow.
|
| - __ rsb(r1, r1, Operand(0), SetCC, vs);
|
| - __ mov(r0, r1);
|
| + // Untag before subtracting to avoid handling overflow.
|
| + __ SmiUntag(r1);
|
| + __ sub(r0, r1, SmiUntagOperand(r0));
|
| }
|
| __ Ret();
|
|
|
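| Untagging first is safe because ARM smis carry a 31-bit payload shifted left
|
| by one: each untagged operand lies in [-2^30, 2^30 - 1], so their 32-bit
|
| difference cannot overflow. A sketch of what the replacement computes:
|
|     #include <stdint.h>
|
|     static int32_t SmiUntag(int32_t smi) { return smi >> 1; }
|
|     // The difference of two untagged smis always fits in 32 bits, so no
|
|     // overflow correction (the old rsb) is needed.
|
|     static int32_t SmiDifference(int32_t lhs_smi, int32_t rhs_smi) {
|
|       return SmiUntag(lhs_smi) - SmiUntag(rhs_smi);
|
|     }
|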
| @@ -4919,8 +5812,7 @@
|
| __ vldr(d1, r2, HeapNumber::kValueOffset);
|
|
|
| // Compare operands
|
| - __ vcmp(d0, d1);
|
| - __ vmrs(pc); // Move vector status bits to normal status bits.
|
| + __ VFPCompareAndSetFlags(d0, d1);
|
|
|
| // Don't base result on status bits when a NaN is involved.
|
| __ b(vs, &unordered);
|
| @@ -4986,6 +5878,185 @@
|
| }
|
|
|
|
|
| +void DirectCEntryStub::Generate(MacroAssembler* masm) {
|
| + __ ldr(pc, MemOperand(sp, 0));
|
| +}
|
| +
|
| +
|
| +void DirectCEntryStub::GenerateCall(MacroAssembler* masm,
|
| + ApiFunction *function) {
|
| + __ mov(lr, Operand(reinterpret_cast<intptr_t>(GetCode().location()),
|
| + RelocInfo::CODE_TARGET));
|
| + __ mov(r2,
|
| + Operand(ExternalReference(function, ExternalReference::DIRECT_CALL)));
|
| + // Store the return address (accessible to the GC through the exit frame pc).
|
| + __ str(pc, MemOperand(sp, 0));
|
| + __ Jump(r2); // Call the api function.
|
| +}
|
| +
|
| +
|
| +void DirectCEntryStub::GenerateCall(MacroAssembler* masm,
|
| + Register target) {
|
| + __ mov(lr, Operand(reinterpret_cast<intptr_t>(GetCode().location()),
|
| + RelocInfo::CODE_TARGET));
|
| + // Store the return address (accessible to the GC through the exit frame pc).
|
| + __ str(pc, MemOperand(sp, 0));
|
| + __ Jump(target); // Call the C++ function.
|
| +}
|
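| The point of this stub pair: a plain bl would leave the return address only
|
| in lr, where a GC running inside the C++ callee cannot relocate it. Storing
|
| pc into the exit frame's sp[0] slot makes it visible to the GC (on ARM,
|
| reading pc yields the current instruction plus 8, so the stored value is
|
| exactly the instruction after the Jump), and lr points at the stub itself,
|
| which resumes through the possibly-rewritten slot. Step by step:
|
|     // 1. lr    <- DirectCEntryStub (whose body is: ldr pc, [sp])
|
|     // 2. sp[0] <- pc              // return address, visible to the GC
|
|     // 3. Jump target              // plain jump, not bl
|
|     // 4. callee returns through lr, landing in the stub
|
|     // 5. ldr pc, [sp]             // resume at the (possibly moved) address
|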
| +
|
| +
|
| +void GenerateFastPixelArrayLoad(MacroAssembler* masm,
|
| + Register receiver,
|
| + Register key,
|
| + Register elements_map,
|
| + Register elements,
|
| + Register scratch1,
|
| + Register scratch2,
|
| + Register result,
|
| + Label* not_pixel_array,
|
| + Label* key_not_smi,
|
| + Label* out_of_range) {
|
| + // Register use:
|
| + //
|
| + // receiver - holds the receiver on entry.
|
| + // Unchanged unless 'result' is the same register.
|
| + //
|
| + // key - holds the smi key on entry.
|
| + // Unchanged unless 'result' is the same register.
|
| + //
|
| + // elements - set to be the receiver's elements on exit.
|
| + //
|
| + // elements_map - set to be the map of the receiver's elements
|
| + // on exit.
|
| + //
|
| + // result - holds the result of the pixel array load on exit,
|
| + // tagged as a smi if successful.
|
| + //
|
| + // Scratch registers:
|
| + //
|
| + // scratch1 - used as a scratch register in the map check; if the map
|
| + // check is successful, contains the length of the
|
| + // pixel array, the pointer to external elements and
|
| + // the untagged result.
|
| + //
|
| + // scratch2 - holds the untagged key.
|
| +
|
| + // Some callers have already verified that the key is a smi. key_not_smi
|
| + // is set to NULL as a sentinel for that case. Otherwise, an explicit
|
| + // check that the key is a smi is emitted here.
|
| + if (key_not_smi != NULL) {
|
| + __ JumpIfNotSmi(key, key_not_smi);
|
| + } else {
|
| + if (FLAG_debug_code) {
|
| + __ AbortIfNotSmi(key);
|
| + }
|
| + }
|
| + __ SmiUntag(scratch2, key);
|
| +
|
| + // Verify that the receiver has pixel array elements.
|
| + __ ldr(elements, FieldMemOperand(receiver, JSObject::kElementsOffset));
|
| + __ CheckMap(elements, scratch1, Heap::kPixelArrayMapRootIndex,
|
| + not_pixel_array, true);
|
| +
|
| + // Key must be in range of the pixel array.
|
| + __ ldr(scratch1, FieldMemOperand(elements, PixelArray::kLengthOffset));
|
| + __ cmp(scratch2, scratch1);
|
| + __ b(hs, out_of_range); // Unsigned check handles negative keys.
|
| +
|
| + // Perform the indexed load and tag the result as a smi.
|
| + __ ldr(scratch1,
|
| + FieldMemOperand(elements, PixelArray::kExternalPointerOffset));
|
| + __ ldrb(scratch1, MemOperand(scratch1, scratch2));
|
| + __ SmiTag(result, scratch1);
|
| + __ Ret();
|
| +}
|
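| In ordinary C++ the fast path computes the following (a sketch; pixel-array
|
| elements are one byte each, and the result is returned smi-tagged):
|
|     #include <stdint.h>
|
|     static bool FastPixelArrayLoad(const uint8_t* external_pointer,
|
|                                    uint32_t length,
|
|                                    int32_t untagged_key,
|
|                                    int32_t* smi_result) {
|
|       // The unsigned bounds check also rejects negative keys.
|
|       if (static_cast<uint32_t>(untagged_key) >= length) return false;
|
|       *smi_result = static_cast<int32_t>(external_pointer[untagged_key]) << 1;
|
|       return true;
|
|     }
|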
| +
|
| +
|
| +void GenerateFastPixelArrayStore(MacroAssembler* masm,
|
| + Register receiver,
|
| + Register key,
|
| + Register value,
|
| + Register elements,
|
| + Register elements_map,
|
| + Register scratch1,
|
| + Register scratch2,
|
| + bool load_elements_from_receiver,
|
| + bool load_elements_map_from_elements,
|
| + Label* key_not_smi,
|
| + Label* value_not_smi,
|
| + Label* not_pixel_array,
|
| + Label* out_of_range) {
|
| + // Register use:
|
| + // receiver - holds the receiver and is unchanged unless the
|
| + // store succeeds.
|
| + // key - holds the key (must be a smi) and is unchanged.
|
| + // value - holds the value (must be a smi) and is unchanged.
|
| + // elements - holds the element object of the receiver on entry if
|
| + // load_elements_from_receiver is false, otherwise used
|
| + // internally to store the pixel array's elements and
|
| + // external array pointer.
|
| + // elements_map - holds the map of the element object if
|
| + // load_elements_map_from_elements is false, otherwise
|
| + // loaded with the element map.
|
| + //
|
| + Register external_pointer = elements;
|
| + Register untagged_key = scratch1;
|
| + Register untagged_value = scratch2;
|
| +
|
| + if (load_elements_from_receiver) {
|
| + __ ldr(elements, FieldMemOperand(receiver, JSObject::kElementsOffset));
|
| + }
|
| +
|
| + // By passing NULL as not_pixel_array, callers signal that they have already
|
| + // verified that the receiver has pixel array elements.
|
| + if (not_pixel_array != NULL) {
|
| + if (load_elements_map_from_elements) {
|
| + __ ldr(elements_map, FieldMemOperand(elements, HeapObject::kMapOffset));
|
| + }
|
| + __ LoadRoot(ip, Heap::kPixelArrayMapRootIndex);
|
| + __ cmp(elements_map, ip);
|
| + __ b(ne, not_pixel_array);
|
| + } else {
|
| + if (FLAG_debug_code) {
|
| + // Map check should have already made sure that elements is a pixel array.
|
| + __ ldr(elements_map, FieldMemOperand(elements, HeapObject::kMapOffset));
|
| + __ LoadRoot(ip, Heap::kPixelArrayMapRootIndex);
|
| + __ cmp(elements_map, ip);
|
| + __ Assert(eq, "Elements isn't a pixel array");
|
| + }
|
| + }
|
| +
|
| + // Some callers have already verified that the key is a smi. key_not_smi
|
| + // is set to NULL as a sentinel for that case. Otherwise, an explicit
|
| + // check that the key is a smi is emitted here.
|
| + if (key_not_smi != NULL) {
|
| + __ JumpIfNotSmi(key, key_not_smi);
|
| + } else {
|
| + if (FLAG_debug_code) {
|
| + __ AbortIfNotSmi(key);
|
| + }
|
| + }
|
| +
|
| + __ SmiUntag(untagged_key, key);
|
| +
|
| + // Perform bounds check.
|
| + __ ldr(scratch2, FieldMemOperand(elements, PixelArray::kLengthOffset));
|
| + __ cmp(untagged_key, scratch2);
|
| + __ b(hs, out_of_range); // Unsigned check handles negative keys.
|
| +
|
| + __ JumpIfNotSmi(value, value_not_smi);
|
| + __ SmiUntag(untagged_value, value);
|
| +
|
| + // Clamp the value to [0..255].
|
| + __ Usat(untagged_value, 8, Operand(untagged_value));
|
| + // Get the pointer to the external array. This clobbers elements.
|
| + __ ldr(external_pointer,
|
| + FieldMemOperand(elements, PixelArray::kExternalPointerOffset));
|
| + __ strb(untagged_value, MemOperand(external_pointer, untagged_key));
|
| + __ Ret();
|
| +}
|
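| The single Usat instruction above performs the clamping the pixel-array spec
|
| requires; written out in C++ (a sketch):
|
|     #include <stdint.h>
|
|     // Equivalent of Usat(value, 8, value): unsigned saturate to 8 bits,
|
|     // i.e. clamp a signed 32-bit value into [0, 255].
|
|     static uint8_t ClampToUint8(int32_t value) {
|
|       if (value < 0) return 0;
|
|       if (value > 255) return 255;
|
|       return static_cast<uint8_t>(value);
|
|     }
|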
| +
|
| +
|
| #undef __
|
|
|
| } } // namespace v8::internal
|
|
|