| Index: src/mips/code-stubs-mips.cc
|
| diff --git a/src/mips/code-stubs-mips.cc b/src/mips/code-stubs-mips.cc
|
| index 0ce551d7b3798eada3dd33f8765a082e04281c03..1aa1838befd3a305e5fe31a27434451405a07a22 100644
|
| --- a/src/mips/code-stubs-mips.cc
|
| +++ b/src/mips/code-stubs-mips.cc
|
| @@ -1718,7 +1718,6 @@ void CompareStub::Generate(MacroAssembler* masm) {
|
| }
|
|
|
|
|
| -// This stub does not handle the inlined cases (Smis, Booleans, undefined).
|
| // The stub returns zero for false, and a non-zero value for true.
|
| void ToBooleanStub::Generate(MacroAssembler* masm) {
|
| // This stub uses FPU instructions.
|
| @@ -1782,7 +1781,7 @@ void ToBooleanStub::Generate(MacroAssembler* masm) {
|
| // "tos_" is a register and contains a non-zero value.
|
| - // Hence we implicitly return true if the greater than
|
| - // condition is satisfied.
|
| + // Hence we implicitly return true if the greater than or equal
|
| + // condition is satisfied.
|
| - __ Ret(gt, scratch0, Operand(FIRST_SPEC_OBJECT_TYPE));
|
| + __ Ret(ge, scratch0, Operand(FIRST_SPEC_OBJECT_TYPE));
|
|
|
| // Check for string.
|
| __ lw(scratch0, FieldMemOperand(tos_, HeapObject::kMapOffset));
|
| @@ -1790,7 +1789,7 @@ void ToBooleanStub::Generate(MacroAssembler* masm) {
|
| // "tos_" is a register and contains a non-zero value.
|
| - // Hence we implicitly return true if the greater than
|
| - // condition is satisfied.
|
| + // Hence we implicitly return true if the greater than or equal
|
| + // condition is satisfied.
|
| - __ Ret(gt, scratch0, Operand(FIRST_NONSTRING_TYPE));
|
| + __ Ret(ge, scratch0, Operand(FIRST_NONSTRING_TYPE));
|
|
|
| // String value => false iff empty, i.e., length is zero.
|
| __ lw(tos_, FieldMemOperand(tos_, String::kLengthOffset));
|
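Both one-character changes fix the same off-by-one: FIRST_SPEC_OBJECT_TYPE and FIRST_NONSTRING_TYPE are inclusive lower bounds, so a value equal to the boundary already belongs to the range, which `gt` missed. A minimal standalone sketch of the corrected check (the enum values are placeholders, not V8's real InstanceType constants):

    #include <cassert>

    enum InstanceType {
      FIRST_NONSTRING_TYPE = 64,     // illustrative value only
      FIRST_SPEC_OBJECT_TYPE = 129   // illustrative value only
    };

    bool IsSpecObject(int instance_type) {
      return instance_type >= FIRST_SPEC_OBJECT_TYPE;  // ge, not gt
    }

    int main() {
      // Under the old gt comparison this assertion would fail: an object
      // whose type is exactly FIRST_SPEC_OBJECT_TYPE fell through to the
      // remaining checks instead of returning true immediately.
      assert(IsSpecObject(FIRST_SPEC_OBJECT_TYPE));
      return 0;
    }
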
| @@ -2789,9 +2788,11 @@ void BinaryOpStub::GenerateInt32Stub(MacroAssembler* masm) {
|
| // DIV just falls through to allocating a heap number.
|
| }
|
|
|
| + __ bind(&return_heap_number);
|
| + // Return a heap number, or fall through to type transition or runtime
|
| + // call if we can't.
|
| if (result_type_ >= ((op_ == Token::DIV) ? BinaryOpIC::HEAP_NUMBER
|
| : BinaryOpIC::INT32)) {
|
| - __ bind(&return_heap_number);
|
| // We are using FPU registers so s0 is available.
|
| heap_number_result = s0;
|
| GenerateHeapResultAllocation(masm,
|
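Hoisting the bind above the result_type_ check matters because earlier paths branch to return_heap_number unconditionally; while the label was bound inside the if, a DIV stub whose recorded result type ruled the heap-number path out left those jumps without a landing point. A rough C++ model of the new emission shape (hand-written sketch, not the stub's actual logic):

    struct Label { bool bound = false; };

    void EmitReturnHeapNumberTail(Label* return_heap_number,
                                  bool heap_number_allowed) {
      return_heap_number->bound = true;   // now bound unconditionally
      if (heap_number_allowed) {
        // ... emit heap-number allocation and the return sequence ...
        return;
      }
      // Fall through: the type-transition / runtime-call code emitted
      // below handles results the recorded type cannot represent.
    }
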
| @@ -2970,7 +2971,11 @@ void BinaryOpStub::GenerateInt32Stub(MacroAssembler* masm) {
|
| UNREACHABLE();
|
| }
|
|
|
| - if (transition.is_linked()) {
|
| + // We never expect DIV to yield an integer result, so we always generate
|
| + // type transition code for DIV operations expecting an integer result: the
|
| + // code will fall through to this type transition.
|
| + if (transition.is_linked() ||
|
| + ((op_ == Token::DIV) && (result_type_ <= BinaryOpIC::INT32))) {
|
| __ bind(&transition);
|
| GenerateTypeTransition(masm);
|
| }
|
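The added condition closes a gap rather than tuning a fast path: JS division routinely produces non-integers (3 / 2 is 1.5), so a DIV site whose recorded result type is INT32 or narrower needs the transition code even if no fast-path branch to it was ever linked. A small model of the decision (the enum ordering mirrors BinaryOpIC as far as I can tell; treat it as an assumption):

    enum ResultType { UNINITIALIZED, SMI, INT32, HEAP_NUMBER };
    enum Op { ADD, SUB, MUL, DIV, MOD };

    bool EmitsTypeTransition(bool transition_linked, Op op, ResultType type) {
      // Transition code is required if any fast path branched to it, or if
      // this is a DIV whose result type cannot hold a fractional value.
      return transition_linked || (op == DIV && type <= INT32);
    }
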
| @@ -3542,15 +3547,10 @@ void CEntryStub::GenerateCore(MacroAssembler* masm,
|
|
|
| __ li(a2, Operand(ExternalReference::isolate_address()));
|
|
|
| - // From arm version of this function:
|
| - // TODO(1242173): To let the GC traverse the return address of the exit
|
| - // frames, we need to know where the return address is. Right now,
|
| - // we push it on the stack to be able to find it again, but we never
|
| - // restore from it in case of changes, which makes it impossible to
|
| - // support moving the C entry code stub. This should be fixed, but currently
|
| - // this is OK because the CEntryStub gets generated so early in the V8 boot
|
| - // sequence that it is not moving ever.
|
| -
|
| + // To let the GC traverse the return address of the exit frames, we need to
|
| + // know where the return address is. The CEntryStub is unmovable, so
|
| + // we can store the address on the stack to be able to find it again and
|
| + // we never have to restore it, because it will not change.
|
| { Assembler::BlockTrampolinePoolScope block_trampoline_pool(masm);
|
| // This branch-and-link sequence is needed to find the current PC on mips,
|
| // saved to the ra register.
|
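The branch-and-link sequence mentioned here is the usual way to materialize the current PC on MIPS, which has no direct PC-read instruction. An illustrative GNU inline-assembly equivalent (assumes a MIPS target and GCC-style asm; the stub emits the sequence through the MacroAssembler instead):

    // bal stores the address of the instruction after its delay slot in ra,
    // giving the code a known PC-relative anchor with no relocation needed.
    void* CurrentPC() {
      void* pc;
      asm volatile(
          "bal   1f       \n\t"   // ra <- address of label 1
          " nop           \n\t"   // branch delay slot
          "1: move %0, $ra\n\t"
          : "=r"(pc)
          :
          : "ra");
      return pc;
    }
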
| @@ -4075,12 +4075,254 @@ void ArgumentsAccessStub::GenerateReadElement(MacroAssembler* masm) {
|
| }
|
|
|
|
|
| -void ArgumentsAccessStub::GenerateNewObject(MacroAssembler* masm) {
|
| +void ArgumentsAccessStub::GenerateNewNonStrictSlow(MacroAssembler* masm) {
|
| // sp[0] : number of parameters
|
| // sp[4] : receiver displacement
|
| // sp[8] : function
|
| + // Check if the calling frame is an arguments adaptor frame.
|
| + Label runtime;
|
| + __ lw(a3, MemOperand(fp, StandardFrameConstants::kCallerFPOffset));
|
| + __ lw(a2, MemOperand(a3, StandardFrameConstants::kContextOffset));
|
| + __ Branch(&runtime, ne,
|
| + a2, Operand(Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR)));
|
| +
|
| + // Patch the arguments.length and the parameters pointer in the current
|
| + // frame.
|
| + __ lw(a2, MemOperand(a3, ArgumentsAdaptorFrameConstants::kLengthOffset));
|
| + __ sw(a2, MemOperand(sp, 0 * kPointerSize));
|
| + __ sll(t3, a2, 1);
|
| + __ Addu(a3, a3, Operand(t3));
|
| + __ addiu(a3, a3, StandardFrameConstants::kCallerSPOffset);
|
| + __ sw(a3, MemOperand(sp, 1 * kPointerSize));
|
| +
|
| + __ bind(&runtime);
|
| + __ TailCallRuntime(Runtime::kNewArgumentsFast, 3, 1);
|
| +}
|
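The sll(t3, a2, 1) above turns the smi-tagged length directly into a byte offset: with V8's 32-bit smi encoding a value v is stored as v << 1, so one more left shift yields v << 2, i.e. v * kPointerSize. A worked check of that arithmetic (assumes the 32-bit tagging scheme):

    #include <cassert>
    #include <cstdint>

    int main() {
      const int32_t kPointerSize = 4;
      int32_t length = 5;               // untagged argument count
      int32_t smi = length << 1;        // smi-tagged representation
      int32_t byte_offset = smi << 1;   // what "sll t3, a2, 1" computes
      assert(byte_offset == length * kPointerSize);
      return 0;
    }
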
| +
|
| +
|
| +void ArgumentsAccessStub::GenerateNewNonStrictFast(MacroAssembler* masm) {
|
| + // Stack layout:
|
| + // sp[0] : number of parameters (tagged)
|
| + // sp[4] : address of receiver argument
|
| + // sp[8] : function
|
| + // Registers used over whole function:
|
| + // t2 : allocated object (tagged)
|
| + // t5 : mapped parameter count (tagged)
|
| +
|
| + __ lw(a1, MemOperand(sp, 0 * kPointerSize));
|
| + // a1 = parameter count (tagged)
|
|
|
| // Check if the calling frame is an arguments adaptor frame.
|
| + Label runtime;
|
| + Label adaptor_frame, try_allocate;
|
| + __ lw(a3, MemOperand(fp, StandardFrameConstants::kCallerFPOffset));
|
| + __ lw(a2, MemOperand(a3, StandardFrameConstants::kContextOffset));
|
| + __ Branch(&adaptor_frame, eq, a2,
|
| + Operand(Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR)));
|
| +
|
| + // No adaptor, parameter count = argument count.
|
| + __ mov(a2, a1);
|
| + __ b(&try_allocate);
|
| + __ nop(); // Branch delay slot nop.
|
| +
|
| + // We have an adaptor frame. Patch the parameters pointer.
|
| + __ bind(&adaptor_frame);
|
| + __ lw(a2, MemOperand(a3, ArgumentsAdaptorFrameConstants::kLengthOffset));
|
| + __ sll(t6, a2, 1);
|
| + __ Addu(a3, a3, Operand(t6));
|
| + __ Addu(a3, a3, Operand(StandardFrameConstants::kCallerSPOffset));
|
| + __ sw(a3, MemOperand(sp, 1 * kPointerSize));
|
| +
|
| + // a1 = parameter count (tagged)
|
| + // a2 = argument count (tagged)
|
| + // Compute the mapped parameter count = min(a1, a2) in a1.
|
| + Label skip_min;
|
| + __ Branch(&skip_min, lt, a1, Operand(a2));
|
| + __ mov(a1, a2);
|
| + __ bind(&skip_min);
|
| +
|
| + __ bind(&try_allocate);
|
| +
|
| + // Compute the sizes of backing store, parameter map, and arguments object.
|
| + // 1. Parameter map, has 2 extra words containing context and backing store.
|
| + const int kParameterMapHeaderSize =
|
| + FixedArray::kHeaderSize + 2 * kPointerSize;
|
| + // If there are no mapped parameters, we do not need the parameter_map.
|
| + Label param_map_size;
|
| + ASSERT_EQ(0, Smi::FromInt(0));
|
| + __ Branch(USE_DELAY_SLOT, ¶m_map_size, eq, a1, Operand(zero_reg));
|
| + __ mov(t5, zero_reg); // In delay slot: param map size = 0 when a1 == 0.
|
| + __ sll(t5, a1, 1);
|
| + __ addiu(t5, t5, kParameterMapHeaderSize);
|
| + __ bind(¶m_map_size);
|
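The USE_DELAY_SLOT branch makes the mov execute on both paths: when a1 is zero the branch is taken with t5 already cleared, and otherwise the shift below overwrites t5 anyway. What the sequence computes, as a C sketch (count kept in smi form, as in the stub):

    // t5 = byte size of the parameter map, or 0 when nothing is mapped.
    int ParamMapSize(int mapped_count_smi, int kParameterMapHeaderSize) {
      int t5 = 0;                       // the delay-slot mov, runs either way
      if (mapped_count_smi != 0) {
        t5 = mapped_count_smi << 1;     // smi count -> byte size of slots
        t5 += kParameterMapHeaderSize;  // plus the two-extra-word header
      }
      return t5;
    }
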
| +
|
| + // 2. Backing store.
|
| + __ sll(t6, a2, 1);
|
| + __ Addu(t5, t5, Operand(t6));
|
| + __ Addu(t5, t5, Operand(FixedArray::kHeaderSize));
|
| +
|
| + // 3. Arguments object.
|
| + __ Addu(t5, t5, Operand(Heap::kArgumentsObjectSize));
|
| +
|
| + // Do the allocation of all three objects in one go.
|
| + __ AllocateInNewSpace(t5, v0, a3, t0, &runtime, TAG_OBJECT);
|
| +
|
| + // v0 = address of new object(s) (tagged)
|
| + // a2 = argument count (tagged)
|
| + // Get the arguments boilerplate from the current (global) context into t0.
|
| + const int kNormalOffset =
|
| + Context::SlotOffset(Context::ARGUMENTS_BOILERPLATE_INDEX);
|
| + const int kAliasedOffset =
|
| + Context::SlotOffset(Context::ALIASED_ARGUMENTS_BOILERPLATE_INDEX);
|
| +
|
| + __ lw(t0, MemOperand(cp, Context::SlotOffset(Context::GLOBAL_INDEX)));
|
| + __ lw(t0, FieldMemOperand(t0, GlobalObject::kGlobalContextOffset));
|
| + Label skip2_ne, skip2_eq;
|
| + __ Branch(&skip2_ne, ne, a1, Operand(zero_reg));
|
| + __ lw(t0, MemOperand(t0, kNormalOffset));
|
| + __ bind(&skip2_ne);
|
| +
|
| + __ Branch(&skip2_eq, eq, a1, Operand(zero_reg));
|
| + __ lw(t0, MemOperand(t0, kAliasedOffset));
|
| + __ bind(&skip2_eq);
|
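The skip2_ne / skip2_eq pair is a two-way select: exactly one of the loads executes, leaving t0 pointing at the right boilerplate. The equivalent C (hypothetical helper; the slot indices stand in for kNormalOffset and kAliasedOffset):

    // t0 = (mapped_count == 0) ? normal boilerplate : aliased boilerplate.
    void* SelectBoilerplate(void** global_context, int normal_slot,
                            int aliased_slot, int mapped_count) {
      return mapped_count == 0 ? global_context[normal_slot]
                               : global_context[aliased_slot];
    }
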
| +
|
| + // v0 = address of new object (tagged)
|
| + // a1 = mapped parameter count (tagged)
|
| + // a2 = argument count (tagged)
|
| + // t0 = address of boilerplate object (tagged)
|
| + // Copy the JS object part.
|
| + for (int i = 0; i < JSObject::kHeaderSize; i += kPointerSize) {
|
| + __ lw(a3, FieldMemOperand(t0, i));
|
| + __ sw(a3, FieldMemOperand(v0, i));
|
| + }
|
| +
|
| + // Set up the callee in-object property.
|
| + STATIC_ASSERT(Heap::kArgumentsCalleeIndex == 1);
|
| + __ lw(a3, MemOperand(sp, 2 * kPointerSize));
|
| + const int kCalleeOffset = JSObject::kHeaderSize +
|
| + Heap::kArgumentsCalleeIndex * kPointerSize;
|
| + __ sw(a3, FieldMemOperand(v0, kCalleeOffset));
|
| +
|
| + // Use the length (smi tagged) and set that as an in-object property too.
|
| + STATIC_ASSERT(Heap::kArgumentsLengthIndex == 0);
|
| + const int kLengthOffset = JSObject::kHeaderSize +
|
| + Heap::kArgumentsLengthIndex * kPointerSize;
|
| + __ sw(a2, FieldMemOperand(v0, kLengthOffset));
|
| +
|
| + // Set up the elements pointer in the allocated arguments object.
|
| + // If we allocated a parameter map, t0 will point there, otherwise
|
| + // it will point to the backing store.
|
| + __ Addu(t0, v0, Operand(Heap::kArgumentsObjectSize));
|
| + __ sw(t0, FieldMemOperand(v0, JSObject::kElementsOffset));
|
| +
|
| + // v0 = address of new object (tagged)
|
| + // a1 = mapped parameter count (tagged)
|
| + // a2 = argument count (tagged)
|
| + // t0 = address of parameter map or backing store (tagged)
|
| + // Initialize parameter map. If there are no mapped arguments, we're done.
|
| + Label skip_parameter_map;
|
| + Label skip3;
|
| + __ Branch(&skip3, ne, a1, Operand(Smi::FromInt(0)));
|
| + // Move backing store address to a3, because it is
|
| + // expected there when filling in the unmapped arguments.
|
| + __ mov(a3, t0);
|
| + __ bind(&skip3);
|
| +
|
| + __ Branch(&skip_parameter_map, eq, a1, Operand(Smi::FromInt(0)));
|
| +
|
| + __ LoadRoot(t2, Heap::kNonStrictArgumentsElementsMapRootIndex);
|
| + __ sw(t2, FieldMemOperand(t0, FixedArray::kMapOffset));
|
| + __ Addu(t2, a1, Operand(Smi::FromInt(2)));
|
| + __ sw(t2, FieldMemOperand(t0, FixedArray::kLengthOffset));
|
| + __ sw(cp, FieldMemOperand(t0, FixedArray::kHeaderSize + 0 * kPointerSize));
|
| + __ sll(t6, a1, 1);
|
| + __ Addu(t2, t0, Operand(t6));
|
| + __ Addu(t2, t2, Operand(kParameterMapHeaderSize));
|
| + __ sw(t2, FieldMemOperand(t0, FixedArray::kHeaderSize + 1 * kPointerSize));
|
| +
|
| + // Copy the parameter slots and the holes in the arguments.
|
| + // We need to fill in mapped_parameter_count slots. They index the context,
|
| + // where parameters are stored in reverse order, at
|
| + // MIN_CONTEXT_SLOTS .. MIN_CONTEXT_SLOTS+parameter_count-1
|
| + // The mapped parameters thus need to get indices
|
| + // MIN_CONTEXT_SLOTS+parameter_count-1 ..
|
| + // MIN_CONTEXT_SLOTS+parameter_count-mapped_parameter_count
|
| + // We loop from right to left.
|
| + Label parameters_loop, parameters_test;
|
| + __ mov(t2, a1);
|
| + __ lw(t5, MemOperand(sp, 0 * kPointerSize));
|
| + __ Addu(t5, t5, Operand(Smi::FromInt(Context::MIN_CONTEXT_SLOTS)));
|
| + __ Subu(t5, t5, Operand(a1));
|
| + __ LoadRoot(t3, Heap::kTheHoleValueRootIndex);
|
| + __ sll(t6, t2, 1);
|
| + __ Addu(a3, t0, Operand(t6));
|
| + __ Addu(a3, a3, Operand(kParameterMapHeaderSize));
|
| +
|
| + // t2 = loop variable (tagged)
|
| + // a1 = mapping index (tagged)
|
| + // a3 = address of backing store (tagged)
|
| + // t0 = address of parameter map (tagged)
|
| + // t1 = temporary scratch (e.g., for address calculation)
|
| + // t3 = the hole value
|
| + __ jmp(¶meters_test);
|
| +
|
| + __ bind(¶meters_loop);
|
| + __ Subu(t2, t2, Operand(Smi::FromInt(1)));
|
| + __ sll(t1, t2, 1);
|
| + __ Addu(t1, t1, Operand(kParameterMapHeaderSize - kHeapObjectTag));
|
| + __ Addu(t6, t0, t1);
|
| + __ sw(t5, MemOperand(t6));
|
| + __ Subu(t1, t1, Operand(kParameterMapHeaderSize - FixedArray::kHeaderSize));
|
| + __ Addu(t6, a3, t1);
|
| + __ sw(t3, MemOperand(t6));
|
| + __ Addu(t5, t5, Operand(Smi::FromInt(1)));
|
| + __ bind(¶meters_test);
|
| + __ Branch(¶meters_loop, ne, t2, Operand(Smi::FromInt(0)));
|
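Numerically, the loop seeds t5 with parameter_count + MIN_CONTEXT_SLOTS - mapped_count (all smi-tagged) and counts it up while t2 counts down, so the mapped slots receive exactly the context indices described in the comment above. A check of that index range (the MIN_CONTEXT_SLOTS value is an assumption for illustration):

    #include <cassert>

    int main() {
      const int kMinContextSlots = 5;   // illustrative, not V8's constant
      int parameter_count = 3;          // formals, stored reversed in context
      int mapped_count = 2;             // min(formals, actual arguments)

      int index = kMinContextSlots + parameter_count - mapped_count;
      for (int i = mapped_count; i != 0; --i, ++index) {
        // Every mapped slot lands inside the parameters' context range.
        assert(index >= kMinContextSlots &&
               index <= kMinContextSlots + parameter_count - 1);
      }
      return 0;
    }
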
| +
|
| + __ bind(&skip_parameter_map);
|
| + // a2 = argument count (tagged)
|
| + // a3 = address of backing store (tagged)
|
| + // t1 = scratch
|
| + // Copy arguments header and remaining slots (if there are any).
|
| + __ LoadRoot(t1, Heap::kFixedArrayMapRootIndex);
|
| + __ sw(t1, FieldMemOperand(a3, FixedArray::kMapOffset));
|
| + __ sw(a2, FieldMemOperand(a3, FixedArray::kLengthOffset));
|
| +
|
| + Label arguments_loop, arguments_test;
|
| + __ mov(t5, a1);
|
| + __ lw(t0, MemOperand(sp, 1 * kPointerSize));
|
| + __ sll(t6, t5, 1);
|
| + __ Subu(t0, t0, Operand(t6));
|
| + __ jmp(&arguments_test);
|
| +
|
| + __ bind(&arguments_loop);
|
| + __ Subu(t0, t0, Operand(kPointerSize));
|
| + __ lw(t2, MemOperand(t0, 0));
|
| + __ sll(t6, t5, 1);
|
| + __ Addu(t1, a3, Operand(t6));
|
| + __ sw(t2, FieldMemOperand(t1, FixedArray::kHeaderSize));
|
| + __ Addu(t5, t5, Operand(Smi::FromInt(1)));
|
| +
|
| + __ bind(&arguments_test);
|
| + __ Branch(&arguments_loop, lt, t5, Operand(a2));
|
| +
|
| + // Return and remove the on-stack parameters.
|
| + __ Addu(sp, sp, Operand(3 * kPointerSize));
|
| + __ Ret();
|
| +
|
| + // Do the runtime call to allocate the arguments object.
|
| + // a2 = argument count (tagged)
|
| + __ bind(&runtime);
|
| + __ sw(a2, MemOperand(sp, 0 * kPointerSize)); // Patch argument count.
|
| + __ TailCallRuntime(Runtime::kNewArgumentsFast, 3, 1);
|
| +}
|
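Pulling the three-part size computation together: for a function with two formals called through an adaptor with four arguments, the single allocation covers parameter map, backing store, and the arguments object itself. A worked example (32-bit layout assumed; header and object sizes are illustrative stand-ins for the real constants):

    #include <cstdio>

    int main() {
      const int kPointerSize = 4;
      const int kFixedArrayHeaderSize = 8;    // map + length
      const int kArgumentsObjectSize = 20;    // assumed JSObject + 2 props
      const int kParameterMapHeaderSize = kFixedArrayHeaderSize +
                                          2 * kPointerSize;

      int argument_count = 4;   // actual arguments passed
      int parameter_count = 2;  // formal parameters
      int mapped = parameter_count < argument_count ? parameter_count
                                                    : argument_count;

      // 1. Parameter map, only present when something is mapped.
      int size = mapped == 0 ? 0 : mapped * kPointerSize +
                                   kParameterMapHeaderSize;
      // 2. Backing store holding every argument.
      size += argument_count * kPointerSize + kFixedArrayHeaderSize;
      // 3. The arguments object itself.
      size += kArgumentsObjectSize;

      printf("allocate %d bytes in one go\n", size);  // 24 + 24 + 20 = 68
      return 0;
    }
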
| +
|
| +
|
| +void ArgumentsAccessStub::GenerateNewStrict(MacroAssembler* masm) {
|
| + // sp[0] : number of parameters
|
| + // sp[4] : receiver displacement
|
| + // sp[8] : function
|
| + // Check if the calling frame is an arguments adaptor frame.
|
| Label adaptor_frame, try_allocate, runtime;
|
| __ lw(a2, MemOperand(fp, StandardFrameConstants::kCallerFPOffset));
|
| __ lw(a3, MemOperand(a2, StandardFrameConstants::kContextOffset));
|
| @@ -4112,40 +4353,31 @@ void ArgumentsAccessStub::GenerateNewObject(MacroAssembler* masm) {
|
|
|
| __ Addu(a1, a1, Operand(FixedArray::kHeaderSize / kPointerSize));
|
| __ bind(&add_arguments_object);
|
| - __ Addu(a1, a1, Operand(GetArgumentsObjectSize() / kPointerSize));
|
| + __ Addu(a1, a1, Operand(Heap::kArgumentsObjectSizeStrict / kPointerSize));
|
|
|
| // Do the allocation of both objects in one go.
|
| - __ AllocateInNewSpace(
|
| - a1,
|
| - v0,
|
| - a2,
|
| - a3,
|
| - &runtime,
|
| - static_cast<AllocationFlags>(TAG_OBJECT | SIZE_IN_WORDS));
|
| + __ AllocateInNewSpace(a1,
|
| + v0,
|
| + a2,
|
| + a3,
|
| + &runtime,
|
| + static_cast<AllocationFlags>(TAG_OBJECT |
|
| + SIZE_IN_WORDS));
|
|
|
| // Get the arguments boilerplate from the current (global) context.
|
| __ lw(t0, MemOperand(cp, Context::SlotOffset(Context::GLOBAL_INDEX)));
|
| __ lw(t0, FieldMemOperand(t0, GlobalObject::kGlobalContextOffset));
|
| - __ lw(t0, MemOperand(t0,
|
| - Context::SlotOffset(GetArgumentsBoilerplateIndex())));
|
| + __ lw(t0, MemOperand(t0, Context::SlotOffset(
|
| + Context::STRICT_MODE_ARGUMENTS_BOILERPLATE_INDEX)));
|
|
|
| // Copy the JS object part.
|
| __ CopyFields(v0, t0, a3.bit(), JSObject::kHeaderSize / kPointerSize);
|
|
|
| - if (type_ == NEW_NON_STRICT) {
|
| - // Setup the callee in-object property.
|
| - STATIC_ASSERT(Heap::kArgumentsCalleeIndex == 1);
|
| - __ lw(a3, MemOperand(sp, 2 * kPointerSize));
|
| - const int kCalleeOffset = JSObject::kHeaderSize +
|
| - Heap::kArgumentsCalleeIndex * kPointerSize;
|
| - __ sw(a3, FieldMemOperand(v0, kCalleeOffset));
|
| - }
|
| -
|
| // Get the length (smi tagged) and set that as an in-object property too.
|
| STATIC_ASSERT(Heap::kArgumentsLengthIndex == 0);
|
| __ lw(a1, MemOperand(sp, 0 * kPointerSize));
|
| __ sw(a1, FieldMemOperand(v0, JSObject::kHeaderSize +
|
| - Heap::kArgumentsLengthIndex * kPointerSize));
|
| + Heap::kArgumentsLengthIndex * kPointerSize));
|
|
|
| Label done;
|
| __ Branch(&done, eq, a1, Operand(zero_reg));
|
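For contrast with the non-strict fast case above: the strict-mode object built here is one in-object field smaller and its elements array is always a plain FixedArray copy, never a parameter map. A rough picture of the two layouts (field order follows the JSObject header plus the asserted length/callee indices; treat the structs as a sketch, not V8's definitions):

    struct StrictArguments {      // Heap::kArgumentsObjectSizeStrict
      void* map;                  // JSObject header
      void* properties;
      void* elements;             // always a plain FixedArray copy
      void* length;               // smi, in-object property 0
    };

    struct NonStrictArguments {   // Heap::kArgumentsObjectSize
      void* map;
      void* properties;
      void* elements;             // parameter map or backing store
      void* length;               // smi, in-object property 0
      void* callee;               // in-object property 1, dropped in
    };                            // strict mode, hence the smaller size
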
| @@ -4155,12 +4387,13 @@ void ArgumentsAccessStub::GenerateNewObject(MacroAssembler* masm) {
|
|
|
| // Setup the elements pointer in the allocated arguments object and
|
| // initialize the header in the elements fixed array.
|
| - __ Addu(t0, v0, Operand(GetArgumentsObjectSize()));
|
| + __ Addu(t0, v0, Operand(Heap::kArgumentsObjectSizeStrict));
|
| __ sw(t0, FieldMemOperand(v0, JSObject::kElementsOffset));
|
| __ LoadRoot(a3, Heap::kFixedArrayMapRootIndex);
|
| __ sw(a3, FieldMemOperand(t0, FixedArray::kMapOffset));
|
| __ sw(a1, FieldMemOperand(t0, FixedArray::kLengthOffset));
|
| - __ srl(a1, a1, kSmiTagSize); // Untag the length for the loop.
|
| + // Untag the length for the loop.
|
| + __ srl(a1, a1, kSmiTagSize);
|
|
|
| // Copy the fixed array slots.
|
| Label loop;
|
| @@ -4184,7 +4417,7 @@ void ArgumentsAccessStub::GenerateNewObject(MacroAssembler* masm) {
|
|
|
| // Do the runtime call to allocate the arguments object.
|
| __ bind(&runtime);
|
| - __ TailCallRuntime(Runtime::kNewArgumentsFast, 3, 1);
|
| + __ TailCallRuntime(Runtime::kNewStrictArgumentsFast, 3, 1);
|
| }
|
|
|
|
|
|
|