OLD | NEW |
1 // Copyright 2012 the V8 project authors. All rights reserved. | 1 // Copyright 2012 the V8 project authors. All rights reserved. |
2 // Use of this source code is governed by a BSD-style license that can be | 2 // Use of this source code is governed by a BSD-style license that can be |
3 // found in the LICENSE file. | 3 // found in the LICENSE file. |
4 | 4 |
5 #if V8_TARGET_ARCH_MIPS | 5 #if V8_TARGET_ARCH_MIPS |
6 | 6 |
7 #include "src/base/bits.h" | 7 #include "src/base/bits.h" |
8 #include "src/bootstrapper.h" | 8 #include "src/bootstrapper.h" |
9 #include "src/code-stubs.h" | 9 #include "src/code-stubs.h" |
10 #include "src/codegen.h" | 10 #include "src/codegen.h" |
(...skipping 2559 matching lines...)
2570 __ mov(a3, a1); | 2570 __ mov(a3, a1); |
2571 ArrayConstructorStub stub(masm->isolate(), arg_count()); | 2571 ArrayConstructorStub stub(masm->isolate(), arg_count()); |
2572 __ TailCallStub(&stub); | 2572 __ TailCallStub(&stub); |
2573 } | 2573 } |
2574 | 2574 |
2575 | 2575 |
2576 void CallICStub::Generate(MacroAssembler* masm) { | 2576 void CallICStub::Generate(MacroAssembler* masm) { |
2577 // a1 - function | 2577 // a1 - function |
2578 // a3 - slot id (Smi) | 2578 // a3 - slot id (Smi) |
2579 // a2 - vector | 2579 // a2 - vector |
2580 const int with_types_offset = | |
2581 FixedArray::OffsetOfElementAt(TypeFeedbackVector::kWithTypesIndex); | |
2582 const int generic_offset = | |
2583 FixedArray::OffsetOfElementAt(TypeFeedbackVector::kGenericCountIndex); | |
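The two constants deleted here are plain index-to-byte-offset arithmetic over the feedback vector. A minimal sketch, assuming the stock FixedArray layout of a header followed by pointer-sized elements (illustrative, not the literal V8 source):

    // Byte offset of element `index` in a FixedArray, header included.
    // Assumes kHeaderSize and kPointerSize as in stock 32-bit V8.
    int OffsetOfElementAt(int index) {
      return FixedArray::kHeaderSize + index * kPointerSize;
    }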
2584 Label extra_checks_or_miss, call, call_function; | 2580 Label extra_checks_or_miss, call, call_function; |
2585 int argc = arg_count(); | 2581 int argc = arg_count(); |
2586 ParameterCount actual(argc); | 2582 ParameterCount actual(argc); |
2587 | 2583 |
2588   // The checks. First, does a1 match the recorded monomorphic target?  |  2584   // The checks. First, does a1 match the recorded monomorphic target?  |
2589 __ sll(t0, a3, kPointerSizeLog2 - kSmiTagSize); | 2585 __ sll(t0, a3, kPointerSizeLog2 - kSmiTagSize); |
2590 __ Addu(t0, a2, Operand(t0)); | 2586 __ Addu(t0, a2, Operand(t0)); |
2591 __ lw(t0, FieldMemOperand(t0, FixedArray::kHeaderSize)); | 2587 __ lw(t0, FieldMemOperand(t0, FixedArray::kHeaderSize)); |
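These three instructions convert the Smi slot id in a3 into a byte offset and load the feedback entry. A hedged C++-level reading, assuming 32-bit Smis (kSmiTagSize == 1, kPointerSizeLog2 == 2), so that untagging and scaling fold into the single sll; `vector` and `slot_smi` are illustrative names, not V8 variables:

    // sll computes (slot_smi << 1) == index * kPointerSize, Addu adds the base,
    // and lw loads header-relative (kHeaderSize - kHeapObjectTag).
    intptr_t index = slot_smi >> kSmiTagSize;   // untag the Smi slot id
    Object* feedback = vector->get(index);      // the recorded feedback entry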
2592 | 2588 |
2593 // We don't know that we have a weak cell. We might have a private symbol | 2589 // We don't know that we have a weak cell. We might have a private symbol |
(...skipping 56 matching lines...)
2650 | 2646 |
2651 // We are going megamorphic. If the feedback is a JSFunction, it is fine | 2647 // We are going megamorphic. If the feedback is a JSFunction, it is fine |
2652 // to handle it here. More complex cases are dealt with in the runtime. | 2648 // to handle it here. More complex cases are dealt with in the runtime. |
2653 __ AssertNotSmi(t0); | 2649 __ AssertNotSmi(t0); |
2654 __ GetObjectType(t0, t1, t1); | 2650 __ GetObjectType(t0, t1, t1); |
2655 __ Branch(&miss, ne, t1, Operand(JS_FUNCTION_TYPE)); | 2651 __ Branch(&miss, ne, t1, Operand(JS_FUNCTION_TYPE)); |
2656 __ sll(t0, a3, kPointerSizeLog2 - kSmiTagSize); | 2652 __ sll(t0, a3, kPointerSizeLog2 - kSmiTagSize); |
2657 __ Addu(t0, a2, Operand(t0)); | 2653 __ Addu(t0, a2, Operand(t0)); |
2658 __ LoadRoot(at, Heap::kmegamorphic_symbolRootIndex); | 2654 __ LoadRoot(at, Heap::kmegamorphic_symbolRootIndex); |
2659 __ sw(at, FieldMemOperand(t0, FixedArray::kHeaderSize)); | 2655 __ sw(at, FieldMemOperand(t0, FixedArray::kHeaderSize)); |
2660 // We have to update statistics for runtime profiling. | |
2661 __ lw(t0, FieldMemOperand(a2, with_types_offset)); | |
2662 __ Subu(t0, t0, Operand(Smi::FromInt(1))); | |
2663 __ sw(t0, FieldMemOperand(a2, with_types_offset)); | |
2664 __ lw(t0, FieldMemOperand(a2, generic_offset)); | |
2665 __ Addu(t0, t0, Operand(Smi::FromInt(1))); | |
2666 __ sw(t0, FieldMemOperand(a2, generic_offset)); | |
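Read as C++, the stanza removed here shifted one unit of profiling weight from the WITH_TYPES counter to the GENERIC counter when a call site went megamorphic. A rough sketch over the names visible above (not the actual runtime code):

    // vector[kWithTypesIndex] -= 1;  vector[kGenericCountIndex] += 1;
    int with = Smi::cast(vector->get(TypeFeedbackVector::kWithTypesIndex))->value();
    vector->set(TypeFeedbackVector::kWithTypesIndex, Smi::FromInt(with - 1));
    int generic = Smi::cast(vector->get(TypeFeedbackVector::kGenericCountIndex))->value();
    vector->set(TypeFeedbackVector::kGenericCountIndex, Smi::FromInt(generic + 1));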
2667 | 2656 |
2668 __ bind(&call); | 2657 __ bind(&call); |
2669 __ Jump(masm->isolate()->builtins()->Call(convert_mode()), | 2658 __ Jump(masm->isolate()->builtins()->Call(convert_mode()), |
2670 RelocInfo::CODE_TARGET, al, zero_reg, Operand(zero_reg), | 2659 RelocInfo::CODE_TARGET, al, zero_reg, Operand(zero_reg), |
2671 USE_DELAY_SLOT); | 2660 USE_DELAY_SLOT); |
2672 __ li(a0, Operand(argc)); // In delay slot. | 2661 __ li(a0, Operand(argc)); // In delay slot. |
2673 | 2662 |
2674 __ bind(&uninitialized); | 2663 __ bind(&uninitialized); |
2675 | 2664 |
2676 // We are going monomorphic, provided we actually have a JSFunction. | 2665 // We are going monomorphic, provided we actually have a JSFunction. |
2677 __ JumpIfSmi(a1, &miss); | 2666 __ JumpIfSmi(a1, &miss); |
2678 | 2667 |
2679   // Go to the miss case if we do not have a function.  |  2668   // Go to the miss case if we do not have a function.  |
2680 __ GetObjectType(a1, t0, t0); | 2669 __ GetObjectType(a1, t0, t0); |
2681 __ Branch(&miss, ne, t0, Operand(JS_FUNCTION_TYPE)); | 2670 __ Branch(&miss, ne, t0, Operand(JS_FUNCTION_TYPE)); |
2682 | 2671 |
2683 // Make sure the function is not the Array() function, which requires special | 2672 // Make sure the function is not the Array() function, which requires special |
2684 // behavior on MISS. | 2673 // behavior on MISS. |
2685 __ LoadNativeContextSlot(Context::ARRAY_FUNCTION_INDEX, t0); | 2674 __ LoadNativeContextSlot(Context::ARRAY_FUNCTION_INDEX, t0); |
2686 __ Branch(&miss, eq, a1, Operand(t0)); | 2675 __ Branch(&miss, eq, a1, Operand(t0)); |
2687 | 2676 |
2688 // Make sure the function belongs to the same native context. | 2677 // Make sure the function belongs to the same native context. |
2689 __ lw(t0, FieldMemOperand(a1, JSFunction::kContextOffset)); | 2678 __ lw(t0, FieldMemOperand(a1, JSFunction::kContextOffset)); |
2690 __ lw(t0, ContextMemOperand(t0, Context::NATIVE_CONTEXT_INDEX)); | 2679 __ lw(t0, ContextMemOperand(t0, Context::NATIVE_CONTEXT_INDEX)); |
2691 __ lw(t1, NativeContextMemOperand()); | 2680 __ lw(t1, NativeContextMemOperand()); |
2692 __ Branch(&miss, ne, t0, Operand(t1)); | 2681 __ Branch(&miss, ne, t0, Operand(t1)); |
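The three loads walk from the callee to its native context and compare against the caller's. A hedged pseudo-C++ rendering (illustrative names; the isolate accessor here is an assumption, not necessarily the exact V8 API):

    // Miss unless function->context()->native_context() matches the
    // currently running native context.
    Context* native = function->context()->native_context();
    if (native != isolate->native_context()) goto miss;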
2693 | 2682 |
2694 // Update stats. | |
2695 __ lw(t0, FieldMemOperand(a2, with_types_offset)); | |
2696 __ Addu(t0, t0, Operand(Smi::FromInt(1))); | |
2697 __ sw(t0, FieldMemOperand(a2, with_types_offset)); | |
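Symmetric to the megamorphic path above: this deleted stanza bumped WITH_TYPES when a call site went monomorphic. Sketched the same way (not the actual runtime code):

    // vector[kWithTypesIndex] += 1;
    int with = Smi::cast(vector->get(TypeFeedbackVector::kWithTypesIndex))->value();
    vector->set(TypeFeedbackVector::kWithTypesIndex, Smi::FromInt(with + 1));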
2698 | |
2699 // Initialize the call counter. | 2683 // Initialize the call counter. |
2700 __ sll(at, a3, kPointerSizeLog2 - kSmiTagSize); | 2684 __ sll(at, a3, kPointerSizeLog2 - kSmiTagSize); |
2701 __ Addu(at, a2, Operand(at)); | 2685 __ Addu(at, a2, Operand(at)); |
2702 __ li(t0, Operand(Smi::FromInt(CallICNexus::kCallCountIncrement))); | 2686 __ li(t0, Operand(Smi::FromInt(CallICNexus::kCallCountIncrement))); |
2703 __ sw(t0, FieldMemOperand(at, FixedArray::kHeaderSize + kPointerSize)); | 2687 __ sw(t0, FieldMemOperand(at, FixedArray::kHeaderSize + kPointerSize)); |
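The call counter lives one pointer past the feedback cell, i.e. in the vector slot right after the one indexed by a3, and is seeded with a single increment. A one-line sketch (`slot_index` is an illustrative name):

    // vector[slot_index + 1] = initial call count
    vector->set(slot_index + 1, Smi::FromInt(CallICNexus::kCallCountIncrement));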
2704 | 2688 |
2705 // Store the function. Use a stub since we need a frame for allocation. | 2689 // Store the function. Use a stub since we need a frame for allocation. |
2706 // a2 - vector | 2690 // a2 - vector |
2707 // a3 - slot | 2691 // a3 - slot |
2708 // a1 - function | 2692 // a1 - function |
(...skipping 2872 matching lines...)
5581 MemOperand(fp, 6 * kPointerSize), NULL); | 5565 MemOperand(fp, 6 * kPointerSize), NULL); |
5582 } | 5566 } |
5583 | 5567 |
5584 | 5568 |
5585 #undef __ | 5569 #undef __ |
5586 | 5570 |
5587 } // namespace internal | 5571 } // namespace internal |
5588 } // namespace v8 | 5572 } // namespace v8 |
5589 | 5573 |
5590 #endif // V8_TARGET_ARCH_MIPS | 5574 #endif // V8_TARGET_ARCH_MIPS |