| OLD | NEW |
| 1 // Copyright 2012 the V8 project authors. All rights reserved. | 1 // Copyright 2012 the V8 project authors. All rights reserved. |
| 2 // Use of this source code is governed by a BSD-style license that can be | 2 // Use of this source code is governed by a BSD-style license that can be |
| 3 // found in the LICENSE file. | 3 // found in the LICENSE file. |
| 4 | 4 |
| 5 #include "src/arm/lithium-codegen-arm.h" | 5 #include "src/arm/lithium-codegen-arm.h" |
| 6 #include "src/arm/lithium-gap-resolver-arm.h" | 6 #include "src/arm/lithium-gap-resolver-arm.h" |
| 7 #include "src/base/bits.h" | 7 #include "src/base/bits.h" |
| 8 #include "src/code-factory.h" | 8 #include "src/code-factory.h" |
| 9 #include "src/code-stubs.h" | 9 #include "src/code-stubs.h" |
| 10 #include "src/cpu-profiler.h" | 10 #include "src/cpu-profiler.h" |
| (...skipping 2132 matching lines...) | |
| 2143 __ b(NegateCondition(condition), chunk_->GetAssemblyLabel(right_block)); | 2143 __ b(NegateCondition(condition), chunk_->GetAssemblyLabel(right_block)); |
| 2144 } else if (right_block == next_block) { | 2144 } else if (right_block == next_block) { |
| 2145 __ b(condition, chunk_->GetAssemblyLabel(left_block)); | 2145 __ b(condition, chunk_->GetAssemblyLabel(left_block)); |
| 2146 } else { | 2146 } else { |
| 2147 __ b(condition, chunk_->GetAssemblyLabel(left_block)); | 2147 __ b(condition, chunk_->GetAssemblyLabel(left_block)); |
| 2148 __ b(chunk_->GetAssemblyLabel(right_block)); | 2148 __ b(chunk_->GetAssemblyLabel(right_block)); |
| 2149 } | 2149 } |
| 2150 } | 2150 } |
| 2151 | 2151 |
| 2152 | 2152 |
| 2153 template<class InstrType> | 2153 template <class InstrType> |
| | 2154 void LCodeGen::EmitTrueBranch(InstrType instr, Condition condition) { |
| | 2155 int true_block = instr->TrueDestination(chunk_); |
| | 2156 __ b(condition, chunk_->GetAssemblyLabel(true_block)); |
| | 2157 } |
| | 2158 |
| | 2159 |
| | 2160 template <class InstrType> |
| 2154 void LCodeGen::EmitFalseBranch(InstrType instr, Condition condition) { | 2161 void LCodeGen::EmitFalseBranch(InstrType instr, Condition condition) { |
| 2155 int false_block = instr->FalseDestination(chunk_); | 2162 int false_block = instr->FalseDestination(chunk_); |
| 2156 __ b(condition, chunk_->GetAssemblyLabel(false_block)); | 2163 __ b(condition, chunk_->GetAssemblyLabel(false_block)); |
| 2157 } | 2164 } |
| 2158 | 2165 |
| 2159 | 2166 |
| 2160 void LCodeGen::DoDebugBreak(LDebugBreak* instr) { | 2167 void LCodeGen::DoDebugBreak(LDebugBreak* instr) { |
| 2161 __ stop("LBreak"); | 2168 __ stop("LBreak"); |
| 2162 } | 2169 } |
| 2163 | 2170 |
| (...skipping 554 matching lines...) | |
| 2718 Register temp = ToRegister(instr->temp()); | 2725 Register temp = ToRegister(instr->temp()); |
| 2719 | 2726 |
| 2720 __ ldr(temp, FieldMemOperand(reg, HeapObject::kMapOffset)); | 2727 __ ldr(temp, FieldMemOperand(reg, HeapObject::kMapOffset)); |
| 2721 __ cmp(temp, Operand(instr->map())); | 2728 __ cmp(temp, Operand(instr->map())); |
| 2722 EmitBranch(instr, eq); | 2729 EmitBranch(instr, eq); |
| 2723 } | 2730 } |
| 2724 | 2731 |
| 2725 | 2732 |
| 2726 void LCodeGen::DoInstanceOf(LInstanceOf* instr) { | 2733 void LCodeGen::DoInstanceOf(LInstanceOf* instr) { |
| 2727 DCHECK(ToRegister(instr->context()).is(cp)); | 2734 DCHECK(ToRegister(instr->context()).is(cp)); |
| 2728 DCHECK(ToRegister(instr->left()).is(r0)); // Object is in r0. | 2735 DCHECK(ToRegister(instr->left()).is(InstanceOfDescriptor::LeftRegister())); |
| 2729 DCHECK(ToRegister(instr->right()).is(r1)); // Function is in r1. | 2736 DCHECK(ToRegister(instr->right()).is(InstanceOfDescriptor::RightRegister())); |
| 2730 | 2737 DCHECK(ToRegister(instr->result()).is(r0)); |
| 2731 InstanceofStub stub(isolate(), InstanceofStub::kArgsInRegisters); | 2738 InstanceOfStub stub(isolate()); |
| 2732 CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr); | 2739 CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr); |
| 2733 | |
| 2734 __ cmp(r0, Operand::Zero()); | |
| 2735 __ mov(r0, Operand(factory()->false_value()), LeaveCC, ne); | |
| 2736 __ mov(r0, Operand(factory()->true_value()), LeaveCC, eq); | |
| 2737 } | 2740 } |
| 2738 | 2741 |
| 2739 | 2742 |
| 2740 void LCodeGen::DoInstanceOfKnownGlobal(LInstanceOfKnownGlobal* instr) { | 2743 void LCodeGen::DoHasInPrototypeChainAndBranch( |
| 2741 class DeferredInstanceOfKnownGlobal final : public LDeferredCode { | 2744 LHasInPrototypeChainAndBranch* instr) { |
| 2742 public: | 2745 Register const object = ToRegister(instr->object()); |
| 2743 DeferredInstanceOfKnownGlobal(LCodeGen* codegen, | 2746 Register const object_map = scratch0(); |
| 2744 LInstanceOfKnownGlobal* instr) | 2747 Register const object_prototype = object_map; |
| 2745 : LDeferredCode(codegen), instr_(instr) { } | 2748 Register const prototype = ToRegister(instr->prototype()); |
| 2746 void Generate() override { | |
| 2747 codegen()->DoDeferredInstanceOfKnownGlobal(instr_, &map_check_, | |
| 2748 &load_bool_); | |
| 2749 } | |
| 2750 LInstruction* instr() override { return instr_; } | |
| 2751 Label* map_check() { return &map_check_; } | |
| 2752 Label* load_bool() { return &load_bool_; } | |
| 2753 | 2749 |
| 2754 private: | 2750 // The {object} must be a spec object. It's sufficient to know that {object} |
| 2755 LInstanceOfKnownGlobal* instr_; | 2751 // is not a smi, since all other non-spec objects have {null} prototypes and |
| 2756 Label map_check_; | 2752 // will be ruled out below. |
| 2757 Label load_bool_; | 2753 if (instr->hydrogen()->ObjectNeedsSmiCheck()) { |
| 2758 }; | 2754 __ SmiTst(object); |
| | 2755 EmitFalseBranch(instr, eq); |
| | 2756 } |
| 2759 | 2757 |
| 2760 DeferredInstanceOfKnownGlobal* deferred; | 2758 // Loop through the {object}'s prototype chain looking for the {prototype}. |
| 2761 deferred = new(zone()) DeferredInstanceOfKnownGlobal(this, instr); | 2759 __ ldr(object_map, FieldMemOperand(object, HeapObject::kMapOffset)); |
| 2762 | 2760 Label loop; |
| 2763 Label done, false_result; | 2761 __ bind(&loop); |
| 2764 Register object = ToRegister(instr->value()); | 2762 __ ldr(object_prototype, FieldMemOperand(object_map, Map::kPrototypeOffset)); |
| 2765 Register temp = ToRegister(instr->temp()); | 2763 __ cmp(object_prototype, prototype); |
| 2766 Register result = ToRegister(instr->result()); | 2764 EmitTrueBranch(instr, eq); |
| 2767 | 2765 __ CompareRoot(object_prototype, Heap::kNullValueRootIndex); |
| 2768 // A Smi is not an instance of anything. | 2766 EmitFalseBranch(instr, eq); |
| 2769 __ JumpIfSmi(object, &false_result); | 2767 __ ldr(object_map, FieldMemOperand(object_prototype, HeapObject::kMapOffset)); |
| 2770 | 2768 __ b(&loop); |
| 2771 // This is the inlined call site instanceof cache. The two occurrences of the | |
| 2772 // hole value will be patched to the last map/result pair generated by the | |
| 2773 // instanceof stub. | |
| 2774 Label cache_miss; | |
| 2775 Register map = temp; | |
| 2776 __ ldr(map, FieldMemOperand(object, HeapObject::kMapOffset)); | |
| 2777 { | |
| 2778 // Block constant pool emission to ensure the positions of instructions are | |
| 2779 // as expected by the patcher. See InstanceofStub::Generate(). | |
| 2780 Assembler::BlockConstPoolScope block_const_pool(masm()); | |
| 2781 __ bind(deferred->map_check()); // Label for calculating code patching. | |
| 2782 // We use Factory::the_hole_value() on purpose instead of loading from the | |
| 2783 // root array to force relocation to be able to later patch with | |
| 2784 // the cached map. | |
| 2785 Handle<Cell> cell = factory()->NewCell(factory()->the_hole_value()); | |
| 2786 __ mov(ip, Operand(cell)); | |
| 2787 __ ldr(ip, FieldMemOperand(ip, Cell::kValueOffset)); | |
| 2788 __ cmp(map, Operand(ip)); | |
| 2789 __ b(ne, &cache_miss); | |
| 2790 __ bind(deferred->load_bool()); // Label for calculating code patching. | |
| 2791 // We use Factory::the_hole_value() on purpose instead of loading from the | |
| 2792 // root array to force relocation to be able to later patch | |
| 2793 // with true or false. | |
| 2794 __ mov(result, Operand(factory()->the_hole_value())); | |
| 2795 } | |
| 2796 __ b(&done); | |
| 2797 | |
| 2798 // The inlined call site cache did not match. Check null and string before | |
| 2799 // calling the deferred code. | |
| 2800 __ bind(&cache_miss); | |
| 2801 // Null is not an instance of anything. | |
| 2802 __ LoadRoot(ip, Heap::kNullValueRootIndex); | |
| 2803 __ cmp(object, Operand(ip)); | |
| 2804 __ b(eq, &false_result); | |
| 2805 | |
| 2806 // String values are not instances of anything. | |
| 2807 Condition is_string = masm_->IsObjectStringType(object, temp); | |
| 2808 __ b(is_string, &false_result); | |
| 2809 | |
| 2810 // Go to the deferred code. | |
| 2811 __ b(deferred->entry()); | |
| 2812 | |
| 2813 __ bind(&false_result); | |
| 2814 __ LoadRoot(result, Heap::kFalseValueRootIndex); | |
| 2815 | |
| 2816 // Here result contains either true or false. The deferred code also | |
| 2817 // produces a true or false object. | |
| 2818 __ bind(deferred->exit()); | |
| 2819 __ bind(&done); | |
| 2820 } | 2769 } |
| 2821 | 2770 |
| 2822 | 2771 |
| 2823 void LCodeGen::DoDeferredInstanceOfKnownGlobal(LInstanceOfKnownGlobal* instr, | |
| 2824 Label* map_check, | |
| 2825 Label* bool_load) { | |
| 2826 InstanceofStub::Flags flags = InstanceofStub::kNoFlags; | |
| 2827 flags = static_cast<InstanceofStub::Flags>( | |
| 2828 flags | InstanceofStub::kArgsInRegisters); | |
| 2829 flags = static_cast<InstanceofStub::Flags>( | |
| 2830 flags | InstanceofStub::kCallSiteInlineCheck); | |
| 2831 flags = static_cast<InstanceofStub::Flags>( | |
| 2832 flags | InstanceofStub::kReturnTrueFalseObject); | |
| 2833 InstanceofStub stub(isolate(), flags); | |
| 2834 | |
| 2835 PushSafepointRegistersScope scope(this); | |
| 2836 LoadContextFromDeferred(instr->context()); | |
| 2837 | |
| 2838 __ Move(InstanceofStub::right(), instr->function()); | |
| 2839 | |
| 2840 int call_size = CallCodeSize(stub.GetCode(), RelocInfo::CODE_TARGET); | |
| 2841 int additional_delta = (call_size / Assembler::kInstrSize) + 4; | |
| 2842 { | |
| 2843 // Make sure that code size is predictable, since we use specific constant | |
| 2844 // offsets in the code to find embedded values. | |
| 2845 PredictableCodeSizeScope predictable( | |
| 2846 masm_, additional_delta * Assembler::kInstrSize); | |
| 2847 // The labels must already be bound since the code has predictable size up | |
| 2848 // to the call instruction. | |
| 2849 DCHECK(map_check->is_bound()); | |
| 2850 DCHECK(bool_load->is_bound()); | |
| 2851 // Make sure we don't emit any additional entries in the constant pool | |
| 2852 // before the call to ensure that the CallCodeSize() calculated the | |
| 2853 // correct number of instructions for the constant pool load. | |
| 2854 { | |
| 2855 ConstantPoolUnavailableScope constant_pool_unavailable(masm_); | |
| 2856 int map_check_delta = | |
| 2857 masm_->InstructionsGeneratedSince(map_check) + additional_delta; | |
| 2858 int bool_load_delta = | |
| 2859 masm_->InstructionsGeneratedSince(bool_load) + additional_delta; | |
| 2860 Label before_push_delta; | |
| 2861 __ bind(&before_push_delta); | |
| 2862 __ BlockConstPoolFor(additional_delta); | |
| 2863 // r5 is used to communicate the offset to the location of the map check. | |
| 2864 __ mov(r5, Operand(map_check_delta * kPointerSize)); | |
| 2865 // r6 is used to communicate the offset to the location of the bool load. | |
| 2866 __ mov(r6, Operand(bool_load_delta * kPointerSize)); | |
| 2867 // The mov above can generate one or two instructions. The delta was | |
| 2868 // computed for two instructions, so we need to pad here in case of one | |
| 2869 // instruction. | |
| 2870 while (masm_->InstructionsGeneratedSince(&before_push_delta) != 4) { | |
| 2871 __ nop(); | |
| 2872 } | |
| 2873 } | |
| 2874 CallCodeGeneric(stub.GetCode(), RelocInfo::CODE_TARGET, instr, | |
| 2875 RECORD_SAFEPOINT_WITH_REGISTERS_AND_NO_ARGUMENTS); | |
| 2876 } | |
| 2877 LEnvironment* env = instr->GetDeferredLazyDeoptimizationEnvironment(); | |
| 2878 safepoints_.RecordLazyDeoptimizationIndex(env->deoptimization_index()); | |
| 2879 // Put the result value (r0) into the result register slot and | |
| 2880 // restore all registers. | |
| 2881 __ StoreToSafepointRegisterSlot(r0, ToRegister(instr->result())); | |
| 2882 } | |
| 2883 | |
| 2884 | |
| 2885 void LCodeGen::DoCmpT(LCmpT* instr) { | 2772 void LCodeGen::DoCmpT(LCmpT* instr) { |
| 2886 DCHECK(ToRegister(instr->context()).is(cp)); | 2773 DCHECK(ToRegister(instr->context()).is(cp)); |
| 2887 Token::Value op = instr->op(); | 2774 Token::Value op = instr->op(); |
| 2888 | 2775 |
| 2889 Handle<Code> ic = | 2776 Handle<Code> ic = |
| 2890 CodeFactory::CompareIC(isolate(), op, instr->strength()).code(); | 2777 CodeFactory::CompareIC(isolate(), op, instr->strength()).code(); |
| 2891 CallCode(ic, RelocInfo::CODE_TARGET, instr); | 2778 CallCode(ic, RelocInfo::CODE_TARGET, instr); |
| 2892 // This instruction also signals that no smi code was inlined. | 2779 // This instruction also signals that no smi code was inlined. |
| 2893 __ cmp(r0, Operand::Zero()); | 2780 __ cmp(r0, Operand::Zero()); |
| 2894 | 2781 |
| (...skipping 3130 matching lines...) | |
| 6025 __ push(ToRegister(instr->function())); | 5912 __ push(ToRegister(instr->function())); |
| 6026 CallRuntime(Runtime::kPushBlockContext, 2, instr); | 5913 CallRuntime(Runtime::kPushBlockContext, 2, instr); |
| 6027 RecordSafepoint(Safepoint::kNoLazyDeopt); | 5914 RecordSafepoint(Safepoint::kNoLazyDeopt); |
| 6028 } | 5915 } |
| 6029 | 5916 |
| 6030 | 5917 |
| 6031 #undef __ | 5918 #undef __ |
| 6032 | 5919 |
| 6033 } // namespace internal | 5920 } // namespace internal |
| 6034 } // namespace v8 | 5921 } // namespace v8 |
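
For context on the replacement, the new DoHasInPrototypeChainAndBranch body boils down to a short loop. Below is a minimal sketch of its semantics in plain C++; Obj, HasInPrototypeChain, null_value, and needs_smi_check are illustrative stand-ins rather than V8 types, and the object-to-map-to-prototype indirection is collapsed into a single pointer:

```cpp
#include <cstdio>

// Hypothetical stand-in for V8's heap objects: the real code loads the map
// and then reads Map::kPrototypeOffset; that indirection is collapsed into
// one `prototype` pointer here.
struct Obj {
  const Obj* prototype;  // next link in the prototype chain
  bool is_smi;           // smis have no map and no prototype chain
};

// Mirrors the emitted loop: bail out on smis, then compare each prototype
// against the target, succeeding on a match (EmitTrueBranch) and failing
// when the chain ends at {null} (EmitFalseBranch).
bool HasInPrototypeChain(const Obj* object, const Obj* target,
                         const Obj* null_value, bool needs_smi_check) {
  if (needs_smi_check && object->is_smi) return false;  // __ SmiTst(object)
  const Obj* proto = object->prototype;
  for (;;) {
    if (proto == target) return true;       // __ cmp(object_prototype, prototype)
    if (proto == null_value) return false;  // __ CompareRoot(..., kNullValueRootIndex)
    proto = proto->prototype;               // reload map, take the next link
  }
}

int main() {
  Obj null_obj{nullptr, false};
  Obj proto_b{&null_obj, false};
  Obj proto_a{&proto_b, false};
  Obj obj{&proto_a, false};
  Obj other{&null_obj, false};
  std::printf("%d\n", HasInPrototypeChain(&obj, &proto_b, &null_obj, true));  // 1
  std::printf("%d\n", HasInPrototypeChain(&obj, &other, &null_obj, true));    // 0
}
```

Unlike the deleted DoInstanceOfKnownGlobal path, which called a deferred InstanceofStub and relied on PredictableCodeSizeScope bookkeeping so the stub could patch the inlined map check and bool load, this loop branches straight to the block's true and false labels and needs no code patching.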