OLD | NEW |
1 // Copyright 2012 the V8 project authors. All rights reserved. | 1 // Copyright 2012 the V8 project authors. All rights reserved. |
2 // Redistribution and use in source and binary forms, with or without | 2 // Redistribution and use in source and binary forms, with or without |
3 // modification, are permitted provided that the following conditions are | 3 // modification, are permitted provided that the following conditions are |
4 // met: | 4 // met: |
5 // | 5 // |
6 // * Redistributions of source code must retain the above copyright | 6 // * Redistributions of source code must retain the above copyright |
7 // notice, this list of conditions and the following disclaimer. | 7 // notice, this list of conditions and the following disclaimer. |
8 // * Redistributions in binary form must reproduce the above | 8 // * Redistributions in binary form must reproduce the above |
9 // copyright notice, this list of conditions and the following | 9 // copyright notice, this list of conditions and the following |
10 // disclaimer in the documentation and/or other materials provided | 10 // disclaimer in the documentation and/or other materials provided |
(...skipping 128 matching lines...)
139 __ b(eq, &ok); | 139 __ b(eq, &ok); |
140 int receiver_offset = scope()->num_parameters() * kPointerSize; | 140 int receiver_offset = scope()->num_parameters() * kPointerSize; |
141 __ LoadRoot(r2, Heap::kUndefinedValueRootIndex); | 141 __ LoadRoot(r2, Heap::kUndefinedValueRootIndex); |
142 __ str(r2, MemOperand(sp, receiver_offset)); | 142 __ str(r2, MemOperand(sp, receiver_offset)); |
143 __ bind(&ok); | 143 __ bind(&ok); |
144 } | 144 } |
145 } | 145 } |
146 | 146 |
147 info()->set_prologue_offset(masm_->pc_offset()); | 147 info()->set_prologue_offset(masm_->pc_offset()); |
148 if (NeedsEagerFrame()) { | 148 if (NeedsEagerFrame()) { |
149 PredictableCodeSizeScope predictible_code_size_scope( | 149 if (info()->IsStub()) { |
150 masm_, kNoCodeAgeSequenceLength * Assembler::kInstrSize); | 150 __ stm(db_w, sp, cp.bit() | fp.bit() | lr.bit()); |
151 // The following three instructions must remain together and unmodified | 151 __ Push(Smi::FromInt(StackFrame::STUB)); |
152 // for code aging to work properly. | 152 // Adjust FP to point to saved FP. |
153 __ stm(db_w, sp, r1.bit() | cp.bit() | fp.bit() | lr.bit()); | 153 __ add(fp, sp, Operand(2 * kPointerSize)); |
154 // Load undefined value here, so the value is ready for the loop | 154 } else { |
155 // below. | 155 PredictableCodeSizeScope predictible_code_size_scope( |
156 __ LoadRoot(ip, Heap::kUndefinedValueRootIndex); | 156 masm_, kNoCodeAgeSequenceLength * Assembler::kInstrSize); |
157 // Adjust FP to point to saved FP. | 157 // The following three instructions must remain together and unmodified |
158 __ add(fp, sp, Operand(2 * kPointerSize)); | 158 // for code aging to work properly. |
| 159 __ stm(db_w, sp, r1.bit() | cp.bit() | fp.bit() | lr.bit()); |
| 160 // Load undefined value here, so the value is ready for the loop |
| 161 // below. |
| 162 __ LoadRoot(ip, Heap::kUndefinedValueRootIndex); |
| 163 // Adjust FP to point to saved FP. |
| 164 __ add(fp, sp, Operand(2 * kPointerSize)); |
| 165 } |
159 frame_is_built_ = true; | 166 frame_is_built_ = true; |
160 } | 167 } |
161 | 168 |
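A note on the stub prologue added above: ARM's stm(db_w) stores the lowest-numbered register at the lowest address, so (assuming the usual cp < fp < lr numbering on this port) the STUB marker, cp, fp and lr end up bottom-to-top, and add(fp, sp, Operand(2 * kPointerSize)) leaves fp pointing at the saved fp — the same invariant the regular-function branch establishes. A standalone model (not V8 code) of that stack effect:

    // Standalone model (not V8 code) of the stub prologue's stack effect. It
    // assumes the usual cp < fp < lr register numbering on this port, so
    // stm(db_w) lays them out in that order from low to high address.
    #include <cstdio>
    #include <deque>
    #include <string>
    int main() {
      std::deque<std::string> stack;                    // front() == [sp]
      // __ stm(db_w, sp, cp.bit() | fp.bit() | lr.bit());
      stack.push_front("lr");
      stack.push_front("fp");
      stack.push_front("cp");
      // __ Push(Smi::FromInt(StackFrame::STUB));
      stack.push_front("STUB marker");
      // __ add(fp, sp, Operand(2 * kPointerSize));  ->  fp = sp + 2 slots
      std::printf("fp -> %s\n", stack[2].c_str());      // prints: fp -> fp
      return 0;
    }
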
162 // Reserve space for the stack slots needed by the code. | 169 // Reserve space for the stack slots needed by the code. |
163 int slots = GetStackSlotCount(); | 170 int slots = GetStackSlotCount(); |
164 if (slots > 0) { | 171 if (slots > 0) { |
165 if (FLAG_debug_code) { | 172 if (FLAG_debug_code) { |
166 __ mov(r0, Operand(slots)); | 173 __ sub(sp, sp, Operand(slots * kPointerSize)); |
167 __ mov(r2, Operand(kSlotsZapValue)); | 174 __ push(r0); |
| 175 __ push(r1); |
| 176 __ add(r0, sp, Operand(slots * kPointerSize)); |
| 177 __ mov(r1, Operand(kSlotsZapValue)); |
168 Label loop; | 178 Label loop; |
169 __ bind(&loop); | 179 __ bind(&loop); |
170 __ push(r2); | 180 __ sub(r0, r0, Operand(kPointerSize)); |
171 __ sub(r0, r0, Operand(1), SetCC); | 181 __ str(r1, MemOperand(r0, 2 * kPointerSize)); |
| 182 __ cmp(r0, sp); |
172 __ b(ne, &loop); | 183 __ b(ne, &loop); |
| 184 __ pop(r1); |
| 185 __ pop(r0); |
173 } else { | 186 } else { |
174 __ sub(sp, sp, Operand(slots * kPointerSize)); | 187 __ sub(sp, sp, Operand(slots * kPointerSize)); |
175 } | 188 } |
176 } | 189 } |
177 | 190 |
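On the reworked zap loop for FLAG_debug_code: r0 and r1 are saved below the freshly reserved slots, which is why every store goes through MemOperand(r0, 2 * kPointerSize). A standalone model (not V8 code; kZap is a placeholder for kSlotsZapValue) showing the loop writes exactly the reserved slots and leaves the saved registers intact:

    // Standalone model (not V8 code) of the zap loop; kZap stands in for
    // kSlotsZapValue, and word indexes are used in place of byte offsets.
    #include <cstdio>
    int main() {
      const int slots = 4;
      const unsigned kZap = 0xdeadbeefu;   // placeholder, not the real kSlotsZapValue
      unsigned mem[2 + slots] = {0};       // [0]=saved r1, [1]=saved r0, [2..]=slots
      int r0 = slots;                      // add(r0, sp, Operand(slots * kPointerSize))
      do {
        r0 -= 1;                           // sub(r0, r0, Operand(kPointerSize))
        mem[r0 + 2] = kZap;                // str(r1, MemOperand(r0, 2 * kPointerSize))
      } while (r0 != 0);                   // cmp(r0, sp); b(ne, &loop)
      for (int i = 0; i < 2 + slots; ++i)
        std::printf("mem[%d] = %#x\n", i, mem[i]);      // slots zapped, r0/r1 intact
      return 0;
    }
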
| 191 if (info()->saves_caller_doubles() && CpuFeatures::IsSupported(VFP2)) { |
| 192 CpuFeatures::Scope scope(VFP2); |
| 193 Comment(";;; Save clobbered callee double registers"); |
| 194 int count = 0; |
| 195 BitVector* doubles = chunk()->allocated_double_registers(); |
| 196 BitVector::Iterator save_iterator(doubles); |
| 197 while (!save_iterator.Done()) { |
| 198 __ vstr(DwVfpRegister::FromAllocationIndex(save_iterator.Current()), |
| 199 MemOperand(sp, count * kDoubleSize)); |
| 200 save_iterator.Advance(); |
| 201 count++; |
| 202 } |
| 203 } |
| 204 |
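On the new callee double-register spill: the save here and the restore in DoReturn both walk the allocated-double-register bit vector in the same order, so a register's slot offset is simply kDoubleSize times its rank in that walk and no side table is needed. An illustrative standalone helper (hypothetical, not from the patch):

    // Illustrative helper (hypothetical, not from the patch); allocation indexes
    // are treated as plain d-register numbers, and the bit-vector walk is assumed
    // to visit set bits in ascending index order.
    #include <cstdint>
    #include <cstdio>
    int main() {
      const int kDoubleSize = 8;
      uint32_t allocated = (1u << 1) | (1u << 2) | (1u << 4) | (1u << 6);  // assumed mask
      int count = 0;
      for (int reg = 0; reg < 16; ++reg) {             // d0..d15 under VFP2
        if (allocated & (1u << reg)) {
          std::printf("d%d <-> MemOperand(sp, %d)\n", reg, count * kDoubleSize);
          ++count;
        }
      }
      return 0;
    }
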
178 // Possibly allocate a local context. | 205 // Possibly allocate a local context. |
179 int heap_slots = info()->num_heap_slots() - Context::MIN_CONTEXT_SLOTS; | 206 int heap_slots = info()->num_heap_slots() - Context::MIN_CONTEXT_SLOTS; |
180 if (heap_slots > 0) { | 207 if (heap_slots > 0) { |
181 Comment(";;; Allocate local context"); | 208 Comment(";;; Allocate local context"); |
182 // Argument to NewContext is the function, which is in r1. | 209 // Argument to NewContext is the function, which is in r1. |
183 __ push(r1); | 210 __ push(r1); |
184 if (heap_slots <= FastNewContextStub::kMaximumSlots) { | 211 if (heap_slots <= FastNewContextStub::kMaximumSlots) { |
185 FastNewContextStub stub(heap_slots); | 212 FastNewContextStub stub(heap_slots); |
186 __ CallStub(&stub); | 213 __ CallStub(&stub); |
187 } else { | 214 } else { |
(...skipping 2625 matching lines...)
2813 } | 2840 } |
2814 | 2841 |
2815 | 2842 |
2816 void LCodeGen::DoReturn(LReturn* instr) { | 2843 void LCodeGen::DoReturn(LReturn* instr) { |
2817 if (FLAG_trace && info()->IsOptimizing()) { | 2844 if (FLAG_trace && info()->IsOptimizing()) { |
2818 // Push the return value on the stack as the parameter. | 2845 // Push the return value on the stack as the parameter. |
2819 // Runtime::TraceExit returns its parameter in r0. | 2846 // Runtime::TraceExit returns its parameter in r0. |
2820 __ push(r0); | 2847 __ push(r0); |
2821 __ CallRuntime(Runtime::kTraceExit, 1); | 2848 __ CallRuntime(Runtime::kTraceExit, 1); |
2822 } | 2849 } |
| 2850 if (info()->saves_caller_doubles() && CpuFeatures::IsSupported(VFP2)) { |
| 2851 CpuFeatures::Scope scope(VFP2); |
| 2852 ASSERT(NeedsEagerFrame()); |
| 2853 BitVector* doubles = chunk()->allocated_double_registers(); |
| 2854 BitVector::Iterator save_iterator(doubles); |
| 2855 int count = 0; |
| 2856 while (!save_iterator.Done()) { |
| 2857 __ vldr(DwVfpRegister::FromAllocationIndex(save_iterator.Current()), |
| 2858 MemOperand(sp, count * kDoubleSize)); |
| 2859 save_iterator.Advance(); |
| 2860 count++; |
| 2861 } |
| 2862 } |
2823 if (NeedsEagerFrame()) { | 2863 if (NeedsEagerFrame()) { |
2824 int32_t sp_delta = (GetParameterCount() + 1) * kPointerSize; | 2864 int32_t sp_delta = (GetParameterCount() + 1) * kPointerSize; |
2825 __ mov(sp, fp); | 2865 __ mov(sp, fp); |
2826 __ ldm(ia_w, sp, fp.bit() | lr.bit()); | 2866 __ ldm(ia_w, sp, fp.bit() | lr.bit()); |
2827 __ add(sp, sp, Operand(sp_delta)); | 2867 if (!info()->IsStub()) { |
| 2868 __ add(sp, sp, Operand(sp_delta)); |
| 2869 } |
2828 } | 2870 } |
2829 __ Jump(lr); | 2871 __ Jump(lr); |
2830 } | 2872 } |
2831 | 2873 |
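For the DoReturn change: an optimized JS frame drops its receiver plus declared parameters on return, which is where sp_delta comes from; a stub frame has no receiver-and-arguments area laid out by the JS calling convention, hence the new !info()->IsStub() guard around the add. A tiny illustration, assuming 32-bit pointers and an example parameter count:

    // Tiny illustration, assuming 32-bit pointers; parameter_count is an example.
    #include <cstdio>
    int main() {
      const int kPointerSize = 4;
      int parameter_count = 2;                               // e.g. function f(a, b)
      int sp_delta = (parameter_count + 1) * kPointerSize;   // +1 for the receiver
      std::printf("sp_delta = %d bytes\n", sp_delta);        // 12
      return 0;
    }
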
2832 | 2874 |
2833 void LCodeGen::DoLoadGlobalCell(LLoadGlobalCell* instr) { | 2875 void LCodeGen::DoLoadGlobalCell(LLoadGlobalCell* instr) { |
2834 Register result = ToRegister(instr->result()); | 2876 Register result = ToRegister(instr->result()); |
2835 __ mov(ip, Operand(Handle<Object>(instr->hydrogen()->cell()))); | 2877 __ mov(ip, Operand(Handle<Object>(instr->hydrogen()->cell()))); |
2836 __ ldr(result, FieldMemOperand(ip, JSGlobalPropertyCell::kValueOffset)); | 2878 __ ldr(result, FieldMemOperand(ip, JSGlobalPropertyCell::kValueOffset)); |
2837 if (instr->hydrogen()->RequiresHoleCheck()) { | 2879 if (instr->hydrogen()->RequiresHoleCheck()) { |
(...skipping 742 matching lines...)
3580 } | 3622 } |
3581 | 3623 |
3582 | 3624 |
3583 void LCodeGen::DoThisFunction(LThisFunction* instr) { | 3625 void LCodeGen::DoThisFunction(LThisFunction* instr) { |
3584 Register result = ToRegister(instr->result()); | 3626 Register result = ToRegister(instr->result()); |
3585 __ ldr(result, MemOperand(fp, JavaScriptFrameConstants::kFunctionOffset)); | 3627 __ ldr(result, MemOperand(fp, JavaScriptFrameConstants::kFunctionOffset)); |
3586 } | 3628 } |
3587 | 3629 |
3588 | 3630 |
3589 void LCodeGen::DoContext(LContext* instr) { | 3631 void LCodeGen::DoContext(LContext* instr) { |
| 3632 // If there is a non-return use, the context must be moved to a register. |
3590 Register result = ToRegister(instr->result()); | 3633 Register result = ToRegister(instr->result()); |
3591 __ mov(result, cp); | 3634 for (HUseIterator it(instr->hydrogen()->uses()); !it.Done(); it.Advance()) { |
| 3635 if (!it.value()->IsReturn()) { |
| 3636 __ mov(result, cp); |
| 3637 return; |
| 3638 } |
| 3639 } |
3592 } | 3640 } |
3593 | 3641 |
3594 | 3642 |
3595 void LCodeGen::DoOuterContext(LOuterContext* instr) { | 3643 void LCodeGen::DoOuterContext(LOuterContext* instr) { |
3596 Register context = ToRegister(instr->context()); | 3644 Register context = ToRegister(instr->context()); |
3597 Register result = ToRegister(instr->result()); | 3645 Register result = ToRegister(instr->result()); |
3598 __ ldr(result, | 3646 __ ldr(result, |
3599 MemOperand(context, Context::SlotOffset(Context::PREVIOUS_INDEX))); | 3647 MemOperand(context, Context::SlotOffset(Context::PREVIOUS_INDEX))); |
3600 } | 3648 } |
3601 | 3649 |
(...skipping 898 matching lines...)
4500 | 4548 |
4501 Handle<Code> ic = (instr->strict_mode_flag() == kStrictMode) | 4549 Handle<Code> ic = (instr->strict_mode_flag() == kStrictMode) |
4502 ? isolate()->builtins()->KeyedStoreIC_Initialize_Strict() | 4550 ? isolate()->builtins()->KeyedStoreIC_Initialize_Strict() |
4503 : isolate()->builtins()->KeyedStoreIC_Initialize(); | 4551 : isolate()->builtins()->KeyedStoreIC_Initialize(); |
4504 CallCode(ic, RelocInfo::CODE_TARGET, instr, NEVER_INLINE_TARGET_ADDRESS); | 4552 CallCode(ic, RelocInfo::CODE_TARGET, instr, NEVER_INLINE_TARGET_ADDRESS); |
4505 } | 4553 } |
4506 | 4554 |
4507 | 4555 |
4508 void LCodeGen::DoTransitionElementsKind(LTransitionElementsKind* instr) { | 4556 void LCodeGen::DoTransitionElementsKind(LTransitionElementsKind* instr) { |
4509 Register object_reg = ToRegister(instr->object()); | 4557 Register object_reg = ToRegister(instr->object()); |
4510 Register new_map_reg = ToRegister(instr->new_map_temp()); | |
4511 Register scratch = scratch0(); | 4558 Register scratch = scratch0(); |
4512 | 4559 |
4513 Handle<Map> from_map = instr->original_map(); | 4560 Handle<Map> from_map = instr->original_map(); |
4514 Handle<Map> to_map = instr->transitioned_map(); | 4561 Handle<Map> to_map = instr->transitioned_map(); |
4515 ElementsKind from_kind = instr->from_kind(); | 4562 ElementsKind from_kind = instr->from_kind(); |
4516 ElementsKind to_kind = instr->to_kind(); | 4563 ElementsKind to_kind = instr->to_kind(); |
4517 | 4564 |
4518 Label not_applicable; | 4565 Label not_applicable; |
4519 __ ldr(scratch, FieldMemOperand(object_reg, HeapObject::kMapOffset)); | 4566 __ ldr(scratch, FieldMemOperand(object_reg, HeapObject::kMapOffset)); |
4520 __ cmp(scratch, Operand(from_map)); | 4567 __ cmp(scratch, Operand(from_map)); |
4521 __ b(ne, ¬_applicable); | 4568 __ b(ne, ¬_applicable); |
4522 __ mov(new_map_reg, Operand(to_map)); | |
4523 | 4569 |
4524 if (IsSimpleMapChangeTransition(from_kind, to_kind)) { | 4570 if (IsSimpleMapChangeTransition(from_kind, to_kind)) { |
| 4571 Register new_map_reg = ToRegister(instr->new_map_temp()); |
| 4572 __ mov(new_map_reg, Operand(to_map)); |
4525 __ str(new_map_reg, FieldMemOperand(object_reg, HeapObject::kMapOffset)); | 4573 __ str(new_map_reg, FieldMemOperand(object_reg, HeapObject::kMapOffset)); |
4526 // Write barrier. | 4574 // Write barrier. |
4527 __ RecordWriteField(object_reg, HeapObject::kMapOffset, new_map_reg, | 4575 __ RecordWriteField(object_reg, HeapObject::kMapOffset, new_map_reg, |
4528 scratch, kLRHasBeenSaved, kDontSaveFPRegs); | 4576 scratch, kLRHasBeenSaved, kDontSaveFPRegs); |
| 4577 } else if (FLAG_compiled_transitions) { |
| 4578 PushSafepointRegistersScope scope(this, Safepoint::kWithRegisters); |
| 4579 __ Move(r0, object_reg); |
| 4580 __ Move(r1, to_map); |
| 4581 TransitionElementsKindStub stub(from_kind, to_kind); |
| 4582 __ CallStub(&stub); |
| 4583 RecordSafepointWithRegisters( |
| 4584 instr->pointer_map(), 0, Safepoint::kNoLazyDeopt); |
4529 } else if (IsFastSmiElementsKind(from_kind) && | 4585 } else if (IsFastSmiElementsKind(from_kind) && |
4530 IsFastDoubleElementsKind(to_kind)) { | 4586 IsFastDoubleElementsKind(to_kind)) { |
4531 Register fixed_object_reg = ToRegister(instr->temp()); | 4587 Register fixed_object_reg = ToRegister(instr->temp()); |
4532 ASSERT(fixed_object_reg.is(r2)); | 4588 ASSERT(fixed_object_reg.is(r2)); |
| 4589 Register new_map_reg = ToRegister(instr->new_map_temp()); |
4533 ASSERT(new_map_reg.is(r3)); | 4590 ASSERT(new_map_reg.is(r3)); |
| 4591 __ mov(new_map_reg, Operand(to_map)); |
4534 __ mov(fixed_object_reg, object_reg); | 4592 __ mov(fixed_object_reg, object_reg); |
4535 CallCode(isolate()->builtins()->TransitionElementsSmiToDouble(), | 4593 CallCode(isolate()->builtins()->TransitionElementsSmiToDouble(), |
4536 RelocInfo::CODE_TARGET, instr); | 4594 RelocInfo::CODE_TARGET, instr); |
4537 } else if (IsFastDoubleElementsKind(from_kind) && | 4595 } else if (IsFastDoubleElementsKind(from_kind) && |
4538 IsFastObjectElementsKind(to_kind)) { | 4596 IsFastObjectElementsKind(to_kind)) { |
4539 Register fixed_object_reg = ToRegister(instr->temp()); | 4597 Register fixed_object_reg = ToRegister(instr->temp()); |
4540 ASSERT(fixed_object_reg.is(r2)); | 4598 ASSERT(fixed_object_reg.is(r2)); |
| 4599 Register new_map_reg = ToRegister(instr->new_map_temp()); |
4541 ASSERT(new_map_reg.is(r3)); | 4600 ASSERT(new_map_reg.is(r3)); |
| 4601 __ mov(new_map_reg, Operand(to_map)); |
4542 __ mov(fixed_object_reg, object_reg); | 4602 __ mov(fixed_object_reg, object_reg); |
4543 CallCode(isolate()->builtins()->TransitionElementsDoubleToObject(), | 4603 CallCode(isolate()->builtins()->TransitionElementsDoubleToObject(), |
4544 RelocInfo::CODE_TARGET, instr); | 4604 RelocInfo::CODE_TARGET, instr); |
4545 } else { | 4605 } else { |
4546 UNREACHABLE(); | 4606 UNREACHABLE(); |
4547 } | 4607 } |
4548 __ bind(¬_applicable); | 4608 __ bind(¬_applicable); |
4549 } | 4609 } |
4550 | 4610 |
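On the DoTransitionElementsKind restructuring: only transitions that keep the elements representation can be done by rewriting the map pointer in place; smi-to-double or double-to-tagged must rewrite the backing store, so those go through the new TransitionElementsKindStub (under FLAG_compiled_transitions) or the existing builtins. A hedged sketch of that distinction:

    // Hedged sketch: the enum values and helper below are illustrative, not the
    // real declarations; "simple" is assumed to mean the element representation
    // (tagged vs. unboxed double) does not change.
    #include <cstdio>
    enum Kind { FAST_SMI_ELEMENTS, FAST_ELEMENTS, FAST_DOUBLE_ELEMENTS };
    static bool IsSimpleTransition(Kind from, Kind to) {
      return (from == FAST_DOUBLE_ELEMENTS) == (to == FAST_DOUBLE_ELEMENTS);
    }
    int main() {
      std::printf("smi->tagged: %d\n",
                  IsSimpleTransition(FAST_SMI_ELEMENTS, FAST_ELEMENTS));         // 1
      std::printf("smi->double: %d\n",
                  IsSimpleTransition(FAST_SMI_ELEMENTS, FAST_DOUBLE_ELEMENTS));  // 0
      return 0;
    }
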
4551 | 4611 |
| 4612 void LCodeGen::DoTrapAllocationMemento(LTrapAllocationMemento* instr) { |
| 4613 Register object = ToRegister(instr->object()); |
| 4614 Register temp = ToRegister(instr->temp()); |
| 4615 __ TestJSArrayForAllocationSiteInfo(object, temp); |
| 4616 DeoptimizeIf(eq, instr->environment()); |
| 4617 } |
| 4618 |
| 4619 |
4552 void LCodeGen::DoStringAdd(LStringAdd* instr) { | 4620 void LCodeGen::DoStringAdd(LStringAdd* instr) { |
4553 __ push(ToRegister(instr->left())); | 4621 __ push(ToRegister(instr->left())); |
4554 __ push(ToRegister(instr->right())); | 4622 __ push(ToRegister(instr->right())); |
4555 StringAddStub stub(NO_STRING_CHECK_IN_STUB); | 4623 StringAddStub stub(NO_STRING_CHECK_IN_STUB); |
4556 CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr); | 4624 CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr); |
4557 } | 4625 } |
4558 | 4626 |
4559 | 4627 |
4560 void LCodeGen::DoStringCharCodeAt(LStringCharCodeAt* instr) { | 4628 void LCodeGen::DoStringCharCodeAt(LStringCharCodeAt* instr) { |
4561 class DeferredStringCharCodeAt: public LDeferredCode { | 4629 class DeferredStringCharCodeAt: public LDeferredCode { |
(...skipping 316 matching lines...)
4878 private: | 4946 private: |
4879 LNumberTagD* instr_; | 4947 LNumberTagD* instr_; |
4880 }; | 4948 }; |
4881 | 4949 |
4882 DwVfpRegister input_reg = ToDoubleRegister(instr->value()); | 4950 DwVfpRegister input_reg = ToDoubleRegister(instr->value()); |
4883 Register scratch = scratch0(); | 4951 Register scratch = scratch0(); |
4884 Register reg = ToRegister(instr->result()); | 4952 Register reg = ToRegister(instr->result()); |
4885 Register temp1 = ToRegister(instr->temp()); | 4953 Register temp1 = ToRegister(instr->temp()); |
4886 Register temp2 = ToRegister(instr->temp2()); | 4954 Register temp2 = ToRegister(instr->temp2()); |
4887 | 4955 |
| 4956 bool convert_hole = false; |
| 4957 HValue* change_input = instr->hydrogen()->value(); |
| 4958 if (change_input->IsLoadKeyed()) { |
| 4959 HLoadKeyed* load = HLoadKeyed::cast(change_input); |
| 4960 convert_hole = load->UsesMustHandleHole(); |
| 4961 } |
| 4962 |
| 4963 Label no_special_nan_handling; |
| 4964 Label done; |
| 4965 if (convert_hole) { |
| 4966 if (CpuFeatures::IsSupported(VFP2)) { |
| 4967 CpuFeatures::Scope scope(VFP2); |
| 4968 DwVfpRegister input_reg = ToDoubleRegister(instr->value()); |
| 4969 __ VFPCompareAndSetFlags(input_reg, input_reg); |
| 4970 __ b(vc, &no_special_nan_handling); |
| 4971 __ vmov(reg, scratch0(), input_reg); |
| 4972 __ cmp(scratch0(), Operand(kHoleNanUpper32)); |
| 4973 Label canonicalize; |
| 4974 __ b(ne, &canonicalize); |
| 4975 __ Move(reg, factory()->the_hole_value()); |
| 4976 __ b(&done); |
| 4977 __ bind(&canonicalize); |
| 4978 __ Vmov(input_reg, |
| 4979 FixedDoubleArray::canonical_not_the_hole_nan_as_double(), |
| 4980 no_reg); |
| 4981 } else { |
| 4982 Label not_hole; |
| 4983 __ cmp(sfpd_hi, Operand(kHoleNanUpper32)); |
| 4984 __ b(ne, ¬_hole); |
| 4985 __ Move(reg, factory()->the_hole_value()); |
| 4986 __ b(&done); |
| 4987 __ bind(¬_hole); |
| 4988 __ and_(scratch, sfpd_hi, Operand(0x7ff00000)); |
| 4989 __ cmp(scratch, Operand(0x7ff00000)); |
| 4990 __ b(ne, &no_special_nan_handling); |
| 4991 Label special_nan_handling; |
| 4992 __ tst(sfpd_hi, Operand(0x000FFFFF)); |
| 4993 __ b(ne, &special_nan_handling); |
| 4994 __ cmp(sfpd_lo, Operand(0)); |
| 4995 __ b(eq, &no_special_nan_handling); |
| 4996 __ bind(&special_nan_handling); |
| 4997 double canonical_nan = |
| 4998 FixedDoubleArray::canonical_not_the_hole_nan_as_double(); |
| 4999 uint64_t casted_nan = BitCast<uint64_t>(canonical_nan); |
| 5000 __ mov(sfpd_lo, |
| 5001 Operand(static_cast<uint32_t>(casted_nan & 0xFFFFFFFF))); |
| 5002 __ mov(sfpd_hi, |
| 5003 Operand(static_cast<uint32_t>(casted_nan >> 32))); |
| 5004 } |
| 5005 } |
| 5006 |
| 5007 __ bind(&no_special_nan_handling); |
4888 DeferredNumberTagD* deferred = new(zone()) DeferredNumberTagD(this, instr); | 5008 DeferredNumberTagD* deferred = new(zone()) DeferredNumberTagD(this, instr); |
4889 if (FLAG_inline_new) { | 5009 if (FLAG_inline_new) { |
4890 __ LoadRoot(scratch, Heap::kHeapNumberMapRootIndex); | 5010 __ LoadRoot(scratch, Heap::kHeapNumberMapRootIndex); |
4891 // We want the untagged address first for performance | 5011 // We want the untagged address first for performance |
4892 __ AllocateHeapNumber(reg, temp1, temp2, scratch, deferred->entry(), | 5012 __ AllocateHeapNumber(reg, temp1, temp2, scratch, deferred->entry(), |
4893 DONT_TAG_RESULT); | 5013 DONT_TAG_RESULT); |
4894 } else { | 5014 } else { |
4895 __ jmp(deferred->entry()); | 5015 __ jmp(deferred->entry()); |
4896 } | 5016 } |
4897 __ bind(deferred->exit()); | 5017 __ bind(deferred->exit()); |
4898 if (CpuFeatures::IsSupported(VFP2)) { | 5018 if (CpuFeatures::IsSupported(VFP2)) { |
4899 CpuFeatures::Scope scope(VFP2); | 5019 CpuFeatures::Scope scope(VFP2); |
4900 __ vstr(input_reg, reg, HeapNumber::kValueOffset); | 5020 __ vstr(input_reg, reg, HeapNumber::kValueOffset); |
4901 } else { | 5021 } else { |
4902 __ str(sfpd_lo, MemOperand(reg, HeapNumber::kValueOffset)); | 5022 __ str(sfpd_lo, MemOperand(reg, HeapNumber::kValueOffset)); |
4903 __ str(sfpd_hi, MemOperand(reg, HeapNumber::kValueOffset + kPointerSize)); | 5023 __ str(sfpd_hi, MemOperand(reg, HeapNumber::kValueOffset + kPointerSize)); |
4904 } | 5024 } |
4905 // Now that we have finished with the object's real address, tag it | 5025 // Now that we have finished with the object's real address, tag it |
4906 __ add(reg, reg, Operand(kHeapObjectTag)); | 5026 __ add(reg, reg, Operand(kHeapObjectTag)); |
| 5027 __ bind(&done); |
4907 } | 5028 } |
4908 | 5029 |
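Background for the hole handling added to DoNumberTagD: the hole in a FixedDoubleArray is one particular NaN bit pattern, recognized by comparing the upper word against kHoleNanUpper32; any other NaN is rewritten to the canonical NaN. The non-VFP2 branch tests NaN-ness directly on the sfpd_hi/sfpd_lo words; a standalone check (not V8 code) mirroring that test:

    // Standalone check (not V8 code) mirroring the non-VFP2 test: exponent bits
    // all ones and a non-zero mantissa means NaN.
    #include <cstdint>
    #include <cstdio>
    #include <cstring>
    #include <limits>
    static bool IsNaNBits(double d) {
      uint64_t bits;
      std::memcpy(&bits, &d, sizeof bits);
      uint32_t hi = static_cast<uint32_t>(bits >> 32);   // sign, exponent, mantissa[51:32]
      uint32_t lo = static_cast<uint32_t>(bits);         // mantissa[31:0]
      return (hi & 0x7FF00000u) == 0x7FF00000u &&        // and_/cmp on sfpd_hi
             ((hi & 0x000FFFFFu) != 0 || lo != 0);       // tst sfpd_hi / cmp sfpd_lo
    }
    int main() {
      std::printf("%d %d %d\n",
                  IsNaNBits(1.5),                                        // 0
                  IsNaNBits(std::numeric_limits<double>::quiet_NaN()),   // 1
                  IsNaNBits(std::numeric_limits<double>::infinity()));   // 0
      return 0;
    }
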
4909 | 5030 |
4910 void LCodeGen::DoDeferredNumberTagD(LNumberTagD* instr) { | 5031 void LCodeGen::DoDeferredNumberTagD(LNumberTagD* instr) { |
4911 // TODO(3095996): Get rid of this. For now, we need to make the | 5032 // TODO(3095996): Get rid of this. For now, we need to make the |
4912 // result register contain a valid pointer because it is already | 5033 // result register contain a valid pointer because it is already |
4913 // contained in the register pointer map. | 5034 // contained in the register pointer map. |
4914 Register reg = ToRegister(instr->result()); | 5035 Register reg = ToRegister(instr->result()); |
4915 __ mov(reg, Operand::Zero()); | 5036 __ mov(reg, Operand::Zero()); |
4916 | 5037 |
(...skipping 21 matching lines...)
4938 } else { | 5059 } else { |
4939 __ SmiUntag(result, input); | 5060 __ SmiUntag(result, input); |
4940 } | 5061 } |
4941 } | 5062 } |
4942 | 5063 |
4943 | 5064 |
4944 void LCodeGen::EmitNumberUntagD(Register input_reg, | 5065 void LCodeGen::EmitNumberUntagD(Register input_reg, |
4945 DwVfpRegister result_reg, | 5066 DwVfpRegister result_reg, |
4946 bool deoptimize_on_undefined, | 5067 bool deoptimize_on_undefined, |
4947 bool deoptimize_on_minus_zero, | 5068 bool deoptimize_on_minus_zero, |
4948 LEnvironment* env) { | 5069 LEnvironment* env, |
| 5070 NumberUntagDMode mode) { |
4949 Register scratch = scratch0(); | 5071 Register scratch = scratch0(); |
4950 SwVfpRegister flt_scratch = double_scratch0().low(); | 5072 SwVfpRegister flt_scratch = double_scratch0().low(); |
4951 ASSERT(!result_reg.is(double_scratch0())); | 5073 ASSERT(!result_reg.is(double_scratch0())); |
4952 CpuFeatures::Scope scope(VFP2); | 5074 CpuFeatures::Scope scope(VFP2); |
4953 | 5075 |
4954 Label load_smi, heap_number, done; | 5076 Label load_smi, heap_number, done; |
4955 | 5077 |
4956 // Smi check. | 5078 if (mode == NUMBER_CANDIDATE_IS_ANY_TAGGED) { |
4957 __ UntagAndJumpIfSmi(scratch, input_reg, &load_smi); | 5079 // Smi check. |
| 5080 __ UntagAndJumpIfSmi(scratch, input_reg, &load_smi); |
4958 | 5081 |
4959 // Heap number map check. | 5082 // Heap number map check. |
4960 __ ldr(scratch, FieldMemOperand(input_reg, HeapObject::kMapOffset)); | 5083 __ ldr(scratch, FieldMemOperand(input_reg, HeapObject::kMapOffset)); |
4961 __ LoadRoot(ip, Heap::kHeapNumberMapRootIndex); | 5084 __ LoadRoot(ip, Heap::kHeapNumberMapRootIndex); |
4962 __ cmp(scratch, Operand(ip)); | 5085 __ cmp(scratch, Operand(ip)); |
4963 if (deoptimize_on_undefined) { | 5086 if (deoptimize_on_undefined) { |
4964 DeoptimizeIf(ne, env); | 5087 DeoptimizeIf(ne, env); |
| 5088 } else { |
| 5089 Label heap_number; |
| 5090 __ b(eq, &heap_number); |
| 5091 |
| 5092 __ LoadRoot(ip, Heap::kUndefinedValueRootIndex); |
| 5093 __ cmp(input_reg, Operand(ip)); |
| 5094 DeoptimizeIf(ne, env); |
| 5095 |
| 5096 // Convert undefined to NaN. |
| 5097 __ LoadRoot(ip, Heap::kNanValueRootIndex); |
| 5098 __ sub(ip, ip, Operand(kHeapObjectTag)); |
| 5099 __ vldr(result_reg, ip, HeapNumber::kValueOffset); |
| 5100 __ jmp(&done); |
| 5101 |
| 5102 __ bind(&heap_number); |
| 5103 } |
| 5104 // Heap number to double register conversion. |
| 5105 __ sub(ip, input_reg, Operand(kHeapObjectTag)); |
| 5106 __ vldr(result_reg, ip, HeapNumber::kValueOffset); |
| 5107 if (deoptimize_on_minus_zero) { |
| 5108 __ vmov(ip, result_reg.low()); |
| 5109 __ cmp(ip, Operand::Zero()); |
| 5110 __ b(ne, &done); |
| 5111 __ vmov(ip, result_reg.high()); |
| 5112 __ cmp(ip, Operand(HeapNumber::kSignMask)); |
| 5113 DeoptimizeIf(eq, env); |
| 5114 } |
| 5115 __ jmp(&done); |
| 5116 } else if (mode == NUMBER_CANDIDATE_IS_SMI_OR_HOLE) { |
| 5117 __ SmiUntag(scratch, input_reg, SetCC); |
| 5118 DeoptimizeIf(cs, env); |
| 5119 } else if (mode == NUMBER_CANDIDATE_IS_SMI_CONVERT_HOLE) { |
| 5120 __ UntagAndJumpIfSmi(scratch, input_reg, &load_smi); |
| 5121 __ Vmov(result_reg, |
| 5122 FixedDoubleArray::hole_nan_as_double(), |
| 5123 no_reg); |
| 5124 __ b(&done); |
4965 } else { | 5125 } else { |
4966 Label heap_number; | 5126 __ SmiUntag(scratch, input_reg); |
4967 __ b(eq, &heap_number); | 5127 ASSERT(mode == NUMBER_CANDIDATE_IS_SMI); |
4968 | |
4969 __ LoadRoot(ip, Heap::kUndefinedValueRootIndex); | |
4970 __ cmp(input_reg, Operand(ip)); | |
4971 DeoptimizeIf(ne, env); | |
4972 | |
4973 // Convert undefined to NaN. | |
4974 __ LoadRoot(ip, Heap::kNanValueRootIndex); | |
4975 __ sub(ip, ip, Operand(kHeapObjectTag)); | |
4976 __ vldr(result_reg, ip, HeapNumber::kValueOffset); | |
4977 __ jmp(&done); | |
4978 | |
4979 __ bind(&heap_number); | |
4980 } | 5128 } |
4981 // Heap number to double register conversion. | |
4982 __ sub(ip, input_reg, Operand(kHeapObjectTag)); | |
4983 __ vldr(result_reg, ip, HeapNumber::kValueOffset); | |
4984 if (deoptimize_on_minus_zero) { | |
4985 __ vmov(ip, result_reg.low()); | |
4986 __ cmp(ip, Operand::Zero()); | |
4987 __ b(ne, &done); | |
4988 __ vmov(ip, result_reg.high()); | |
4989 __ cmp(ip, Operand(HeapNumber::kSignMask)); | |
4990 DeoptimizeIf(eq, env); | |
4991 } | |
4992 __ jmp(&done); | |
4993 | 5129 |
4994 // Smi to double register conversion | 5130 // Smi to double register conversion |
4995 __ bind(&load_smi); | 5131 __ bind(&load_smi); |
4996 // scratch: untagged value of input_reg | 5132 // scratch: untagged value of input_reg |
4997 __ vmov(flt_scratch, scratch); | 5133 __ vmov(flt_scratch, scratch); |
4998 __ vcvt_f64_s32(result_reg, flt_scratch); | 5134 __ vcvt_f64_s32(result_reg, flt_scratch); |
4999 __ bind(&done); | 5135 __ bind(&done); |
5000 } | 5136 } |
5001 | 5137 |
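On the NUMBER_CANDIDATE_IS_SMI_OR_HOLE path in EmitNumberUntagD: SmiUntag(scratch, input_reg, SetCC) shifts the tag bit out into the ARM carry flag, so DeoptimizeIf(cs, env) fires exactly when the input was a tagged heap pointer rather than a smi. A standalone sketch (not V8 code) of the 32-bit smi encoding this relies on:

    // Standalone sketch (not V8 code) of the 32-bit smi encoding assumed here.
    #include <cstdint>
    #include <cstdio>
    int main() {
      int32_t tagged = 42 << 1;            // Smi::FromInt(42): tag bit 0 is clear
      int32_t value = tagged >> 1;         // SmiUntag: arithmetic shift right by 1
      bool carry = (tagged & 1) != 0;      // the bit shifted out into the C flag
      std::printf("value=%d was_smi=%d\n", value, !carry);   // value=42 was_smi=1
      return 0;
    }
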
5002 | 5138 |
(...skipping 107 matching lines...)
5110 | 5246 |
5111 void LCodeGen::DoNumberUntagD(LNumberUntagD* instr) { | 5247 void LCodeGen::DoNumberUntagD(LNumberUntagD* instr) { |
5112 LOperand* input = instr->value(); | 5248 LOperand* input = instr->value(); |
5113 ASSERT(input->IsRegister()); | 5249 ASSERT(input->IsRegister()); |
5114 LOperand* result = instr->result(); | 5250 LOperand* result = instr->result(); |
5115 ASSERT(result->IsDoubleRegister()); | 5251 ASSERT(result->IsDoubleRegister()); |
5116 | 5252 |
5117 Register input_reg = ToRegister(input); | 5253 Register input_reg = ToRegister(input); |
5118 DwVfpRegister result_reg = ToDoubleRegister(result); | 5254 DwVfpRegister result_reg = ToDoubleRegister(result); |
5119 | 5255 |
| 5256 NumberUntagDMode mode = NUMBER_CANDIDATE_IS_ANY_TAGGED; |
| 5257 HValue* value = instr->hydrogen()->value(); |
| 5258 if (value->type().IsSmi()) { |
| 5259 if (value->IsLoadKeyed()) { |
| 5260 HLoadKeyed* load = HLoadKeyed::cast(value); |
| 5261 if (load->UsesMustHandleHole()) { |
| 5262 if (load->hole_mode() == ALLOW_RETURN_HOLE) { |
| 5263 mode = NUMBER_CANDIDATE_IS_SMI_CONVERT_HOLE; |
| 5264 } else { |
| 5265 mode = NUMBER_CANDIDATE_IS_SMI_OR_HOLE; |
| 5266 } |
| 5267 } else { |
| 5268 mode = NUMBER_CANDIDATE_IS_SMI; |
| 5269 } |
| 5270 } |
| 5271 } |
| 5272 |
5120 EmitNumberUntagD(input_reg, result_reg, | 5273 EmitNumberUntagD(input_reg, result_reg, |
5121 instr->hydrogen()->deoptimize_on_undefined(), | 5274 instr->hydrogen()->deoptimize_on_undefined(), |
5122 instr->hydrogen()->deoptimize_on_minus_zero(), | 5275 instr->hydrogen()->deoptimize_on_minus_zero(), |
5123 instr->environment()); | 5276 instr->environment(), |
| 5277 mode); |
5124 } | 5278 } |
5125 | 5279 |
5126 | 5280 |
5127 void LCodeGen::DoDoubleToI(LDoubleToI* instr) { | 5281 void LCodeGen::DoDoubleToI(LDoubleToI* instr) { |
5128 Register result_reg = ToRegister(instr->result()); | 5282 Register result_reg = ToRegister(instr->result()); |
5129 Register scratch1 = scratch0(); | 5283 Register scratch1 = scratch0(); |
5130 Register scratch2 = ToRegister(instr->temp()); | 5284 Register scratch2 = ToRegister(instr->temp()); |
5131 DwVfpRegister double_input = ToDoubleRegister(instr->value()); | 5285 DwVfpRegister double_input = ToDoubleRegister(instr->value()); |
5132 DwVfpRegister double_scratch = double_scratch0(); | 5286 DwVfpRegister double_scratch = double_scratch0(); |
5133 | 5287 |
(...skipping 280 matching lines...)
5414 __ mov(result, Operand::Zero()); | 5568 __ mov(result, Operand::Zero()); |
5415 | 5569 |
5416 PushSafepointRegistersScope scope(this, Safepoint::kWithRegisters); | 5570 PushSafepointRegistersScope scope(this, Safepoint::kWithRegisters); |
5417 __ mov(r0, Operand(Smi::FromInt(instance_size))); | 5571 __ mov(r0, Operand(Smi::FromInt(instance_size))); |
5418 __ push(r0); | 5572 __ push(r0); |
5419 CallRuntimeFromDeferred(Runtime::kAllocateInNewSpace, 1, instr); | 5573 CallRuntimeFromDeferred(Runtime::kAllocateInNewSpace, 1, instr); |
5420 __ StoreToSafepointRegisterSlot(r0, result); | 5574 __ StoreToSafepointRegisterSlot(r0, result); |
5421 } | 5575 } |
5422 | 5576 |
5423 | 5577 |
| 5578 void LCodeGen::DoAllocate(LAllocate* instr) { |
| 5579 class DeferredAllocate: public LDeferredCode { |
| 5580 public: |
| 5581 DeferredAllocate(LCodeGen* codegen, LAllocate* instr) |
| 5582 : LDeferredCode(codegen), instr_(instr) { } |
| 5583 virtual void Generate() { codegen()->DoDeferredAllocate(instr_); } |
| 5584 virtual LInstruction* instr() { return instr_; } |
| 5585 private: |
| 5586 LAllocate* instr_; |
| 5587 }; |
| 5588 |
| 5589 DeferredAllocate* deferred = |
| 5590 new(zone()) DeferredAllocate(this, instr); |
| 5591 |
| 5592 Register size = ToRegister(instr->size()); |
| 5593 Register result = ToRegister(instr->result()); |
| 5594 Register scratch = ToRegister(instr->temp1()); |
| 5595 Register scratch2 = ToRegister(instr->temp2()); |
| 5596 |
| 5597 HAllocate* original_instr = instr->hydrogen(); |
| 5598 if (original_instr->size()->IsConstant()) { |
| 5599 UNREACHABLE(); |
| 5600 } else { |
| 5601 // Allocate memory for the object. |
| 5602 AllocationFlags flags = TAG_OBJECT; |
| 5603 if (original_instr->MustAllocateDoubleAligned()) { |
| 5604 flags = static_cast<AllocationFlags>(flags | DOUBLE_ALIGNMENT); |
| 5605 } |
| 5606 __ AllocateInNewSpace(size, |
| 5607 result, |
| 5608 scratch, |
| 5609 scratch2, |
| 5610 deferred->entry(), |
| 5611 flags);
| 5612 } |
| 5613 |
| 5614 __ bind(deferred->exit()); |
| 5615 } |
| 5616 |
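On MustAllocateDoubleAligned in DoAllocate: DOUBLE_ALIGNMENT asks the allocator to round the object start up to an 8-byte boundary so unboxed doubles inside stay naturally aligned, which matters for 64-bit loads on ARM; on a 32-bit heap the padding is at most one word. A minimal sketch (not V8 code) of the rounding:

    // Minimal sketch (not V8 code) of the 8-byte rounding DOUBLE_ALIGNMENT implies.
    #include <cstdint>
    #include <cstdio>
    int main() {
      const uintptr_t kDoubleAlignment = 8;
      uintptr_t top = 0x1000C;             // example allocation top, word aligned only
      uintptr_t start = (top + kDoubleAlignment - 1) & ~(kDoubleAlignment - 1);
      std::printf("padding = %u bytes\n",
                  static_cast<unsigned>(start - top));       // 4
      return 0;
    }
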
| 5617 |
| 5618 void LCodeGen::DoDeferredAllocate(LAllocate* instr) { |
| 5619 Register size = ToRegister(instr->size()); |
| 5620 Register result = ToRegister(instr->result()); |
| 5621 |
| 5622 // TODO(3095996): Get rid of this. For now, we need to make the |
| 5623 // result register contain a valid pointer because it is already |
| 5624 // contained in the register pointer map. |
| 5625 __ mov(result, Operand(Smi::FromInt(0))); |
| 5626 |
| 5627 PushSafepointRegistersScope scope(this, Safepoint::kWithRegisters); |
| 5628 __ SmiTag(size, size); |
| 5629 __ push(size); |
| 5630 CallRuntimeFromDeferred(Runtime::kAllocateInNewSpace, 1, instr); |
| 5631 __ StoreToSafepointRegisterSlot(r0, result); |
| 5632 } |
| 5633 |
| 5634 |
5424 void LCodeGen::DoArrayLiteral(LArrayLiteral* instr) { | 5635 void LCodeGen::DoArrayLiteral(LArrayLiteral* instr) { |
5425 Handle<FixedArray> literals(instr->environment()->closure()->literals()); | 5636 Handle<FixedArray> literals(instr->environment()->closure()->literals()); |
5426 ElementsKind boilerplate_elements_kind = | 5637 ElementsKind boilerplate_elements_kind = |
5427 instr->hydrogen()->boilerplate_elements_kind(); | 5638 instr->hydrogen()->boilerplate_elements_kind(); |
5428 AllocationSiteMode allocation_site_mode = | 5639 AllocationSiteMode allocation_site_mode = |
5429 instr->hydrogen()->allocation_site_mode(); | 5640 instr->hydrogen()->allocation_site_mode(); |
5430 | 5641 |
5431 // Deopt if the array literal boilerplate ElementsKind is of a type different | 5642 // Deopt if the array literal boilerplate ElementsKind is of a type different |
5432 // than the expected one. The check isn't necessary if the boilerplate has | 5643 // than the expected one. The check isn't necessary if the boilerplate has |
5433 // already been converted to TERMINAL_FAST_ELEMENTS_KIND. | 5644 // already been converted to TERMINAL_FAST_ELEMENTS_KIND. |
(...skipping 671 matching lines...)
6105 __ sub(scratch, result, Operand(index, LSL, kPointerSizeLog2 - kSmiTagSize)); | 6316 __ sub(scratch, result, Operand(index, LSL, kPointerSizeLog2 - kSmiTagSize)); |
6106 __ ldr(result, FieldMemOperand(scratch, | 6317 __ ldr(result, FieldMemOperand(scratch, |
6107 FixedArray::kHeaderSize - kPointerSize)); | 6318 FixedArray::kHeaderSize - kPointerSize)); |
6108 __ bind(&done); | 6319 __ bind(&done); |
6109 } | 6320 } |
6110 | 6321 |
6111 | 6322 |
6112 #undef __ | 6323 #undef __ |
6113 | 6324 |
6114 } } // namespace v8::internal | 6325 } } // namespace v8::internal |