Chromium Code Reviews

Unified Diff: src/arm/lithium-codegen-arm.cc

Issue 11659022: Generate the TransitionElementsStub using Crankshaft (Closed)
Base URL: https://v8.googlecode.com/svn/branches/bleeding_edge
Patch Set: Review feedback (created 7 years, 10 months ago)
// Copyright 2012 the V8 project authors. All rights reserved.
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
//
//     * Redistributions of source code must retain the above copyright
//       notice, this list of conditions and the following disclaimer.
//     * Redistributions in binary form must reproduce the above
//       copyright notice, this list of conditions and the following
//       disclaimer in the documentation and/or other materials provided
(...skipping 128 matching lines...)
      __ b(eq, &ok);
      int receiver_offset = scope()->num_parameters() * kPointerSize;
      __ LoadRoot(r2, Heap::kUndefinedValueRootIndex);
      __ str(r2, MemOperand(sp, receiver_offset));
      __ bind(&ok);
    }
  }

  info()->set_prologue_offset(masm_->pc_offset());
  if (NeedsEagerFrame()) {
-    PredictableCodeSizeScope predictible_code_size_scope(
-        masm_, kNoCodeAgeSequenceLength * Assembler::kInstrSize);
-    // The following three instructions must remain together and unmodified
-    // for code aging to work properly.
-    __ stm(db_w, sp, r1.bit() | cp.bit() | fp.bit() | lr.bit());
-    // Load undefined value here, so the value is ready for the loop
-    // below.
-    __ LoadRoot(ip, Heap::kUndefinedValueRootIndex);
-    // Adjust FP to point to saved FP.
-    __ add(fp, sp, Operand(2 * kPointerSize));
+    if (info()->IsStub()) {
+      __ stm(db_w, sp, cp.bit() | fp.bit() | lr.bit());
+      __ Push(Smi::FromInt(StackFrame::STUB));
+      // Adjust FP to point to saved FP.
+      __ add(fp, sp, Operand(2 * kPointerSize));
+    } else {
+      PredictableCodeSizeScope predictible_code_size_scope(
+          masm_, kNoCodeAgeSequenceLength * Assembler::kInstrSize);
+      // The following three instructions must remain together and unmodified
+      // for code aging to work properly.
+      __ stm(db_w, sp, r1.bit() | cp.bit() | fp.bit() | lr.bit());
+      // Load undefined value here, so the value is ready for the loop
+      // below.
+      __ LoadRoot(ip, Heap::kUndefinedValueRootIndex);
+      // Adjust FP to point to saved FP.
+      __ add(fp, sp, Operand(2 * kPointerSize));
+    }
    frame_is_built_ = true;
  }

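Note: the two prologue variants above leave these frame shapes behind (a sketch
reconstructed from the stores, assuming 4-byte pointers; not part of the patch):

    JS frame:                            stub frame (info()->IsStub()):
      fp + 4   saved lr                    fp + 4   saved lr
      fp + 0   saved fp      <-- fp        fp + 0   saved fp      <-- fp
      fp - 4   cp (context)                fp - 4   cp (context)
      fp - 8   r1 (function) <-- sp        fp - 8   Smi(StackFrame::STUB) <-- sp

The stub marker takes the slot a JS frame uses for the function, which is
presumably what lets the stack walker classify the frame without a JSFunction.
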
  // Reserve space for the stack slots needed by the code.
  int slots = GetStackSlotCount();
  if (slots > 0) {
    if (FLAG_debug_code) {
-      __ mov(r0, Operand(slots));
-      __ mov(r2, Operand(kSlotsZapValue));
+      __ sub(sp, sp, Operand(slots * kPointerSize));
+      __ push(r0);
+      __ push(r1);
+      __ add(r0, sp, Operand(slots * kPointerSize));
+      __ mov(r1, Operand(kSlotsZapValue));
       Label loop;
       __ bind(&loop);
-      __ push(r2);
-      __ sub(r0, r0, Operand(1), SetCC);
+      __ sub(r0, r0, Operand(kPointerSize));
+      __ str(r1, MemOperand(r0, 2 * kPointerSize));
+      __ cmp(r0, sp);
       __ b(ne, &loop);
+      __ pop(r1);
+      __ pop(r0);
    } else {
      __ sub(sp, sp, Operand(slots * kPointerSize));
    }
  }

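Note: the reworked FLAG_debug_code path zaps the reserved slots in place rather
than pushing kSlotsZapValue `slots` times: r0 and r1 are saved below the
reserved area, then r0 walks from the top of the area down to sp, storing
through a 2 * kPointerSize offset to skip the two saved registers. A plain C++
rendering of the same walk (hypothetical helper, for illustration only):

#include <cstdint>

// Fill `slots` stack words with the zap pattern, top of the area downwards,
// mirroring the sub/str/cmp loop above. `sp` is the stack pointer after r0
// and r1 have been pushed below the reserved area.
void ZapSlots(uint32_t* sp, int slots, uint32_t zap_value /* kSlotsZapValue */) {
  uint32_t* r0 = sp + slots;  // __ add(r0, sp, Operand(slots * kPointerSize));
  while (r0 != sp) {          // __ cmp(r0, sp); __ b(ne, &loop);
    --r0;                     // __ sub(r0, r0, Operand(kPointerSize));
    r0[2] = zap_value;        // __ str(r1, MemOperand(r0, 2 * kPointerSize));
  }
}
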
+  if (info()->saves_caller_doubles() && CpuFeatures::IsSupported(VFP2)) {
+    CpuFeatures::Scope scope(VFP2);
+    Comment(";;; Save clobbered callee double registers");
+    int count = 0;
+    BitVector* doubles = chunk()->allocated_double_registers();
+    BitVector::Iterator save_iterator(doubles);
+    while (!save_iterator.Done()) {
+      __ vstr(DwVfpRegister::FromAllocationIndex(save_iterator.Current()),
+              MemOperand(sp, count * kDoubleSize));
+      save_iterator.Advance();
+      count++;
+    }
+  }
+
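Note: the save loop above spills the i-th allocated double register to
sp + i * kDoubleSize; the restore loop added to DoReturn later in this diff
iterates the same bit vector in the same order, so the pairing is purely
positional. A sketch of that indexing contract, with std::vector standing in
for V8's BitVector (illustration only):

#include <cstddef>
#include <vector>

// The i-th set bit in the allocation bit vector maps to offset i * kDoubleSize
// for both the vstr loop above and the matching vldr loop in DoReturn.
std::vector<int> DoubleSpillOffsets(const std::vector<bool>& allocated) {
  const int kDoubleSize = 8;
  std::vector<int> offsets(allocated.size(), -1);
  int count = 0;
  for (std::size_t i = 0; i < allocated.size(); ++i) {
    if (allocated[i]) offsets[i] = count++ * kDoubleSize;
  }
  return offsets;
}
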
  // Possibly allocate a local context.
  int heap_slots = info()->num_heap_slots() - Context::MIN_CONTEXT_SLOTS;
  if (heap_slots > 0) {
    Comment(";;; Allocate local context");
    // Argument to NewContext is the function, which is in r1.
    __ push(r1);
    if (heap_slots <= FastNewContextStub::kMaximumSlots) {
      FastNewContextStub stub(heap_slots);
      __ CallStub(&stub);
    } else {
(...skipping 2625 matching lines...)
}


void LCodeGen::DoReturn(LReturn* instr) {
  if (FLAG_trace && info()->IsOptimizing()) {
    // Push the return value on the stack as the parameter.
    // Runtime::TraceExit returns its parameter in r0.
    __ push(r0);
    __ CallRuntime(Runtime::kTraceExit, 1);
  }
+  if (info()->saves_caller_doubles() && CpuFeatures::IsSupported(VFP2)) {
+    CpuFeatures::Scope scope(VFP2);
+    ASSERT(NeedsEagerFrame());
+    BitVector* doubles = chunk()->allocated_double_registers();
+    BitVector::Iterator save_iterator(doubles);
+    int count = 0;
+    while (!save_iterator.Done()) {
+      __ vldr(DwVfpRegister::FromAllocationIndex(save_iterator.Current()),
+              MemOperand(sp, count * kDoubleSize));
+      save_iterator.Advance();
+      count++;
+    }
+  }
  if (NeedsEagerFrame()) {
    int32_t sp_delta = (GetParameterCount() + 1) * kPointerSize;
    __ mov(sp, fp);
    __ ldm(ia_w, sp, fp.bit() | lr.bit());
-    __ add(sp, sp, Operand(sp_delta));
+    if (!info()->IsStub()) {
+      __ add(sp, sp, Operand(sp_delta));
+    }
  }
  __ Jump(lr);
}
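
Note: the epilogue now mirrors the two prologue shapes. Both frame kinds tear
down fp and lr, but only JS frames drop the receiver and parameters; a stub
frame's caller pushed no JS argument area. In outline (a comment sketch of the
code above, not new behavior):

// sp = fp;                                // discard spill slots
// fp, lr = two pops;                      // __ ldm(ia_w, sp, fp.bit() | lr.bit());
// if (!info()->IsStub())
//   sp += (GetParameterCount() + 1) * kPointerSize;  // receiver + parameters
// return via lr;                          // __ Jump(lr);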


void LCodeGen::DoLoadGlobalCell(LLoadGlobalCell* instr) {
  Register result = ToRegister(instr->result());
  __ mov(ip, Operand(Handle<Object>(instr->hydrogen()->cell())));
  __ ldr(result, FieldMemOperand(ip, JSGlobalPropertyCell::kValueOffset));
  if (instr->hydrogen()->RequiresHoleCheck()) {
(...skipping 742 matching lines...)
}


void LCodeGen::DoThisFunction(LThisFunction* instr) {
  Register result = ToRegister(instr->result());
  __ ldr(result, MemOperand(fp, JavaScriptFrameConstants::kFunctionOffset));
}


void LCodeGen::DoContext(LContext* instr) {
+  // If there is a non-return use, the context must be moved to a register.
  Register result = ToRegister(instr->result());
-  __ mov(result, cp);
+  for (HUseIterator it(instr->hydrogen()->uses()); !it.Done(); it.Advance()) {
+    if (!it.value()->IsReturn()) {
+      __ mov(result, cp);
+      return;
+    }
+  }
}
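
Note: DoContext now emits the mov only when some use other than the return
needs the context in the result register; a return alone can read cp directly.
The predicate, pulled out as a hypothetical helper for clarity (same scan as
the loop above):

static bool HasNonReturnUse(HValue* context_value) {
  for (HUseIterator it(context_value->uses()); !it.Done(); it.Advance()) {
    if (!it.value()->IsReturn()) return true;
  }
  return false;
}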


void LCodeGen::DoOuterContext(LOuterContext* instr) {
  Register context = ToRegister(instr->context());
  Register result = ToRegister(instr->result());
  __ ldr(result,
         MemOperand(context, Context::SlotOffset(Context::PREVIOUS_INDEX)));
}

(...skipping 898 matching lines...)

  Handle<Code> ic = (instr->strict_mode_flag() == kStrictMode)
      ? isolate()->builtins()->KeyedStoreIC_Initialize_Strict()
      : isolate()->builtins()->KeyedStoreIC_Initialize();
  CallCode(ic, RelocInfo::CODE_TARGET, instr, NEVER_INLINE_TARGET_ADDRESS);
}


void LCodeGen::DoTransitionElementsKind(LTransitionElementsKind* instr) {
  Register object_reg = ToRegister(instr->object());
-  Register new_map_reg = ToRegister(instr->new_map_temp());
  Register scratch = scratch0();

  Handle<Map> from_map = instr->original_map();
  Handle<Map> to_map = instr->transitioned_map();
  ElementsKind from_kind = instr->from_kind();
  ElementsKind to_kind = instr->to_kind();

  Label not_applicable;
  __ ldr(scratch, FieldMemOperand(object_reg, HeapObject::kMapOffset));
  __ cmp(scratch, Operand(from_map));
  __ b(ne, &not_applicable);
-  __ mov(new_map_reg, Operand(to_map));

  if (IsSimpleMapChangeTransition(from_kind, to_kind)) {
+    Register new_map_reg = ToRegister(instr->new_map_temp());
+    __ mov(new_map_reg, Operand(to_map));
    __ str(new_map_reg, FieldMemOperand(object_reg, HeapObject::kMapOffset));
    // Write barrier.
    __ RecordWriteField(object_reg, HeapObject::kMapOffset, new_map_reg,
                        scratch, kLRHasBeenSaved, kDontSaveFPRegs);
+  } else if (FLAG_compiled_transitions) {
+    PushSafepointRegistersScope scope(this, Safepoint::kWithRegisters);
+    if (!object_reg.is(r0)) {

[Rodolph Perfetta 2013/01/31 20:05:55] Move(r0, object_reg);
[danno 2013/02/01 11:37:35] Done.

+      __ mov(r0, object_reg);
+    }
+    __ Move(r1, to_map);
+    TransitionElementsKindStub stub(from_kind, to_kind);
+    __ CallStub(&stub);
+    RecordSafepointWithRegisters(
+        instr->pointer_map(), 0, Safepoint::kNoLazyDeopt);
  } else if (IsFastSmiElementsKind(from_kind) &&
             IsFastDoubleElementsKind(to_kind)) {
    Register fixed_object_reg = ToRegister(instr->temp());
    ASSERT(fixed_object_reg.is(r2));
+    Register new_map_reg = ToRegister(instr->new_map_temp());
    ASSERT(new_map_reg.is(r3));
+    __ mov(new_map_reg, Operand(to_map));
    __ mov(fixed_object_reg, object_reg);
    CallCode(isolate()->builtins()->TransitionElementsSmiToDouble(),
             RelocInfo::CODE_TARGET, instr);
  } else if (IsFastDoubleElementsKind(from_kind) &&
             IsFastObjectElementsKind(to_kind)) {
    Register fixed_object_reg = ToRegister(instr->temp());
    ASSERT(fixed_object_reg.is(r2));
+    Register new_map_reg = ToRegister(instr->new_map_temp());
    ASSERT(new_map_reg.is(r3));
+    __ mov(new_map_reg, Operand(to_map));
    __ mov(fixed_object_reg, object_reg);
    CallCode(isolate()->builtins()->TransitionElementsDoubleToObject(),
             RelocInfo::CODE_TARGET, instr);
  } else {
    UNREACHABLE();
  }
  __ bind(&not_applicable);
}
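
Note: the FLAG_compiled_transitions arm is the point of this CL. Instead of
jumping to a hand-written builtin, it calls the Crankshaft-generated
TransitionElementsKindStub under a fixed register convention, inside a
safepoint scope so a GC during the stub can relocate every live register.
Condensed (a sketch of the hunk above):

// PushSafepointRegistersScope scope(this, Safepoint::kWithRegisters);
// r0 <- object  (if not already there)    // the stub's object argument
// r1 <- to_map                            // the stub's target-map argument
// CallStub(TransitionElementsKindStub(from_kind, to_kind));
// RecordSafepointWithRegisters(instr->pointer_map(), 0, Safepoint::kNoLazyDeopt);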


+void LCodeGen::DoTrapAllocationMemento(LTrapAllocationMemento* instr) {
+  Register object = ToRegister(instr->object());
+  Register temp = ToRegister(instr->temp());
+  __ TestJSArrayForAllocationSiteInfo(object, temp);
+  DeoptimizeIf(eq, instr->environment());
+}
+
+
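Note: DoTrapAllocationMemento is new in this patch. Judging from the
DeoptimizeIf(eq, ...) that follows the test, eq signals that the JSArray is
immediately followed by an AllocationSiteInfo memento, i.e. its allocation
site is still being tracked, so the optimized code bails out rather than
transition the array behind the tracker's back.
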
void LCodeGen::DoStringAdd(LStringAdd* instr) {
  __ push(ToRegister(instr->left()));
  __ push(ToRegister(instr->right()));
  StringAddStub stub(NO_STRING_CHECK_IN_STUB);
  CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
}


void LCodeGen::DoStringCharCodeAt(LStringCharCodeAt* instr) {
  class DeferredStringCharCodeAt: public LDeferredCode {
(...skipping 316 matching lines...)
   private:
    LNumberTagD* instr_;
  };

  DwVfpRegister input_reg = ToDoubleRegister(instr->value());
  Register scratch = scratch0();
  Register reg = ToRegister(instr->result());
  Register temp1 = ToRegister(instr->temp());
  Register temp2 = ToRegister(instr->temp2());

+  bool convert_hole = false;
+  HValue* change_input = instr->hydrogen()->value();
+  if (change_input->IsLoadKeyed()) {
+    HLoadKeyed* load = HLoadKeyed::cast(change_input);
+    convert_hole = load->CanReturnHole();
+  }
+
+  Label no_special_nan_handling;
+  Label done;
+  if (convert_hole) {
+    if (CpuFeatures::IsSupported(VFP2)) {
+      CpuFeatures::Scope scope(VFP2);
+      DwVfpRegister input_reg = ToDoubleRegister(instr->value());
+      __ VFPCompareAndSetFlags(input_reg, input_reg);
+      __ b(vc, &no_special_nan_handling);
+      __ vmov(reg, scratch0(), input_reg);
+      __ cmp(scratch0(), Operand(kHoleNanUpper32));
+      Label canonicalize;
+      __ b(ne, &canonicalize);
+      __ Move(reg, factory()->the_hole_value());
+      __ b(&done);
+      __ bind(&canonicalize);
+      __ Vmov(input_reg,
+              FixedDoubleArray::canonical_not_the_hole_nan_as_double(),
+              no_reg, vs);

[Rodolph Perfetta 2013/01/31 20:05:55] Did you intend to make this vmov conditional?
[danno 2013/02/01 11:37:35] Done.

+    } else {
+      Label not_hole;
+      __ cmp(sfpd_hi, Operand(kHoleNanUpper32));
+      __ b(ne, &not_hole);
+      __ Move(reg, factory()->the_hole_value());
+      __ b(&done);
+      __ bind(&not_hole);
+      __ and_(scratch, sfpd_hi, Operand(0x7ff00000));
+      __ cmp(scratch, Operand(0x7ff00000));
+      __ b(ne, &no_special_nan_handling);
+      Label special_nan_handling;
+      __ tst(sfpd_hi, Operand(0x000FFFFF));
+      __ b(ne, &special_nan_handling);
+      __ cmp(sfpd_lo, Operand(0));
+      __ b(eq, &no_special_nan_handling);
+      __ bind(&special_nan_handling);
+      double canonical_nan =
+          FixedDoubleArray::canonical_not_the_hole_nan_as_double();
+      uint64_t casted_nan = BitCast<uint64_t>(canonical_nan);
+      __ mov(sfpd_lo,
+             Operand(static_cast<uint32_t>(casted_nan & 0xFFFFFFFF)));
+      __ mov(sfpd_hi,
+             Operand(static_cast<uint32_t>(casted_nan >> 32)));
+    }
+  }
+
+  __ bind(&no_special_nan_handling);
  DeferredNumberTagD* deferred = new(zone()) DeferredNumberTagD(this, instr);
  if (FLAG_inline_new) {
    __ LoadRoot(scratch, Heap::kHeapNumberMapRootIndex);
    // We want the untagged address first for performance
    __ AllocateHeapNumber(reg, temp1, temp2, scratch, deferred->entry(),
                          DONT_TAG_RESULT);
  } else {
    __ jmp(deferred->entry());
  }
  __ bind(deferred->exit());
  if (CpuFeatures::IsSupported(VFP2)) {
    CpuFeatures::Scope scope(VFP2);
    __ vstr(input_reg, reg, HeapNumber::kValueOffset);
  } else {
    __ str(sfpd_lo, MemOperand(reg, HeapNumber::kValueOffset));
    __ str(sfpd_hi, MemOperand(reg, HeapNumber::kValueOffset + kPointerSize));
  }
  // Now that we have finished with the object's real address tag it
  __ add(reg, reg, Operand(kHeapObjectTag));
+  __ bind(&done);
}

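Note: both arms of the new convert_hole block above test for V8's hole NaN, a
reserved quiet-NaN bit pattern whose upper 32 bits are kHoleNanUpper32. A real
hole is boxed as the_hole_value, and any other NaN is rewritten to the
canonical NaN so the hole pattern can never escape into a HeapNumber. The same
test in portable C++ (the constant is left as a parameter rather than copied
from the source):

#include <cstdint>
#include <cstring>

// True when `d` carries the hole's bit pattern: a NaN with the reserved
// upper word.
bool IsHoleNan(double d, uint32_t hole_nan_upper32 /* V8's kHoleNanUpper32 */) {
  uint64_t bits;
  std::memcpy(&bits, &d, sizeof bits);  // same move as vmov(reg, scratch0(), input_reg)
  return d != d &&                      // NaN check, like VFPCompareAndSetFlags
         static_cast<uint32_t>(bits >> 32) == hole_nan_upper32;
}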

void LCodeGen::DoDeferredNumberTagD(LNumberTagD* instr) {
  // TODO(3095996): Get rid of this. For now, we need to make the
  // result register contain a valid pointer because it is already
  // contained in the register pointer map.
  Register reg = ToRegister(instr->result());
  __ mov(reg, Operand::Zero());

(...skipping 21 matching lines...)
  } else {
    __ SmiUntag(result, input);
  }
}


void LCodeGen::EmitNumberUntagD(Register input_reg,
                                DwVfpRegister result_reg,
                                bool deoptimize_on_undefined,
                                bool deoptimize_on_minus_zero,
-                               LEnvironment* env) {
+                               LEnvironment* env,
+                               NumberUntagDMode mode) {
  Register scratch = scratch0();
  SwVfpRegister flt_scratch = double_scratch0().low();
  ASSERT(!result_reg.is(double_scratch0()));
  CpuFeatures::Scope scope(VFP2);

  Label load_smi, heap_number, done;

-  // Smi check.
-  __ UntagAndJumpIfSmi(scratch, input_reg, &load_smi);
-
-  // Heap number map check.
-  __ ldr(scratch, FieldMemOperand(input_reg, HeapObject::kMapOffset));
-  __ LoadRoot(ip, Heap::kHeapNumberMapRootIndex);
-  __ cmp(scratch, Operand(ip));
-  if (deoptimize_on_undefined) {
-    DeoptimizeIf(ne, env);
-  } else {
-    Label heap_number;
-    __ b(eq, &heap_number);
-
-    __ LoadRoot(ip, Heap::kUndefinedValueRootIndex);
-    __ cmp(input_reg, Operand(ip));
-    DeoptimizeIf(ne, env);
-
-    // Convert undefined to NaN.
-    __ LoadRoot(ip, Heap::kNanValueRootIndex);
-    __ sub(ip, ip, Operand(kHeapObjectTag));
-    __ vldr(result_reg, ip, HeapNumber::kValueOffset);
-    __ jmp(&done);
-
-    __ bind(&heap_number);
-  }
-  // Heap number to double register conversion.
-  __ sub(ip, input_reg, Operand(kHeapObjectTag));
-  __ vldr(result_reg, ip, HeapNumber::kValueOffset);
-  if (deoptimize_on_minus_zero) {
-    __ vmov(ip, result_reg.low());
-    __ cmp(ip, Operand::Zero());
-    __ b(ne, &done);
-    __ vmov(ip, result_reg.high());
-    __ cmp(ip, Operand(HeapNumber::kSignMask));
-    DeoptimizeIf(eq, env);
-  }
-  __ jmp(&done);
+  if (mode == NUMBER_CANDIDATE_IS_ANY_TAGGED) {
+    // Smi check.
+    __ UntagAndJumpIfSmi(scratch, input_reg, &load_smi);
+
+    // Heap number map check.
+    __ ldr(scratch, FieldMemOperand(input_reg, HeapObject::kMapOffset));
+    __ LoadRoot(ip, Heap::kHeapNumberMapRootIndex);
+    __ cmp(scratch, Operand(ip));
+    if (deoptimize_on_undefined) {
+      DeoptimizeIf(ne, env);
+    } else {
+      Label heap_number;
+      __ b(eq, &heap_number);
+
+      __ LoadRoot(ip, Heap::kUndefinedValueRootIndex);
+      __ cmp(input_reg, Operand(ip));
+      DeoptimizeIf(ne, env);
+
+      // Convert undefined to NaN.
+      __ LoadRoot(ip, Heap::kNanValueRootIndex);
+      __ sub(ip, ip, Operand(kHeapObjectTag));
+      __ vldr(result_reg, ip, HeapNumber::kValueOffset);
+      __ jmp(&done);
+
+      __ bind(&heap_number);
+    }
+    // Heap number to double register conversion.
+    __ sub(ip, input_reg, Operand(kHeapObjectTag));
+    __ vldr(result_reg, ip, HeapNumber::kValueOffset);
+    if (deoptimize_on_minus_zero) {
+      __ vmov(ip, result_reg.low());
+      __ cmp(ip, Operand::Zero());
+      __ b(ne, &done);
+      __ vmov(ip, result_reg.high());
+      __ cmp(ip, Operand(HeapNumber::kSignMask));
+      DeoptimizeIf(eq, env);
+    }
+    __ jmp(&done);
+  } else if (mode == NUMBER_CANDIDATE_IS_SMI_OR_HOLE) {
+    __ SmiUntag(scratch, input_reg, SetCC);
+    DeoptimizeIf(cs, env);
+  } else if (mode == NUMBER_CANDIDATE_IS_SMI_CONVERT_HOLE) {
+    __ UntagAndJumpIfSmi(scratch, input_reg, &load_smi);
+    __ Vmov(result_reg,
+            FixedDoubleArray::hole_nan_as_double(),
+            no_reg, vs);

[Rodolph Perfetta 2013/01/31 20:05:55] same here, did you mean vmovvs?
[danno 2013/02/01 11:37:35] Done.

+    __ b(&done);
+  } else {
+    __ SmiUntag(scratch, input_reg);
+    ASSERT(mode == NUMBER_CANDIDATE_IS_SMI);
+  }

  // Smi to double register conversion
  __ bind(&load_smi);
  // scratch: untagged value of input_reg
  __ vmov(flt_scratch, scratch);
  __ vcvt_f64_s32(result_reg, flt_scratch);
  __ bind(&done);
}
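
Note: summarizing the dispatch above, the four NumberUntagDMode values behave
as follows (names from this patch, descriptions read off the code):

  NUMBER_CANDIDATE_IS_ANY_TAGGED        full path: smi, heap number, and
                                        optionally undefined -> NaN
  NUMBER_CANDIDATE_IS_SMI               untag unconditionally, no checks
  NUMBER_CANDIDATE_IS_SMI_OR_HOLE       untag, deoptimize if not a smi
  NUMBER_CANDIDATE_IS_SMI_CONVERT_HOLE  untag if smi, otherwise produce the
                                        hole NaN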


(...skipping 107 matching lines...)

void LCodeGen::DoNumberUntagD(LNumberUntagD* instr) {
  LOperand* input = instr->value();
  ASSERT(input->IsRegister());
  LOperand* result = instr->result();
  ASSERT(result->IsDoubleRegister());

  Register input_reg = ToRegister(input);
  DwVfpRegister result_reg = ToDoubleRegister(result);

+  NumberUntagDMode mode = NUMBER_CANDIDATE_IS_ANY_TAGGED;
+  HValue* value = instr->hydrogen()->value();
+  if (value->type().IsSmi()) {
+    if (value->IsLoadKeyed()) {
+      HLoadKeyed* load = HLoadKeyed::cast(value);
+      if (load->CanReturnHole()) {
+        if (load->hole_mode() == ALLOW_RETURN_HOLE) {
+          mode = NUMBER_CANDIDATE_IS_SMI_CONVERT_HOLE;
+        } else {
+          mode = NUMBER_CANDIDATE_IS_SMI_OR_HOLE;
+        }
+      } else {
+        mode = NUMBER_CANDIDATE_IS_SMI;
+      }
+    }
+  }
+
  EmitNumberUntagD(input_reg, result_reg,
                   instr->hydrogen()->deoptimize_on_undefined(),
                   instr->hydrogen()->deoptimize_on_minus_zero(),
-                  instr->environment());
+                  instr->environment(),
+                  mode);
}


void LCodeGen::DoDoubleToI(LDoubleToI* instr) {
  Register result_reg = ToRegister(instr->result());
  Register scratch1 = scratch0();
  Register scratch2 = ToRegister(instr->temp());
  DwVfpRegister double_input = ToDoubleRegister(instr->value());
  DwVfpRegister double_scratch = double_scratch0();

(...skipping 280 matching lines...)
  __ mov(result, Operand::Zero());

  PushSafepointRegistersScope scope(this, Safepoint::kWithRegisters);
  __ mov(r0, Operand(Smi::FromInt(instance_size)));
  __ push(r0);
  CallRuntimeFromDeferred(Runtime::kAllocateInNewSpace, 1, instr);
  __ StoreToSafepointRegisterSlot(r0, result);
}


+void LCodeGen::DoAllocate(LAllocate* instr) {
+  class DeferredAllocate: public LDeferredCode {
+   public:
+    DeferredAllocate(LCodeGen* codegen, LAllocate* instr)
+        : LDeferredCode(codegen), instr_(instr) { }
+    virtual void Generate() { codegen()->DoDeferredAllocate(instr_); }
+    virtual LInstruction* instr() { return instr_; }
+   private:
+    LAllocate* instr_;
+  };
+
+  DeferredAllocate* deferred =
+      new(zone()) DeferredAllocate(this, instr);
+
+  Register size = ToRegister(instr->size());
+  Register result = ToRegister(instr->result());
+  Register scratch = ToRegister(instr->temp1());
+  Register scratch2 = ToRegister(instr->temp2());
+
+  HAllocate* original_instr = instr->hydrogen();
+  if (original_instr->size()->IsConstant()) {
+    UNREACHABLE();
+  } else {
+    // Allocate memory for the object.
+    AllocationFlags flags = TAG_OBJECT;
+    if (original_instr->MustAllocateDoubleAligned()) {
+      flags = static_cast<AllocationFlags>(flags | DOUBLE_ALIGNMENT);
+    }
+    __ AllocateInNewSpace(size,
+                          result,
+                          scratch,
+                          scratch2,
+                          deferred->entry(),
+                          TAG_OBJECT);
+  }
+
+  __ bind(deferred->exit());
+}
+
+
+void LCodeGen::DoDeferredAllocate(LAllocate* instr) {
+  Register size = ToRegister(instr->size());
+  Register result = ToRegister(instr->result());
+
+  // TODO(3095996): Get rid of this. For now, we need to make the
+  // result register contain a valid pointer because it is already
+  // contained in the register pointer map.
+  __ mov(result, Operand(Smi::FromInt(0)));
+
+  PushSafepointRegistersScope scope(this, Safepoint::kWithRegisters);
+  __ SmiTag(size, size);
+  __ push(size);
+  CallRuntimeFromDeferred(Runtime::kAllocateInNewSpace, 1, instr);
+  __ StoreToSafepointRegisterSlot(r0, result);
+}
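
Note: DoAllocate follows the usual deferred-code pattern: the inline fast path
bump-allocates in new space and falls through to deferred->exit(), while
allocation failure branches to DoDeferredAllocate, which smi-tags the size and
calls Runtime::kAllocateInNewSpace under a safepoint. (The fast path computes
`flags`, including DOUBLE_ALIGNMENT, but then passes the bare TAG_OBJECT to
AllocateInNewSpace.) A freestanding sketch of the fast/slow split, with
stand-in types (illustration only):

#include <cstddef>
#include <cstdint>

struct NewSpace { uintptr_t top, limit; };  // stand-in for V8's new space

void* SlowAllocate(std::size_t size) {      // plays Runtime::kAllocateInNewSpace
  (void)size;
  return nullptr;  // placeholder: the real code enters the runtime
}

// Bump-pointer fast path with a slow-path fallback, as in DoAllocate above.
void* Allocate(NewSpace* space, std::size_t size) {
  if (space->limit - space->top < size) return SlowAllocate(size);
  uintptr_t result = space->top;
  space->top += size;
  return reinterpret_cast<void*>(result + 1);  // +1 = kHeapObjectTag (TAG_OBJECT)
}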


void LCodeGen::DoArrayLiteral(LArrayLiteral* instr) {
  Handle<FixedArray> literals(instr->environment()->closure()->literals());
  ElementsKind boilerplate_elements_kind =
      instr->hydrogen()->boilerplate_elements_kind();
  AllocationSiteMode allocation_site_mode =
      instr->hydrogen()->allocation_site_mode();

  // Deopt if the array literal boilerplate ElementsKind is of a type different
  // than the expected one. The check isn't necessary if the boilerplate has
  // already been converted to TERMINAL_FAST_ELEMENTS_KIND.
(...skipping 671 matching lines...)
  __ sub(scratch, result, Operand(index, LSL, kPointerSizeLog2 - kSmiTagSize));
  __ ldr(result, FieldMemOperand(scratch,
                                 FixedArray::kHeaderSize - kPointerSize));
  __ bind(&done);
}


#undef __

} }  // namespace v8::internal