Chromium Code Reviews

Unified Diff: src/ia32/lithium-codegen-ia32.cc

Issue 11659022: Generate the TransitionElementsStub using Crankshaft (Closed)
Base URL: https://v8.googlecode.com/svn/branches/bleeding_edge
Patch Set: Address review feedback (created 7 years, 10 months ago)
 // Copyright 2012 the V8 project authors. All rights reserved.
 // Redistribution and use in source and binary forms, with or without
 // modification, are permitted provided that the following conditions are
 // met:
 //
 //     * Redistributions of source code must retain the above copyright
 //       notice, this list of conditions and the following disclaimer.
 //     * Redistributions in binary form must reproduce the above
 //       copyright notice, this list of conditions and the following
 //       disclaimer in the documentation and/or other materials provided
(...skipping 59 matching lines...)
 bool LCodeGen::GenerateCode() {
   HPhase phase("Z_Code generation", chunk());
   ASSERT(is_unused());
   status_ = GENERATING;

   // Open a frame scope to indicate that there is a frame on the stack. The
   // MANUAL indicates that the scope shouldn't actually generate code to set up
   // the frame (that is done in GeneratePrologue).
   FrameScope frame_scope(masm_, StackFrame::MANUAL);

+  support_aligned_spilled_doubles_ = info()->IsOptimizing();
+
   dynamic_frame_alignment_ = info()->IsOptimizing() &&
       ((chunk()->num_double_slots() > 2 &&
         !chunk()->graph()->is_recursive()) ||
        !info()->osr_ast_id().IsNone());

   return GeneratePrologue() &&
          GenerateBody() &&
          GenerateDeferredCode() &&
          GenerateJumpTable() &&
          GenerateSafepointTable();
(...skipping 56 matching lines...)
     Label ok;
     __ test(ecx, Operand(ecx));
     __ j(zero, &ok, Label::kNear);
     // +1 for return address.
     int receiver_offset = (scope()->num_parameters() + 1) * kPointerSize;
     __ mov(Operand(esp, receiver_offset),
            Immediate(isolate()->factory()->undefined_value()));
     __ bind(&ok);
   }

-  if (dynamic_frame_alignment_) {
+  if (support_aligned_spilled_doubles_ && dynamic_frame_alignment_) {
     // Move state of dynamic frame alignment into edx.
     __ mov(edx, Immediate(kNoAlignmentPadding));

     Label do_not_pad, align_loop;
     STATIC_ASSERT(kDoubleSize == 2 * kPointerSize);
     // Align esp + 4 to a multiple of 2 * kPointerSize.
     __ test(esp, Immediate(kPointerSize));
     __ j(not_zero, &do_not_pad, Label::kNear);
     __ push(Immediate(0));
     __ mov(ebx, esp);
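A note on the test above: kPointerSize is 4 on ia32, so the sequence pushes one padding word exactly when esp is already a multiple of 8. A minimal sketch of the predicate, assuming only that kPointerSize == 4; this is not V8 source:

    #include <cstdint>

    // Mirrors "test(esp, Immediate(kPointerSize)); j(not_zero, &do_not_pad)".
    // The prologue wants (esp + 4) to be a multiple of 2 * kPointerSize so
    // that spilled doubles end up 8-byte aligned once the frame is built.
    bool NeedsAlignmentPadding(uint32_t esp) {
      const uint32_t kPointerSize = 4;    // ia32 assumption
      return (esp & kPointerSize) == 0;   // true -> push one zero word
    }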
(...skipping 38 matching lines...)
   ASSERT(slots != 0 || !info()->IsOptimizing());
   if (slots > 0) {
     if (slots == 1) {
       if (dynamic_frame_alignment_) {
         __ push(edx);
       } else {
         __ push(Immediate(kNoAlignmentPadding));
       }
     } else {
       if (FLAG_debug_code) {
+        __ sub(Operand(esp), Immediate(slots * kPointerSize));
+        __ push(eax);
         __ mov(Operand(eax), Immediate(slots));
         Label loop;
         __ bind(&loop);
-        __ push(Immediate(kSlotsZapValue));
+        __ mov(MemOperand(esp, eax, times_4, 0),
+               Immediate(kSlotsZapValue));
         __ dec(eax);
         __ j(not_zero, &loop);
+        __ pop(eax);
       } else {
         __ sub(Operand(esp), Immediate(slots * kPointerSize));
 #ifdef _MSC_VER
         // On windows, you may not access the stack more than one page below
         // the most recently mapped page. To make the allocated area randomly
         // accessible, we write to each page in turn (the value is irrelevant).
         const int kPageSize = 4 * KB;
         for (int offset = slots * kPointerSize - kPageSize;
              offset > 0;
              offset -= kPageSize) {
           __ mov(Operand(esp, offset), eax);
         }
 #endif
       }

-      // Store dynamic frame alignment state in the first local.
-      if (dynamic_frame_alignment_) {
-        __ mov(Operand(ebp,
-                       JavaScriptFrameConstants::kDynamicAlignmentStateOffset),
-               edx);
-      } else {
-        __ mov(Operand(ebp,
-                       JavaScriptFrameConstants::kDynamicAlignmentStateOffset),
-               Immediate(kNoAlignmentPadding));
+      if (support_aligned_spilled_doubles_) {
+        Comment(";;; Store dynamic frame alignment tag for spilled doubles");
+        // Store dynamic frame alignment state in the first local.
+        int offset = JavaScriptFrameConstants::kDynamicAlignmentStateOffset;
+        if (dynamic_frame_alignment_) {
+          __ mov(Operand(ebp, offset), edx);
+        } else {
+          __ mov(Operand(ebp, offset), Immediate(kNoAlignmentPadding));
+        }
+      }
+    }
+
+    if (info()->saves_caller_doubles() && CpuFeatures::IsSupported(SSE2)) {
+      Comment(";;; Save clobbered callee double registers");
+      CpuFeatures::Scope scope(SSE2);
+      int count = 0;
+      BitVector* doubles = chunk()->allocated_double_registers();
+      BitVector::Iterator save_iterator(doubles);
+      while (!save_iterator.Done()) {
+        __ movdbl(MemOperand(esp, count * kDoubleSize),
+                  XMMRegister::FromAllocationIndex(save_iterator.Current()));
+        save_iterator.Advance();
+        count++;
       }
     }
   }

   // Possibly allocate a local context.
   int heap_slots = info_->num_heap_slots() - Context::MIN_CONTEXT_SLOTS;
   if (heap_slots > 0) {
     Comment(";;; Allocate local context");
     // Argument to NewContext is the function, which is still in edi.
     __ push(edi);
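The _MSC_VER block above is the classic Windows stack-probe pattern. A standalone sketch of the same idea, with the 4 KB page size taken from the diff and everything else an illustrative assumption:

    // Windows commits stack guard pages one at a time, so a frame larger
    // than one page must touch each page in order before arbitrary offsets
    // into it can be addressed. The stored value is irrelevant.
    void TouchStackPages(volatile char* frame_base, int frame_bytes) {
      const int kPageSize = 4 * 1024;
      for (int offset = frame_bytes - kPageSize; offset > 0;
           offset -= kPageSize) {
        frame_base[offset] = 0;  // one store per page maps the page
      }
    }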
(...skipping 478 matching lines...)
   RecordPosition(pointers->position());

   __ CallRuntime(fun, argc);

   RecordSafepointWithLazyDeopt(instr, RECORD_SIMPLE_SAFEPOINT);

   ASSERT(info()->is_calling());
 }


-void LCodeGen::CallRuntimeFromDeferred(Runtime::FunctionId id,
-                                       int argc,
-                                       LInstruction* instr,
-                                       LOperand* context) {
+void LCodeGen::LoadContextFromDeferred(LOperand* context) {
   if (context->IsRegister()) {
     if (!ToRegister(context).is(esi)) {
       __ mov(esi, ToRegister(context));
     }
   } else if (context->IsStackSlot()) {
     __ mov(esi, ToOperand(context));
   } else if (context->IsConstantOperand()) {
     HConstant* constant =
         chunk_->LookupConstant(LConstantOperand::cast(context));
     __ LoadHeapObject(esi, Handle<Context>::cast(constant->handle()));
   } else {
     UNREACHABLE();
   }
+}
+
+void LCodeGen::CallRuntimeFromDeferred(Runtime::FunctionId id,
+                                       int argc,
+                                       LInstruction* instr,
+                                       LOperand* context) {
+  LoadContextFromDeferred(context);

   __ CallRuntimeSaveDoubles(id);
   RecordSafepointWithRegisters(
       instr->pointer_map(), argc, Safepoint::kNoLazyDeopt);

   ASSERT(info()->is_calling());
 }

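The split above pulls the context-loading logic out of CallRuntimeFromDeferred so that deferred code which calls a stub, rather than the runtime, can reuse it. A sketch of the intended call pattern, mirroring how the new DoTransitionElementsKind path further down uses the helper:

    // Set up esi for a stub call without going through CallRuntimeSaveDoubles.
    PushSafepointRegistersScope scope(this);
    LoadContextFromDeferred(instr->context());   // only loads esi
    TransitionElementsKindStub stub(from_kind, to_kind);
    __ CallStub(&stub);
    RecordSafepointWithRegisters(
        instr->pointer_map(), 0, Safepoint::kNoLazyDeopt);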
 void LCodeGen::RegisterEnvironmentForDeoptimization(
(...skipping 1867 matching lines...)
 void LCodeGen::DoReturn(LReturn* instr) {
   if (FLAG_trace && info()->IsOptimizing()) {
     // Preserve the return value on the stack and rely on the runtime call
     // to return the value in the same register. We're leaving the code
     // managed by the register allocator and tearing down the frame, it's
     // safe to write to the context register.
     __ push(eax);
     __ mov(esi, Operand(ebp, StandardFrameConstants::kContextOffset));
     __ CallRuntime(Runtime::kTraceExit, 1);
   }
+  if (info()->saves_caller_doubles() && CpuFeatures::IsSupported(SSE2)) {
+    ASSERT(NeedsEagerFrame());
+    CpuFeatures::Scope scope(SSE2);
+    BitVector* doubles = chunk()->allocated_double_registers();
+    BitVector::Iterator save_iterator(doubles);
+    int count = 0;
+    while (!save_iterator.Done()) {
+      __ movdbl(XMMRegister::FromAllocationIndex(save_iterator.Current()),
+                MemOperand(esp, count * kDoubleSize));
+      save_iterator.Advance();
+      count++;
+    }
+  }
   if (dynamic_frame_alignment_) {
     // Fetch the state of the dynamic frame alignment.
     __ mov(edx, Operand(ebp,
         JavaScriptFrameConstants::kDynamicAlignmentStateOffset));
   }
   if (NeedsEagerFrame()) {
     __ mov(esp, ebp);
     __ pop(ebp);
   }
   if (dynamic_frame_alignment_) {
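The restore loop added here is the mirror image of the save loop this same patch adds to GeneratePrologue: both walk chunk()->allocated_double_registers() in BitVector order, so the k-th set bit always maps to stack slot k. A sketch of the invariant, using only the iterator API visible in the diff:

    // Slot assignment is purely positional: save and restore agree as long
    // as both sides iterate the same BitVector in the same order.
    int count = 0;
    for (BitVector::Iterator it(chunk()->allocated_double_registers());
         !it.Done(); it.Advance()) {
      // prologue: movdbl [esp + count * kDoubleSize] <- xmm(it.Current())
      // DoReturn: movdbl xmm(it.Current()) <- [esp + count * kDoubleSize]
      count++;
    }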
(...skipping 1641 matching lines...)
   ASSERT(ToRegister(instr->key()).is(ecx));
   ASSERT(ToRegister(instr->value()).is(eax));

   Handle<Code> ic = (instr->strict_mode_flag() == kStrictMode)
       ? isolate()->builtins()->KeyedStoreIC_Initialize_Strict()
       : isolate()->builtins()->KeyedStoreIC_Initialize();
   CallCode(ic, RelocInfo::CODE_TARGET, instr);
 }


+void LCodeGen::DoTrapAllocationMemento(LTrapAllocationMemento* instr) {
+  Register object = ToRegister(instr->object());
+  Register temp = ToRegister(instr->temp());
+  __ TestJSArrayForAllocationSiteInfo(object, temp);
+  DeoptimizeIf(equal, instr->environment());
+}
+
+
 void LCodeGen::DoTransitionElementsKind(LTransitionElementsKind* instr) {
   Register object_reg = ToRegister(instr->object());
-  Register new_map_reg = ToRegister(instr->new_map_temp());

   Handle<Map> from_map = instr->original_map();
   Handle<Map> to_map = instr->transitioned_map();
   ElementsKind from_kind = instr->from_kind();
   ElementsKind to_kind = instr->to_kind();

   Label not_applicable;
   bool is_simple_map_transition =
       IsSimpleMapChangeTransition(from_kind, to_kind);
   Label::Distance branch_distance =
       is_simple_map_transition ? Label::kNear : Label::kFar;
   __ cmp(FieldOperand(object_reg, HeapObject::kMapOffset), from_map);
   __ j(not_equal, &not_applicable, branch_distance);
   if (is_simple_map_transition) {
-    Register object_reg = ToRegister(instr->object());
+    Register new_map_reg = ToRegister(instr->new_map_temp());
     Handle<Map> map = instr->hydrogen()->transitioned_map();
     __ mov(FieldOperand(object_reg, HeapObject::kMapOffset),
            Immediate(map));
     // Write barrier.
     ASSERT_NE(instr->temp(), NULL);
     __ RecordWriteForMap(object_reg, to_map, new_map_reg,
                          ToRegister(instr->temp()),
                          kDontSaveFPRegs);
+  } else if (FLAG_compiled_transitions) {
+    PushSafepointRegistersScope scope(this);
+    if (!object_reg.is(eax)) {
+      __ push(object_reg);
+    }
+    LoadContextFromDeferred(instr->context());
+    if (!object_reg.is(eax)) {
+      __ pop(eax);
+    }
+    __ mov(ebx, to_map);
+    TransitionElementsKindStub stub(from_kind, to_kind);
+    __ CallStub(&stub);
+    RecordSafepointWithRegisters(
+        instr->pointer_map(), 0, Safepoint::kNoLazyDeopt);
   } else if (IsFastSmiElementsKind(from_kind) &&
              IsFastDoubleElementsKind(to_kind)) {
+    Register new_map_reg = ToRegister(instr->new_map_temp());
     __ mov(new_map_reg, to_map);
     Register fixed_object_reg = ToRegister(instr->temp());
     ASSERT(fixed_object_reg.is(edx));
     ASSERT(new_map_reg.is(ebx));
     __ mov(fixed_object_reg, object_reg);
     CallCode(isolate()->builtins()->TransitionElementsSmiToDouble(),
              RelocInfo::CODE_TARGET, instr);
   } else if (IsFastDoubleElementsKind(from_kind) &&
              IsFastObjectElementsKind(to_kind)) {
+    Register new_map_reg = ToRegister(instr->new_map_temp());
     __ mov(new_map_reg, to_map);
     Register fixed_object_reg = ToRegister(instr->temp());
     ASSERT(fixed_object_reg.is(edx));
     ASSERT(new_map_reg.is(ebx));
     __ mov(fixed_object_reg, object_reg);
     CallCode(isolate()->builtins()->TransitionElementsDoubleToObject(),
              RelocInfo::CODE_TARGET, instr);
   } else {
     UNREACHABLE();
   }
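For readers outside V8: the non-trivial branches above correspond to the fast elements transitions. Only smi-to-object leaves the backing store untouched; the other two must rewrite it, which is what the new TransitionElementsKindStub (or the old builtins) does. A hedged sketch of the classification with simplified stand-in names, not V8 source:

    enum ElementsKindSketch { kFastSmi, kFastDouble, kFastObject };

    bool IsSimpleMapChange(ElementsKindSketch from, ElementsKindSketch to) {
      // smi -> object: every smi is already a valid tagged value, so only
      // the map word changes. smi -> double must unbox into a new double
      // array, and double -> object must box each double into a heap number.
      return from == kFastSmi && to == kFastObject;
    }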
(...skipping 276 matching lines...)
   class DeferredNumberTagD: public LDeferredCode {
    public:
     DeferredNumberTagD(LCodeGen* codegen, LNumberTagD* instr)
         : LDeferredCode(codegen), instr_(instr) { }
     virtual void Generate() { codegen()->DoDeferredNumberTagD(instr_); }
     virtual LInstruction* instr() { return instr_; }
    private:
     LNumberTagD* instr_;
   };

   Register reg = ToRegister(instr->result());

+  bool convert_hole = false;
+  HValue* change_input = instr->hydrogen()->value();
+  if (change_input->IsLoadKeyed()) {
+    HLoadKeyed* load = HLoadKeyed::cast(change_input);
+    convert_hole = load->UsesMustHandleHole();
+  }
+
+  Label no_special_nan_handling;
+  Label done;
+  if (convert_hole) {
+    bool use_sse2 = CpuFeatures::IsSupported(SSE2);
+    if (use_sse2) {
+      CpuFeatures::Scope scope(SSE2);
+      XMMRegister input_reg = ToDoubleRegister(instr->value());
+      __ ucomisd(input_reg, input_reg);
+    } else {
+      if (!IsX87TopOfStack(instr->value())) {
+        __ fld_d(ToOperand(instr->value()));
+      }
+      __ fld(0);
+      __ fld(0);
+      __ FCmp();
+    }
+
+    __ j(parity_odd, &no_special_nan_handling);
+    __ sub(esp, Immediate(kDoubleSize));
+    if (use_sse2) {
+      CpuFeatures::Scope scope(SSE2);
+      XMMRegister input_reg = ToDoubleRegister(instr->value());
+      __ movdbl(MemOperand(esp, 0), input_reg);
+    } else {
+      __ fld(0);
+      __ fstp_d(MemOperand(esp, 0));
+    }
+    __ cmp(MemOperand(esp, sizeof(kHoleNanLower32)),
+           Immediate(kHoleNanUpper32));
+    Label canonicalize;
+    __ j(not_equal, &canonicalize);
+    __ add(esp, Immediate(kDoubleSize));
+    __ mov(reg, factory()->the_hole_value());
+    __ jmp(&done);
+    __ bind(&canonicalize);
+    __ add(esp, Immediate(kDoubleSize));
+    ExternalReference nan =
+        ExternalReference::address_of_canonical_non_hole_nan();
+    if (use_sse2) {
+      CpuFeatures::Scope scope(SSE2);
+      XMMRegister input_reg = ToDoubleRegister(instr->value());
+      __ movdbl(input_reg, Operand::StaticVariable(nan));
+    } else {
+      __ fstp(0);
+      __ fld_d(Operand::StaticVariable(nan));
+    }
+  }
+
+  __ bind(&no_special_nan_handling);
   DeferredNumberTagD* deferred = new(zone()) DeferredNumberTagD(this, instr);
   if (FLAG_inline_new) {
     Register tmp = ToRegister(instr->temp());
     __ AllocateHeapNumber(reg, tmp, no_reg, deferred->entry());
   } else {
     __ jmp(deferred->entry());
   }
   __ bind(deferred->exit());
   if (CpuFeatures::IsSupported(SSE2)) {
     CpuFeatures::Scope scope(SSE2);
     XMMRegister input_reg = ToDoubleRegister(instr->value());
     __ movdbl(FieldOperand(reg, HeapNumber::kValueOffset), input_reg);
   } else {
     if (!IsX87TopOfStack(instr->value())) {
       __ fld_d(ToOperand(instr->value()));
     }
     __ fstp_d(FieldOperand(reg, HeapNumber::kValueOffset));
   }
+  __ bind(&done);
 }

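The cmp against kHoleNanUpper32 above is how the generated code tells "the hole" apart from an ordinary NaN: V8 reserves one specific NaN bit pattern for holes in double arrays, and only its upper 32 bits need checking. A host-side sketch, with the constant's value treated as an assumption about this era's headers:

    #include <cstdint>
    #include <cstring>

    // Assumes kHoleNanUpper32 == 0x7FFFFFFF, matching the era of this patch.
    // Like the generated cmp above, only the high word is inspected.
    bool IsHoleNan(double value) {
      uint64_t bits;
      std::memcpy(&bits, &value, sizeof(bits));
      return static_cast<uint32_t>(bits >> 32) == 0x7FFFFFFFu;
    }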
 void LCodeGen::DoDeferredNumberTagD(LNumberTagD* instr) {
   // TODO(3095996): Get rid of this. For now, we need to make the
   // result register contain a valid pointer because it is already
   // contained in the register pointer map.
   Register reg = ToRegister(instr->result());
   __ Set(reg, Immediate(0));

(...skipping 30 matching lines...)
   }
   __ SmiUntag(ToRegister(input));
 }


 void LCodeGen::EmitNumberUntagD(Register input_reg,
                                 Register temp_reg,
                                 XMMRegister result_reg,
                                 bool deoptimize_on_undefined,
                                 bool deoptimize_on_minus_zero,
-                                LEnvironment* env) {
+                                LEnvironment* env,
+                                NumberUntagDMode mode) {
   Label load_smi, done;

-  // Smi check.
-  __ JumpIfSmi(input_reg, &load_smi, Label::kNear);
-
-  // Heap number map check.
-  __ cmp(FieldOperand(input_reg, HeapObject::kMapOffset),
-         factory()->heap_number_map());
-  if (deoptimize_on_undefined) {
-    DeoptimizeIf(not_equal, env);
-  } else {
-    Label heap_number;
-    __ j(equal, &heap_number, Label::kNear);
-
-    __ cmp(input_reg, factory()->undefined_value());
-    DeoptimizeIf(not_equal, env);
-
-    // Convert undefined to NaN.
-    ExternalReference nan =
-        ExternalReference::address_of_canonical_non_hole_nan();
-    __ movdbl(result_reg, Operand::StaticVariable(nan));
-    __ jmp(&done, Label::kNear);
-
-    __ bind(&heap_number);
-  }
-  // Heap number to XMM conversion.
-  __ movdbl(result_reg, FieldOperand(input_reg, HeapNumber::kValueOffset));
-  if (deoptimize_on_minus_zero) {
-    XMMRegister xmm_scratch = xmm0;
-    __ xorps(xmm_scratch, xmm_scratch);
-    __ ucomisd(result_reg, xmm_scratch);
-    __ j(not_zero, &done, Label::kNear);
-    __ movmskpd(temp_reg, result_reg);
-    __ test_b(temp_reg, 1);
-    DeoptimizeIf(not_zero, env);
-  }
-  __ jmp(&done, Label::kNear);
+  if (mode == NUMBER_CANDIDATE_IS_ANY_TAGGED) {
+    // Smi check.
+    __ JumpIfSmi(input_reg, &load_smi, Label::kNear);
+
+    // Heap number map check.
+    __ cmp(FieldOperand(input_reg, HeapObject::kMapOffset),
+           factory()->heap_number_map());
+    if (deoptimize_on_undefined) {
+      DeoptimizeIf(not_equal, env);
+    } else {
+      Label heap_number;
+      __ j(equal, &heap_number, Label::kNear);
+
+      __ cmp(input_reg, factory()->undefined_value());
+      DeoptimizeIf(not_equal, env);
+
+      // Convert undefined to NaN.
+      ExternalReference nan =
+          ExternalReference::address_of_canonical_non_hole_nan();
+      __ movdbl(result_reg, Operand::StaticVariable(nan));
+      __ jmp(&done, Label::kNear);
+
+      __ bind(&heap_number);
+    }
+    // Heap number to XMM conversion.
+    __ movdbl(result_reg, FieldOperand(input_reg, HeapNumber::kValueOffset));
+    if (deoptimize_on_minus_zero) {
+      XMMRegister xmm_scratch = xmm0;
+      __ xorps(xmm_scratch, xmm_scratch);
+      __ ucomisd(result_reg, xmm_scratch);
+      __ j(not_zero, &done, Label::kNear);
+      __ movmskpd(temp_reg, result_reg);
+      __ test_b(temp_reg, 1);
+      DeoptimizeIf(not_zero, env);
+    }
+    __ jmp(&done, Label::kNear);
+  } else if (mode == NUMBER_CANDIDATE_IS_SMI_OR_HOLE) {
+    __ test(input_reg, Immediate(kSmiTagMask));
+    DeoptimizeIf(not_equal, env);
+  } else if (mode == NUMBER_CANDIDATE_IS_SMI_CONVERT_HOLE) {
+    __ test(input_reg, Immediate(kSmiTagMask));
+    __ j(zero, &load_smi);
+    ExternalReference hole_nan_reference =
+        ExternalReference::address_of_the_hole_nan();
+    __ movdbl(result_reg, Operand::StaticVariable(hole_nan_reference));
+    __ jmp(&done, Label::kNear);
+  } else {
+    ASSERT(mode == NUMBER_CANDIDATE_IS_SMI);
+  }

   // Smi to XMM conversion
   __ bind(&load_smi);
   __ SmiUntag(input_reg);  // Untag smi before converting to float.
   __ cvtsi2sd(result_reg, Operand(input_reg));
   __ SmiTag(input_reg);  // Retag smi.
   __ bind(&done);
 }

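All four mode values dispatched on above appear in this hunk; a sketch of the enum as it is presumably declared in lithium-codegen-ia32.h (ordering and comments are guesses, names are from the diff):

    enum NumberUntagDMode {
      NUMBER_CANDIDATE_IS_SMI,               // proven smi: straight to load_smi
      NUMBER_CANDIDATE_IS_SMI_OR_HOLE,       // smi expected; anything else deopts
      NUMBER_CANDIDATE_IS_SMI_CONVERT_HOLE,  // non-smi becomes the hole NaN
      NUMBER_CANDIDATE_IS_ANY_TAGGED         // full smi / heap-number dispatch
    };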
(...skipping 125 matching lines...)

   if (CpuFeatures::IsSupported(SSE2)) {
     CpuFeatures::Scope scope(SSE2);
     Register input_reg = ToRegister(input);
     XMMRegister result_reg = ToDoubleRegister(result);

     bool deoptimize_on_minus_zero =
         instr->hydrogen()->deoptimize_on_minus_zero();
     Register temp_reg = deoptimize_on_minus_zero ? ToRegister(temp) : no_reg;

+    NumberUntagDMode mode = NUMBER_CANDIDATE_IS_ANY_TAGGED;
+    HValue* value = instr->hydrogen()->value();
+    if (value->type().IsSmi()) {
+      if (value->IsLoadKeyed()) {
+        HLoadKeyed* load = HLoadKeyed::cast(value);
+        if (load->UsesMustHandleHole()) {
+          if (load->hole_mode() == ALLOW_RETURN_HOLE) {
+            mode = NUMBER_CANDIDATE_IS_SMI_CONVERT_HOLE;
+          } else {
+            mode = NUMBER_CANDIDATE_IS_SMI_OR_HOLE;
+          }
+        } else {
+          mode = NUMBER_CANDIDATE_IS_SMI;
+        }
+      }
+    }
+
     EmitNumberUntagD(input_reg,
                      temp_reg,
                      result_reg,
                      instr->hydrogen()->deoptimize_on_undefined(),
                      deoptimize_on_minus_zero,
-                     instr->environment());
+                     instr->environment(),
+                     mode);
   } else {
     UNIMPLEMENTED();
   }
 }

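The smi fast paths above lean on ia32 smi tagging: a smi is a 31-bit integer shifted left one bit with tag bit 0 equal to zero, which is why a single test against kSmiTagMask suffices and why SmiUntag is just an arithmetic shift. A sketch, assuming the standard 32-bit encoding:

    #include <cstdint>

    // 32-bit smi encoding (kSmiTag == 0, kSmiTagSize == 1, kSmiTagMask == 1).
    inline int32_t SmiTag(int32_t value) { return value << 1; }
    inline int32_t SmiUntag(int32_t smi) { return smi >> 1; }
    inline bool IsSmi(int32_t tagged)    { return (tagged & 1) == 0; }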
 void LCodeGen::DoDoubleToI(LDoubleToI* instr) {
   LOperand* input = instr->value();
   ASSERT(input->IsDoubleRegister());
   LOperand* result = instr->result();
(...skipping 372 matching lines...)
   __ Set(result, Immediate(0));

   PushSafepointRegistersScope scope(this);
   __ push(Immediate(Smi::FromInt(instance_size)));
   CallRuntimeFromDeferred(
       Runtime::kAllocateInNewSpace, 1, instr, instr->context());
   __ StoreToSafepointRegisterSlot(result, eax);
 }


+void LCodeGen::DoAllocate(LAllocate* instr) {
+  class DeferredAllocate: public LDeferredCode {
+   public:
+    DeferredAllocate(LCodeGen* codegen, LAllocate* instr)
+        : LDeferredCode(codegen), instr_(instr) { }
+    virtual void Generate() { codegen()->DoDeferredAllocate(instr_); }
+    virtual LInstruction* instr() { return instr_; }
+   private:
+    LAllocate* instr_;
+  };
+
+  DeferredAllocate* deferred =
+      new(zone()) DeferredAllocate(this, instr);
+
+  Register size = ToRegister(instr->size());
+  Register result = ToRegister(instr->result());
+  Register temp = ToRegister(instr->temp());
+
+  HAllocate* original_instr = instr->hydrogen();
+  if (original_instr->size()->IsConstant()) {
+    UNREACHABLE();
+  } else {
+    // Allocate memory for the object.
+    AllocationFlags flags = TAG_OBJECT;
+    if (original_instr->MustAllocateDoubleAligned()) {
+      flags = static_cast<AllocationFlags>(flags | DOUBLE_ALIGNMENT);
+    }
+    __ AllocateInNewSpace(size, result, temp, no_reg,
+                          deferred->entry(), flags);
+  }
+
+  __ bind(deferred->exit());
+}
+
+
+void LCodeGen::DoDeferredAllocate(LAllocate* instr) {
+  Register size = ToRegister(instr->size());
+  Register result = ToRegister(instr->result());
+
+  __ SmiTag(size);
+  PushSafepointRegistersScope scope(this);
+  // TODO(3095996): Get rid of this. For now, we need to make the
+  // result register contain a valid pointer because it is already
+  // contained in the register pointer map.
+  if (!size.is(result)) {
+    __ StoreToSafepointRegisterSlot(result, size);
+  }
+  __ push(size);
+  CallRuntimeFromDeferred(
+      Runtime::kAllocateInNewSpace, 1, instr, instr->context());
+  __ StoreToSafepointRegisterSlot(result, eax);
+}
+
+
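AllocateInNewSpace above emits the inline fast path and jumps to deferred->entry() when new space is exhausted; the deferred path then smi-tags the byte count and calls Runtime::kAllocateInNewSpace. A sketch of the assumed fast path, not the macro assembler's actual code:

    #include <cstdint>

    // New-space allocation is bump-pointer against a limit; on overflow the
    // generated code branches to the deferred slow path instead of returning.
    char* AllocateInNewSpaceSketch(char** top, char* limit, int size_in_bytes) {
      if (*top + size_in_bytes > limit) return nullptr;  // -> deferred->entry()
      char* result = *top;
      *top += size_in_bytes;
      return result + 1;  // TAG_OBJECT: heap pointers carry a 1-bit tag
    }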
 void LCodeGen::DoArrayLiteral(LArrayLiteral* instr) {
   ASSERT(ToRegister(instr->context()).is(esi));
   Handle<FixedArray> literals(instr->environment()->closure()->literals());
   ElementsKind boilerplate_elements_kind =
       instr->hydrogen()->boilerplate_elements_kind();
   AllocationSiteMode allocation_site_mode =
       instr->hydrogen()->allocation_site_mode();

   // Deopt if the array literal boilerplate ElementsKind is of a type different
   // than the expected one. The check isn't necessary if the boilerplate has
(...skipping 674 matching lines...)
                                  FixedArray::kHeaderSize - kPointerSize));
   __ bind(&done);
 }


 #undef __

 } }  // namespace v8::internal

 #endif  // V8_TARGET_ARCH_IA32