Chromium Code Reviews

Unified Diff: src/ia32/lithium-codegen-ia32.cc

Issue 11659022: Generate the TransitionElementsStub using Crankshaft (Closed) Base URL: https://v8.googlecode.com/svn/branches/bleeding_edge
Patch Set: Tweaks Created 7 years, 10 months ago
 // Copyright 2012 the V8 project authors. All rights reserved.
 // Redistribution and use in source and binary forms, with or without
 // modification, are permitted provided that the following conditions are
 // met:
 //
 //     * Redistributions of source code must retain the above copyright
 //       notice, this list of conditions and the following disclaimer.
 //     * Redistributions in binary form must reproduce the above
 //       copyright notice, this list of conditions and the following
 //       disclaimer in the documentation and/or other materials provided
(...skipping 59 matching lines...)
 bool LCodeGen::GenerateCode() {
   HPhase phase("Z_Code generation", chunk());
   ASSERT(is_unused());
   status_ = GENERATING;

   // Open a frame scope to indicate that there is a frame on the stack. The
   // MANUAL indicates that the scope shouldn't actually generate code to set up
   // the frame (that is done in GeneratePrologue).
   FrameScope frame_scope(masm_, StackFrame::MANUAL);

+  support_aligned_spilled_doubles_ = info()->IsOptimizing();
+
   dynamic_frame_alignment_ = info()->IsOptimizing() &&
       ((chunk()->num_double_slots() > 2 &&
         !chunk()->graph()->is_recursive()) ||
        !info()->osr_ast_id().IsNone());

   return GeneratePrologue() &&
          GenerateBody() &&
          GenerateDeferredCode() &&
          GenerateJumpTable() &&
          GenerateSafepointTable();
(...skipping 56 matching lines...)
     Label ok;
     __ test(ecx, Operand(ecx));
     __ j(zero, &ok, Label::kNear);
     // +1 for return address.
     int receiver_offset = (scope()->num_parameters() + 1) * kPointerSize;
     __ mov(Operand(esp, receiver_offset),
            Immediate(isolate()->factory()->undefined_value()));
     __ bind(&ok);
   }

-  if (dynamic_frame_alignment_) {
+  if (support_aligned_spilled_doubles_ && dynamic_frame_alignment_) {
     // Move state of dynamic frame alignment into edx.
     __ mov(edx, Immediate(kNoAlignmentPadding));

     Label do_not_pad, align_loop;
     STATIC_ASSERT(kDoubleSize == 2 * kPointerSize);
     // Align esp + 4 to a multiple of 2 * kPointerSize.
     __ test(esp, Immediate(kPointerSize));
     __ j(not_zero, &do_not_pad, Label::kNear);
     __ push(Immediate(0));
     __ mov(ebx, esp);
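Note: the padding decision above is compact enough to misread, so here is a standalone C++ sketch of the same arithmetic (the helper name is ours; kPointerSize is 4 and kDoubleSize is 8, as on ia32). The prologue pushes one padding word exactly when esp + 4, the base of the frame it is about to build, is not already 8-byte aligned, which reduces to testing bit 2 of esp:

    #include <cstdint>

    constexpr uintptr_t kPointerSize = 4;
    constexpr uintptr_t kDoubleSize = 2 * kPointerSize;  // the STATIC_ASSERT above

    // True when one padding word must be pushed so that the double spill
    // slots end up 8-byte aligned. Mirrors "__ test(esp, Immediate(kPointerSize))":
    // if bit 2 of esp is already set, esp + 4 is a multiple of 8 and the
    // generated code jumps to do_not_pad; if it is clear, a zero word is pushed.
    bool NeedsAlignmentPadding(uintptr_t esp) {
      return (esp & kPointerSize) == 0;
    }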
(...skipping 38 matching lines...)
   ASSERT(slots != 0 || !info()->IsOptimizing());
   if (slots > 0) {
     if (slots == 1) {
       if (dynamic_frame_alignment_) {
         __ push(edx);
       } else {
         __ push(Immediate(kNoAlignmentPadding));
       }
     } else {
       if (FLAG_debug_code) {
+        __ sub(Operand(esp), Immediate(slots * kPointerSize));
+        __ push(eax);
         __ mov(Operand(eax), Immediate(slots));
         Label loop;
         __ bind(&loop);
-        __ push(Immediate(kSlotsZapValue));
+        __ mov(MemOperand(esp, eax, times_4, 0),
+               Immediate(kSlotsZapValue));
         __ dec(eax);
         __ j(not_zero, &loop);
+        __ pop(eax);
       } else {
         __ sub(Operand(esp), Immediate(slots * kPointerSize));
 #ifdef _MSC_VER
         // On windows, you may not access the stack more than one page below
         // the most recently mapped page. To make the allocated area randomly
         // accessible, we write to each page in turn (the value is irrelevant).
         const int kPageSize = 4 * KB;
         for (int offset = slots * kPointerSize - kPageSize;
              offset > 0;
              offset -= kPageSize) {
           __ mov(Operand(esp, offset), eax);
         }
 #endif
       }

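Note: the #ifdef _MSC_VER loop above exists because Windows commits stack pages lazily, one guard page at a time; touching memory more than a page below the last mapped page faults instead of growing the stack. A standalone sketch of the same idea (the function name is ours):

    constexpr int KB = 1024;
    constexpr int kPageSize = 4 * KB;

    // Touch one word in every page of a freshly reserved stack area, from the
    // top down, so the OS maps each page in order. The value written is
    // irrelevant; only the access matters.
    void TouchStackPages(volatile char* reserved_bottom, int bytes_reserved) {
      for (int offset = bytes_reserved - kPageSize;
           offset > 0;
           offset -= kPageSize) {
        reserved_bottom[offset] = 0;  // __ mov(Operand(esp, offset), eax)
      }
    }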
-      // Store dynamic frame alignment state in the first local.
-      if (dynamic_frame_alignment_) {
-        __ mov(Operand(ebp,
-                       JavaScriptFrameConstants::kDynamicAlignmentStateOffset),
-               edx);
-      } else {
-        __ mov(Operand(ebp,
-                       JavaScriptFrameConstants::kDynamicAlignmentStateOffset),
-               Immediate(kNoAlignmentPadding));
+      if (support_aligned_spilled_doubles_) {
+        Comment(";;; Store dynamic frame alignment tag for spilled doubles");
+        // Store dynamic frame alignment state in the first local.
+        int offset = JavaScriptFrameConstants::kDynamicAlignmentStateOffset;
+        if (dynamic_frame_alignment_) {
+          __ mov(Operand(ebp, offset), edx);
+        } else {
+          __ mov(Operand(ebp, offset), Immediate(kNoAlignmentPadding));
+        }
+      }
+    }
+
+    if (info()->saves_caller_doubles() && CpuFeatures::IsSupported(SSE2)) {
+      Comment(";;; Save clobbered callee double registers");
+      CpuFeatures::Scope scope(SSE2);
+      int count = 0;
+      BitVector* doubles = chunk()->allocated_double_registers();
+      BitVector::Iterator save_iterator(doubles);
+      while (!save_iterator.Done()) {
+        __ movdbl(MemOperand(esp, count * kDoubleSize),
+                  XMMRegister::FromAllocationIndex(save_iterator.Current()));
+        save_iterator.Advance();
+        count++;
       }
     }
   }

   // Possibly allocate a local context.
   int heap_slots = info_->num_heap_slots() - Context::MIN_CONTEXT_SLOTS;
   if (heap_slots > 0) {
     Comment(";;; Allocate local context");
     // Argument to NewContext is the function, which is still in edi.
     __ push(edi);
(...skipping 478 matching lines...)
   RecordPosition(pointers->position());

   __ CallRuntime(fun, argc);

   RecordSafepointWithLazyDeopt(instr, RECORD_SIMPLE_SAFEPOINT);

   ASSERT(info()->is_calling());
 }


-void LCodeGen::CallRuntimeFromDeferred(Runtime::FunctionId id,
-                                       int argc,
-                                       LInstruction* instr,
-                                       LOperand* context) {
+void LCodeGen::LoadContextFromDeferred(LOperand* context) {
   if (context->IsRegister()) {
     if (!ToRegister(context).is(esi)) {
       __ mov(esi, ToRegister(context));
     }
   } else if (context->IsStackSlot()) {
     __ mov(esi, ToOperand(context));
   } else if (context->IsConstantOperand()) {
     HConstant* constant =
         chunk_->LookupConstant(LConstantOperand::cast(context));
     __ LoadHeapObject(esi, Handle<Context>::cast(constant->handle()));
   } else {
     UNREACHABLE();
   }
+}
+
+void LCodeGen::CallRuntimeFromDeferred(Runtime::FunctionId id,
+                                       int argc,
+                                       LInstruction* instr,
+                                       LOperand* context) {
+  LoadContextFromDeferred(context);

   __ CallRuntimeSaveDoubles(id);
   RecordSafepointWithRegisters(
       instr->pointer_map(), argc, Safepoint::kNoLazyDeopt);

   ASSERT(info()->is_calling());
 }


 void LCodeGen::RegisterEnvironmentForDeoptimization(
(...skipping 1867 matching lines...)
 void LCodeGen::DoReturn(LReturn* instr) {
   if (FLAG_trace && info()->IsOptimizing()) {
     // Preserve the return value on the stack and rely on the runtime call
     // to return the value in the same register. We're leaving the code
     // managed by the register allocator and tearing down the frame, it's
     // safe to write to the context register.
     __ push(eax);
     __ mov(esi, Operand(ebp, StandardFrameConstants::kContextOffset));
     __ CallRuntime(Runtime::kTraceExit, 1);
   }
+  if (info()->saves_caller_doubles() && CpuFeatures::IsSupported(SSE2)) {
+    ASSERT(NeedsEagerFrame());
+    CpuFeatures::Scope scope(SSE2);
+    BitVector* doubles = chunk()->allocated_double_registers();
+    BitVector::Iterator save_iterator(doubles);
+    int count = 0;
+    while (!save_iterator.Done()) {
+      __ movdbl(XMMRegister::FromAllocationIndex(save_iterator.Current()),
+                MemOperand(esp, count * kDoubleSize));
+      save_iterator.Advance();
+      count++;
+    }
+  }
   if (dynamic_frame_alignment_) {
     // Fetch the state of the dynamic frame alignment.
     __ mov(edx, Operand(ebp,
         JavaScriptFrameConstants::kDynamicAlignmentStateOffset));
   }
   if (NeedsEagerFrame()) {
     __ mov(esp, ebp);
     __ pop(ebp);
   }
   if (dynamic_frame_alignment_) {
(...skipping 1641 matching lines...)
   ASSERT(ToRegister(instr->key()).is(ecx));
   ASSERT(ToRegister(instr->value()).is(eax));

   Handle<Code> ic = (instr->strict_mode_flag() == kStrictMode)
       ? isolate()->builtins()->KeyedStoreIC_Initialize_Strict()
       : isolate()->builtins()->KeyedStoreIC_Initialize();
   CallCode(ic, RelocInfo::CODE_TARGET, instr);
 }


+void LCodeGen::DoTrapAllocationMemento(LTrapAllocationMemento* instr) {
+  Register object = ToRegister(instr->object());
+  Register temp = ToRegister(instr->temp());
+  __ TestJSArrayForAllocationSiteInfo(object, temp);
+  DeoptimizeIf(equal, instr->environment());
+}
+
+
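Note: TestJSArrayForAllocationSiteInfo is a macro-assembler helper not shown in this diff; it sets the flags so that equal means an AllocationSiteInfo record sits immediately after the array in new space, i.e. the array came from a tracked allocation site and the optimized code's elements-kind assumption may go stale. An abstract standalone sketch of the check, with an assumed placeholder map value (the real object layout and constants are V8-internal):

    #include <cstdint>

    // Placeholder for the map word that identifies an AllocationSiteInfo;
    // the actual value is a V8 heap constant, not this.
    constexpr uintptr_t kAllocationSiteInfoMapWord = 0xA110CA7E;  // assumed

    // The generated code compares the word just past the array against that
    // map and deoptimizes on a match (DeoptimizeIf(equal, ...)).
    bool FollowedByAllocationSiteInfo(const uintptr_t* word_past_object) {
      return *word_past_object == kAllocationSiteInfoMapWord;
    }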
 void LCodeGen::DoTransitionElementsKind(LTransitionElementsKind* instr) {
   Register object_reg = ToRegister(instr->object());
-  Register new_map_reg = ToRegister(instr->new_map_temp());

   Handle<Map> from_map = instr->original_map();
   Handle<Map> to_map = instr->transitioned_map();
   ElementsKind from_kind = instr->from_kind();
   ElementsKind to_kind = instr->to_kind();

   Label not_applicable;
   bool is_simple_map_transition =
       IsSimpleMapChangeTransition(from_kind, to_kind);
   Label::Distance branch_distance =
       is_simple_map_transition ? Label::kNear : Label::kFar;
   __ cmp(FieldOperand(object_reg, HeapObject::kMapOffset), from_map);
   __ j(not_equal, &not_applicable, branch_distance);
   if (is_simple_map_transition) {
-    Register object_reg = ToRegister(instr->object());
+    Register new_map_reg = ToRegister(instr->new_map_temp());
     Handle<Map> map = instr->hydrogen()->transitioned_map();
     __ mov(FieldOperand(object_reg, HeapObject::kMapOffset),
            Immediate(map));
     // Write barrier.
     ASSERT_NE(instr->temp(), NULL);
     __ RecordWriteForMap(object_reg, to_map, new_map_reg,
                          ToRegister(instr->temp()),
                          kDontSaveFPRegs);
-  } else if (IsFastSmiElementsKind(from_kind) &&
-             IsFastDoubleElementsKind(to_kind)) {
-    __ mov(new_map_reg, to_map);
-    Register fixed_object_reg = ToRegister(instr->temp());
-    ASSERT(fixed_object_reg.is(edx));
-    ASSERT(new_map_reg.is(ebx));
-    __ mov(fixed_object_reg, object_reg);
-    CallCode(isolate()->builtins()->TransitionElementsSmiToDouble(),
-             RelocInfo::CODE_TARGET, instr);
-  } else if (IsFastDoubleElementsKind(from_kind) &&
-             IsFastObjectElementsKind(to_kind)) {
-    __ mov(new_map_reg, to_map);
-    Register fixed_object_reg = ToRegister(instr->temp());
-    ASSERT(fixed_object_reg.is(edx));
-    ASSERT(new_map_reg.is(ebx));
-    __ mov(fixed_object_reg, object_reg);
-    CallCode(isolate()->builtins()->TransitionElementsDoubleToObject(),
-             RelocInfo::CODE_TARGET, instr);
   } else {
-    UNREACHABLE();
+    PushSafepointRegistersScope scope(this);
+    if (!object_reg.is(eax)) {
+      __ push(object_reg);
+    }
+    LoadContextFromDeferred(instr->context());
+    if (!object_reg.is(eax)) {
+      __ pop(eax);
+    }
+    __ mov(ebx, to_map);
+    TransitionElementsKindStub stub(from_kind, to_kind);
+    __ CallStub(&stub);
+    RecordSafepointWithRegisters(
+        instr->pointer_map(), 0, Safepoint::kNoLazyDeopt);
   }
   __ bind(&not_applicable);
 }


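Note: this hunk is the core of the CL named in the issue title. The two hard-wired builtin calls (smi-to-double and double-to-object, each pinned to edx/ebx) collapse into a single Crankshaft-generated TransitionElementsKindStub, called with the object in eax and the target map in ebx inside a safepoint-registers scope. A standalone sketch of the dispatch shape, with a deliberately simplified predicate (the real IsSimpleMapChangeTransition lives in V8's elements-kind code and covers more cases, e.g. packed-to-holey):

    // Simplified assumption: a transition is "simple" when the elements
    // representation is unchanged and only the map word must be rewritten,
    // such as smi elements being reinterpreted as tagged object elements.
    enum ElementsKind { FAST_SMI_ELEMENTS, FAST_ELEMENTS, FAST_DOUBLE_ELEMENTS };

    bool IsSimpleMapChangeTransitionSketch(ElementsKind from, ElementsKind to) {
      return from == FAST_SMI_ELEMENTS && to == FAST_ELEMENTS;
    }
    // Anything else (smi -> double, double -> object, ...) must rewrite the
    // elements backing store, which the stub now does generically.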
 void LCodeGen::DoStringCharCodeAt(LStringCharCodeAt* instr) {
   class DeferredStringCharCodeAt: public LDeferredCode {
    public:
     DeferredStringCharCodeAt(LCodeGen* codegen, LStringCharCodeAt* instr)
         : LDeferredCode(codegen), instr_(instr) { }
(...skipping 267 matching lines...)
     DeferredNumberTagD(LCodeGen* codegen, LNumberTagD* instr)
         : LDeferredCode(codegen), instr_(instr) { }
     virtual void Generate() { codegen()->DoDeferredNumberTagD(instr_); }
     virtual LInstruction* instr() { return instr_; }
    private:
     LNumberTagD* instr_;
   };

   Register reg = ToRegister(instr->result());

+  bool convert_hole = false;
+  HValue* change_input = instr->hydrogen()->value();
+  if (change_input->IsLoadKeyed()) {
+    HLoadKeyed* load = HLoadKeyed::cast(change_input);
+    convert_hole = load->CanReturnHole();
+  }
+
+  Label no_special_nan_handling;
+  Label done;
+  if (convert_hole) {
+    bool use_sse2 = CpuFeatures::IsSupported(SSE2);
+    if (use_sse2) {
+      CpuFeatures::Scope scope(SSE2);
+      XMMRegister input_reg = ToDoubleRegister(instr->value());
+      __ ucomisd(input_reg, input_reg);
+    } else {
+      if (!IsX87TopOfStack(instr->value())) {
+        __ fld_d(ToOperand(instr->value()));
+      }
+      __ fld(0);
+      __ fld(0);
+      __ FCmp();
+    }
+
+    __ j(parity_odd, &no_special_nan_handling);
+    __ sub(esp, Immediate(kDoubleSize));
+    if (use_sse2) {
+      CpuFeatures::Scope scope(SSE2);
+      XMMRegister input_reg = ToDoubleRegister(instr->value());
+      __ movdbl(MemOperand(esp, 0), input_reg);
+    } else {
+      __ fld(0);
+      __ fstp_d(MemOperand(esp, 0));
+    }
+    __ cmp(MemOperand(esp, sizeof(kHoleNanLower32)),
+           Immediate(kHoleNanUpper32));
+    Label canonicalize;
+    __ j(not_equal, &canonicalize);
+    __ add(esp, Immediate(kDoubleSize));
+    __ mov(reg, factory()->the_hole_value());
+    __ jmp(&done);
+    __ bind(&canonicalize);
+    __ add(esp, Immediate(kDoubleSize));
+    ExternalReference nan =
+        ExternalReference::address_of_canonical_non_hole_nan();
+    if (use_sse2) {
+      CpuFeatures::Scope scope(SSE2);
+      XMMRegister input_reg = ToDoubleRegister(instr->value());
+      __ movdbl(input_reg, Operand::StaticVariable(nan));
+    } else {
+      __ fstp(0);
+      __ fld_d(Operand::StaticVariable(nan));
+    }
+  }
+
+  __ bind(&no_special_nan_handling);
   DeferredNumberTagD* deferred = new(zone()) DeferredNumberTagD(this, instr);
   if (FLAG_inline_new) {
     Register tmp = ToRegister(instr->temp());
     __ AllocateHeapNumber(reg, tmp, no_reg, deferred->entry());
   } else {
     __ jmp(deferred->entry());
   }
   __ bind(deferred->exit());
   if (CpuFeatures::IsSupported(SSE2)) {
     CpuFeatures::Scope scope(SSE2);
     XMMRegister input_reg = ToDoubleRegister(instr->value());
     __ movdbl(FieldOperand(reg, HeapNumber::kValueOffset), input_reg);
   } else {
     if (!IsX87TopOfStack(instr->value())) {
       __ fld_d(ToOperand(instr->value()));
     }
     __ fstp_d(FieldOperand(reg, HeapNumber::kValueOffset));
   }
+  __ bind(&done);
 }
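Note: the new block first asks whether the value is a NaN at all (comparing a register against itself with ucomisd/FCmp flags unordered, i.e. NaN, via the parity flag), then separates the hole's specific NaN bit pattern from every other NaN by comparing the upper 32 bits of the spilled double against kHoleNanUpper32. A standalone sketch of the same classification; the constant below is an assumed illustrative value, not necessarily V8's:

    #include <cstdint>
    #include <cstring>

    // Assumption: the hole is one specific quiet NaN, recognizable from its
    // upper 32 bits alone.
    constexpr uint32_t kHoleNanUpper32 = 0x7FF7FFFF;  // illustrative value

    bool IsAnyNaN(double d) { return d != d; }  // what ucomisd(x, x) detects

    bool IsHoleNaN(double d) {
      uint64_t bits;
      std::memcpy(&bits, &d, sizeof bits);
      // "cmp [esp + 4], kHoleNanUpper32" in the generated code.
      return IsAnyNaN(d) &&
             static_cast<uint32_t>(bits >> 32) == kHoleNanUpper32;
    }
    // Hole NaN -> materialize the_hole_value; any other NaN is rewritten to
    // the canonical non-hole NaN before being boxed as a heap number.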


 void LCodeGen::DoDeferredNumberTagD(LNumberTagD* instr) {
   // TODO(3095996): Get rid of this. For now, we need to make the
   // result register contain a valid pointer because it is already
   // contained in the register pointer map.
   Register reg = ToRegister(instr->result());
   __ Set(reg, Immediate(0));

(...skipping 30 matching lines...)
   }
   __ SmiUntag(ToRegister(input));
 }


 void LCodeGen::EmitNumberUntagD(Register input_reg,
                                 Register temp_reg,
                                 XMMRegister result_reg,
                                 bool deoptimize_on_undefined,
                                 bool deoptimize_on_minus_zero,
-                                LEnvironment* env) {
+                                LEnvironment* env,
+                                NumberUntagDMode mode) {
   Label load_smi, done;

-  // Smi check.
-  __ JumpIfSmi(input_reg, &load_smi, Label::kNear);
-
-  // Heap number map check.
-  __ cmp(FieldOperand(input_reg, HeapObject::kMapOffset),
-         factory()->heap_number_map());
-  if (deoptimize_on_undefined) {
-    DeoptimizeIf(not_equal, env);
-  } else {
-    Label heap_number;
-    __ j(equal, &heap_number, Label::kNear);
-
-    __ cmp(input_reg, factory()->undefined_value());
-    DeoptimizeIf(not_equal, env);
-
-    // Convert undefined to NaN.
-    ExternalReference nan =
-        ExternalReference::address_of_canonical_non_hole_nan();
-    __ movdbl(result_reg, Operand::StaticVariable(nan));
-    __ jmp(&done, Label::kNear);
-
-    __ bind(&heap_number);
-  }
-  // Heap number to XMM conversion.
-  __ movdbl(result_reg, FieldOperand(input_reg, HeapNumber::kValueOffset));
-  if (deoptimize_on_minus_zero) {
-    XMMRegister xmm_scratch = xmm0;
-    __ xorps(xmm_scratch, xmm_scratch);
-    __ ucomisd(result_reg, xmm_scratch);
-    __ j(not_zero, &done, Label::kNear);
-    __ movmskpd(temp_reg, result_reg);
-    __ test_b(temp_reg, 1);
-    DeoptimizeIf(not_zero, env);
-  }
-  __ jmp(&done, Label::kNear);
+  if (mode == NUMBER_CANDIDATE_IS_ANY_TAGGED) {
+    // Smi check.
+    __ JumpIfSmi(input_reg, &load_smi, Label::kNear);
+
+    // Heap number map check.
+    __ cmp(FieldOperand(input_reg, HeapObject::kMapOffset),
+           factory()->heap_number_map());
+    if (deoptimize_on_undefined) {
+      DeoptimizeIf(not_equal, env);
+    } else {
+      Label heap_number;
+      __ j(equal, &heap_number, Label::kNear);
+
+      __ cmp(input_reg, factory()->undefined_value());
+      DeoptimizeIf(not_equal, env);
+
+      // Convert undefined to NaN.
+      ExternalReference nan =
+          ExternalReference::address_of_canonical_non_hole_nan();
+      __ movdbl(result_reg, Operand::StaticVariable(nan));
+      __ jmp(&done, Label::kNear);
+
+      __ bind(&heap_number);
+    }
+    // Heap number to XMM conversion.
+    __ movdbl(result_reg, FieldOperand(input_reg, HeapNumber::kValueOffset));
+    if (deoptimize_on_minus_zero) {
+      XMMRegister xmm_scratch = xmm0;
+      __ xorps(xmm_scratch, xmm_scratch);
+      __ ucomisd(result_reg, xmm_scratch);
+      __ j(not_zero, &done, Label::kNear);
+      __ movmskpd(temp_reg, result_reg);
+      __ test_b(temp_reg, 1);
+      DeoptimizeIf(not_zero, env);
+    }
+    __ jmp(&done, Label::kNear);
+  } else if (mode == NUMBER_CANDIDATE_IS_SMI_OR_HOLE) {
+    __ test(input_reg, Immediate(kSmiTagMask));
+    DeoptimizeIf(not_equal, env);
+  } else if (mode == NUMBER_CANDIDATE_IS_SMI_CONVERT_HOLE) {
+    __ test(input_reg, Immediate(kSmiTagMask));
+    __ j(zero, &load_smi);
+    ExternalReference hole_nan_reference =
+        ExternalReference::address_of_the_hole_nan();
+    __ movdbl(result_reg, Operand::StaticVariable(hole_nan_reference));
+    __ jmp(&done, Label::kNear);
+  } else {
+    ASSERT(mode == NUMBER_CANDIDATE_IS_SMI);
+  }

   // Smi to XMM conversion
   __ bind(&load_smi);
   __ SmiUntag(input_reg);  // Untag smi before converting to float.
   __ cvtsi2sd(result_reg, Operand(input_reg));
   __ SmiTag(input_reg);  // Retag smi.
   __ bind(&done);
 }
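Note: the four mode values used above all come from this patch; a declaration consistent with those uses would look like the following (the enumerator names are from the patch, the ordering and comments are our assumptions):

    enum NumberUntagDMode {
      NUMBER_CANDIDATE_IS_SMI,               // statically known smi; no checks
      NUMBER_CANDIDATE_IS_SMI_OR_HOLE,       // smi expected; anything else deopts
      NUMBER_CANDIDATE_IS_SMI_CONVERT_HOLE,  // smi expected; the hole becomes hole NaN
      NUMBER_CANDIDATE_IS_ANY_TAGGED         // full smi / heap-number / undefined path
    };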


(...skipping 125 matching lines...)

   if (CpuFeatures::IsSupported(SSE2)) {
     CpuFeatures::Scope scope(SSE2);
     Register input_reg = ToRegister(input);
     XMMRegister result_reg = ToDoubleRegister(result);

     bool deoptimize_on_minus_zero =
         instr->hydrogen()->deoptimize_on_minus_zero();
     Register temp_reg = deoptimize_on_minus_zero ? ToRegister(temp) : no_reg;

+    NumberUntagDMode mode = NUMBER_CANDIDATE_IS_ANY_TAGGED;
+    HValue* value = instr->hydrogen()->value();
+    if (value->type().IsSmi()) {
+      if (value->IsLoadKeyed()) {
+        HLoadKeyed* load = HLoadKeyed::cast(value);
+        if (load->CanReturnHole()) {
+          if (load->hole_mode() == ALLOW_RETURN_HOLE) {
+            mode = NUMBER_CANDIDATE_IS_SMI_CONVERT_HOLE;
+          } else {
+            mode = NUMBER_CANDIDATE_IS_SMI_OR_HOLE;
+          }
+        } else {
+          mode = NUMBER_CANDIDATE_IS_SMI;
+        }
+      }
+    }
+
     EmitNumberUntagD(input_reg,
                      temp_reg,
                      result_reg,
                      instr->hydrogen()->deoptimize_on_undefined(),
                      deoptimize_on_minus_zero,
-                     instr->environment());
+                     instr->environment(),
+                     mode);
   } else {
     UNIMPLEMENTED();
   }
 }


 void LCodeGen::DoDoubleToI(LDoubleToI* instr) {
   LOperand* input = instr->value();
   ASSERT(input->IsDoubleRegister());
   LOperand* result = instr->result();
(...skipping 372 matching lines...)
   __ Set(result, Immediate(0));

   PushSafepointRegistersScope scope(this);
   __ push(Immediate(Smi::FromInt(instance_size)));
   CallRuntimeFromDeferred(
       Runtime::kAllocateInNewSpace, 1, instr, instr->context());
   __ StoreToSafepointRegisterSlot(result, eax);
 }


+void LCodeGen::DoAllocate(LAllocate* instr) {
+  class DeferredAllocate: public LDeferredCode {
+   public:
+    DeferredAllocate(LCodeGen* codegen, LAllocate* instr)
+        : LDeferredCode(codegen), instr_(instr) { }
+    virtual void Generate() { codegen()->DoDeferredAllocate(instr_); }
+    virtual LInstruction* instr() { return instr_; }
+   private:
+    LAllocate* instr_;
+  };
+
+  DeferredAllocate* deferred =
+      new(zone()) DeferredAllocate(this, instr);
+
+  Register size = ToRegister(instr->size());
+  Register result = ToRegister(instr->result());
+  Register temp = ToRegister(instr->temp());
+
+  HAllocate* original_instr = instr->hydrogen();
+  if (original_instr->size()->IsConstant()) {
+    UNREACHABLE();
+  } else {
+    // Allocate memory for the object.
+    AllocationFlags flags = TAG_OBJECT;
+    if (original_instr->MustAllocateDoubleAligned()) {
+      flags = static_cast<AllocationFlags>(flags | DOUBLE_ALIGNMENT);
+    }
+    __ AllocateInNewSpace(size, result, temp, no_reg,
+                          deferred->entry(), flags);
+  }
+
+  __ bind(deferred->exit());
+}
+
+
+void LCodeGen::DoDeferredAllocate(LAllocate* instr) {
+  Register size = ToRegister(instr->size());
+  Register result = ToRegister(instr->result());
+
+  __ SmiTag(size);
+  PushSafepointRegistersScope scope(this);
+  // TODO(3095996): Get rid of this. For now, we need to make the
+  // result register contain a valid pointer because it is already
+  // contained in the register pointer map.
+  if (!size.is(result)) {
+    __ StoreToSafepointRegisterSlot(result, size);
+  }
+  __ push(size);
+  CallRuntimeFromDeferred(
+      Runtime::kAllocateInNewSpace, 1, instr, instr->context());
+  __ StoreToSafepointRegisterSlot(result, eax);
+}
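Note: the deferred path smi-tags the raw byte count before pushing it because Runtime::kAllocateInNewSpace expects a tagged argument. On ia32 a smi is a 31-bit integer shifted left one bit with tag bit 0, so tagging and untagging are single shifts:

    #include <cstdint>

    constexpr int kSmiTagSize = 1;  // ia32: 31-bit payload, tag bit 0

    int32_t SmiTag(int32_t value) { return value << kSmiTagSize; }
    int32_t SmiUntag(int32_t tagged) { return tagged >> kSmiTagSize; }
    // Example: SmiTag(64) == 128; the runtime reads the argument by untagging.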
+
+
 void LCodeGen::DoArrayLiteral(LArrayLiteral* instr) {
   ASSERT(ToRegister(instr->context()).is(esi));
   Handle<FixedArray> literals(instr->environment()->closure()->literals());
   ElementsKind boilerplate_elements_kind =
       instr->hydrogen()->boilerplate_elements_kind();
   AllocationSiteMode allocation_site_mode =
       instr->hydrogen()->allocation_site_mode();

   // Deopt if the array literal boilerplate ElementsKind is of a type different
   // than the expected one. The check isn't necessary if the boilerplate has
(...skipping 674 matching lines...)
                     FixedArray::kHeaderSize - kPointerSize));
   __ bind(&done);
 }


 #undef __

 } }  // namespace v8::internal

 #endif  // V8_TARGET_ARCH_IA32
