OLD | NEW |
1 // Copyright 2012 the V8 project authors. All rights reserved. | 1 // Copyright 2013 the V8 project authors. All rights reserved. |
2 // Redistribution and use in source and binary forms, with or without | 2 // Redistribution and use in source and binary forms, with or without |
3 // modification, are permitted provided that the following conditions are | 3 // modification, are permitted provided that the following conditions are |
4 // met: | 4 // met: |
5 // | 5 // |
6 // * Redistributions of source code must retain the above copyright | 6 // * Redistributions of source code must retain the above copyright |
7 // notice, this list of conditions and the following disclaimer. | 7 // notice, this list of conditions and the following disclaimer. |
8 // * Redistributions in binary form must reproduce the above | 8 // * Redistributions in binary form must reproduce the above |
9 // copyright notice, this list of conditions and the following | 9 // copyright notice, this list of conditions and the following |
10 // disclaimer in the documentation and/or other materials provided | 10 // disclaimer in the documentation and/or other materials provided |
11 // with the distribution. | 11 // with the distribution. |
(...skipping 144 matching lines...)
156 __ Push(Smi::FromInt(StackFrame::STUB)); | 156 __ Push(Smi::FromInt(StackFrame::STUB)); |
157 } else { | 157 } else { |
158 __ push(rdi); // Callee's JS function. | 158 __ push(rdi); // Callee's JS function. |
159 } | 159 } |
160 } | 160 } |
161 | 161 |
162 // Reserve space for the stack slots needed by the code. | 162 // Reserve space for the stack slots needed by the code. |
163 int slots = GetStackSlotCount(); | 163 int slots = GetStackSlotCount(); |
164 if (slots > 0) { | 164 if (slots > 0) { |
165 if (FLAG_debug_code) { | 165 if (FLAG_debug_code) { |
| 166 __ subq(rsp, Immediate(slots * kPointerSize)); |
| 167 __ push(rax); |
166 __ Set(rax, slots); | 168 __ Set(rax, slots); |
167 __ movq(kScratchRegister, kSlotsZapValue, RelocInfo::NONE64); | 169 __ movq(kScratchRegister, kSlotsZapValue, RelocInfo::NONE64); |
168 Label loop; | 170 Label loop; |
169 __ bind(&loop); | 171 __ bind(&loop); |
170 __ push(kScratchRegister); | 172 __ movq(MemOperand(rsp, rax, times_pointer_size, 0), |
| 173 kScratchRegister); |
171 __ decl(rax); | 174 __ decl(rax); |
172 __ j(not_zero, &loop); | 175 __ j(not_zero, &loop); |
| 176 __ pop(rax); |
173 } else { | 177 } else { |
174 __ subq(rsp, Immediate(slots * kPointerSize)); | 178 __ subq(rsp, Immediate(slots * kPointerSize)); |
175 #ifdef _MSC_VER | 179 #ifdef _MSC_VER |
176 // On windows, you may not access the stack more than one page below | 180 // On windows, you may not access the stack more than one page below |
177 // the most recently mapped page. To make the allocated area randomly | 181 // the most recently mapped page. To make the allocated area randomly |
178 // accessible, we write to each page in turn (the value is irrelevant). | 182 // accessible, we write to each page in turn (the value is irrelevant). |
179 const int kPageSize = 4 * KB; | 183 const int kPageSize = 4 * KB; |
180 for (int offset = slots * kPointerSize - kPageSize; | 184 for (int offset = slots * kPointerSize - kPageSize; |
181 offset > 0; | 185 offset > 0; |
182 offset -= kPageSize) { | 186 offset -= kPageSize) { |
183 __ movq(Operand(rsp, offset), rax); | 187 __ movq(Operand(rsp, offset), rax); |
184 } | 188 } |
185 #endif | 189 #endif |
186 } | 190 } |
| 191 |
| 192 if (info()->saves_caller_doubles()) { |
| 193 Comment(";;; Save clobbered callee double registers"); |
| 194 int count = 0; |
| 195 BitVector* doubles = chunk()->allocated_double_registers(); |
| 196 BitVector::Iterator save_iterator(doubles); |
| 197 while (!save_iterator.Done()) { |
| 198 __ movsd(MemOperand(rsp, count * kDoubleSize), |
| 199 XMMRegister::FromAllocationIndex(save_iterator.Current())); |
| 200 save_iterator.Advance(); |
| 201 count++; |
| 202 } |
| 203 } |
187 } | 204 } |
188 | 205 |
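A minimal C++ sketch of the new debug-code path above: unlike the old push loop, rsp is dropped exactly once, rax is saved in the extra word at the bottom of the area, and the countdown stores zap the reserved words at offsets slots..1 above it. The sentinel value and layout here are illustrative, not V8's.

    #include <cstdint>
    #include <vector>

    constexpr uint64_t kSlotsZapValue = 0xBEEFDEADBEEFDEEDull;  // illustrative sentinel

    // frame[0] models the word holding the saved rax; the countdown writes
    // the zap value into words slots..1, mirroring decl(rax)/j(not_zero).
    std::vector<uint64_t> ZapReservedSlots(int slots) {
      std::vector<uint64_t> frame(slots + 1, 0);
      for (int rax = slots; rax != 0; --rax) {
        frame[rax] = kSlotsZapValue;
      }
      return frame;
    }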
189 // Possibly allocate a local context. | 206 // Possibly allocate a local context. |
190 int heap_slots = info_->num_heap_slots() - Context::MIN_CONTEXT_SLOTS; | 207 int heap_slots = info_->num_heap_slots() - Context::MIN_CONTEXT_SLOTS; |
191 if (heap_slots > 0) { | 208 if (heap_slots > 0) { |
192 Comment(";;; Allocate local context"); | 209 Comment(";;; Allocate local context"); |
193 // Argument to NewContext is the function, which is still in rdi. | 210 // Argument to NewContext is the function, which is still in rdi. |
194 __ push(rdi); | 211 __ push(rdi); |
195 if (heap_slots <= FastNewContextStub::kMaximumSlots) { | 212 if (heap_slots <= FastNewContextStub::kMaximumSlots) { |
196 FastNewContextStub stub(heap_slots); | 213 FastNewContextStub stub(heap_slots); |
(...skipping 2261 matching lines...)
2458 } | 2475 } |
2459 | 2476 |
2460 | 2477 |
2461 void LCodeGen::DoReturn(LReturn* instr) { | 2478 void LCodeGen::DoReturn(LReturn* instr) { |
2462 if (FLAG_trace && info()->IsOptimizing()) { | 2479 if (FLAG_trace && info()->IsOptimizing()) { |
2463 // Preserve the return value on the stack and rely on the runtime | 2480 // Preserve the return value on the stack and rely on the runtime |
2464 // call to return the value in the same register. | 2481 // call to return the value in the same register. |
2465 __ push(rax); | 2482 __ push(rax); |
2466 __ CallRuntime(Runtime::kTraceExit, 1); | 2483 __ CallRuntime(Runtime::kTraceExit, 1); |
2467 } | 2484 } |
| 2485 if (info()->saves_caller_doubles()) { |
| 2486 ASSERT(NeedsEagerFrame()); |
| 2487 BitVector* doubles = chunk()->allocated_double_registers(); |
| 2488 BitVector::Iterator save_iterator(doubles); |
| 2489 int count = 0; |
| 2490 while (!save_iterator.Done()) { |
| 2491 __ movsd(XMMRegister::FromAllocationIndex(save_iterator.Current()), |
| 2492 MemOperand(rsp, count * kDoubleSize)); |
| 2493 save_iterator.Advance(); |
| 2494 count++; |
| 2495 } |
| 2496 } |
2468 if (NeedsEagerFrame()) { | 2497 if (NeedsEagerFrame()) { |
2469 __ movq(rsp, rbp); | 2498 __ movq(rsp, rbp); |
2470 __ pop(rbp); | 2499 __ pop(rbp); |
2471 } | 2500 } |
2472 if (info()->IsStub()) { | 2501 if (info()->IsStub()) { |
2473 __ Ret(0, r10); | 2502 __ Ret(0, r10); |
2474 } else { | 2503 } else { |
2475 __ Ret((GetParameterCount() + 1) * kPointerSize, rcx); | 2504 __ Ret((GetParameterCount() + 1) * kPointerSize, rcx); |
2476 } | 2505 } |
2477 } | 2506 } |
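The restore loop here mirrors the save loop added to the prologue: both walk the allocated-double bit vector in the same order, so slot `count` pairs the same XMM register on entry and exit. A sketch under the assumption that a plain bitmask stands in for BitVector:

    #include <cstdint>

    // Save on frame entry: spill[count] gets the count-th allocated register
    // (rsp + count * kDoubleSize in the diff); restore on return reads them
    // back in the same order.
    void SaveDoubles(uint32_t allocated, const double* regs, double* spill) {
      int count = 0;
      for (int i = 0; i < 16; ++i) {
        if (allocated & (1u << i)) spill[count++] = regs[i];
      }
    }

    void RestoreDoubles(uint32_t allocated, const double* spill, double* regs) {
      int count = 0;
      for (int i = 0; i < 16; ++i) {
        if (allocated & (1u << i)) regs[i] = spill[count++];
      }
    }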
(...skipping 1650 matching lines...)
4128 | 4157 |
4129 Handle<Code> ic = (instr->strict_mode_flag() == kStrictMode) | 4158 Handle<Code> ic = (instr->strict_mode_flag() == kStrictMode) |
4130 ? isolate()->builtins()->KeyedStoreIC_Initialize_Strict() | 4159 ? isolate()->builtins()->KeyedStoreIC_Initialize_Strict() |
4131 : isolate()->builtins()->KeyedStoreIC_Initialize(); | 4160 : isolate()->builtins()->KeyedStoreIC_Initialize(); |
4132 CallCode(ic, RelocInfo::CODE_TARGET, instr); | 4161 CallCode(ic, RelocInfo::CODE_TARGET, instr); |
4133 } | 4162 } |
4134 | 4163 |
4135 | 4164 |
4136 void LCodeGen::DoTransitionElementsKind(LTransitionElementsKind* instr) { | 4165 void LCodeGen::DoTransitionElementsKind(LTransitionElementsKind* instr) { |
4137 Register object_reg = ToRegister(instr->object()); | 4166 Register object_reg = ToRegister(instr->object()); |
4138 Register new_map_reg = ToRegister(instr->new_map_temp()); | |
4139 | 4167 |
4140 Handle<Map> from_map = instr->original_map(); | 4168 Handle<Map> from_map = instr->original_map(); |
4141 Handle<Map> to_map = instr->transitioned_map(); | 4169 Handle<Map> to_map = instr->transitioned_map(); |
4142 ElementsKind from_kind = instr->from_kind(); | 4170 ElementsKind from_kind = instr->from_kind(); |
4143 ElementsKind to_kind = instr->to_kind(); | 4171 ElementsKind to_kind = instr->to_kind(); |
4144 | 4172 |
4145 Label not_applicable; | 4173 Label not_applicable; |
4146 __ Cmp(FieldOperand(object_reg, HeapObject::kMapOffset), from_map); | 4174 __ Cmp(FieldOperand(object_reg, HeapObject::kMapOffset), from_map); |
4147 __ j(not_equal, &not_applicable); | 4175 __ j(not_equal, &not_applicable); |
4148 __ movq(new_map_reg, to_map, RelocInfo::EMBEDDED_OBJECT); | |
4149 if (IsSimpleMapChangeTransition(from_kind, to_kind)) { | 4176 if (IsSimpleMapChangeTransition(from_kind, to_kind)) { |
| 4177 Register new_map_reg = ToRegister(instr->new_map_temp()); |
| 4178 __ movq(new_map_reg, to_map, RelocInfo::EMBEDDED_OBJECT); |
4150 __ movq(FieldOperand(object_reg, HeapObject::kMapOffset), new_map_reg); | 4179 __ movq(FieldOperand(object_reg, HeapObject::kMapOffset), new_map_reg); |
4151 // Write barrier. | 4180 // Write barrier. |
4152 ASSERT_NE(instr->temp(), NULL); | 4181 ASSERT_NE(instr->temp(), NULL); |
4153 __ RecordWriteField(object_reg, HeapObject::kMapOffset, new_map_reg, | 4182 __ RecordWriteField(object_reg, HeapObject::kMapOffset, new_map_reg, |
4154 ToRegister(instr->temp()), kDontSaveFPRegs); | 4183 ToRegister(instr->temp()), kDontSaveFPRegs); |
| 4184 } else if (FLAG_compiled_transitions) { |
| 4185 PushSafepointRegistersScope scope(this); |
| 4186 if (!object_reg.is(rax)) { |
| 4187 __ movq(rax, object_reg); |
| 4188 } |
| 4189 __ Move(rbx, to_map); |
| 4190 TransitionElementsKindStub stub(from_kind, to_kind); |
| 4191 __ CallStub(&stub); |
| 4192 RecordSafepointWithRegisters( |
| 4193 instr->pointer_map(), 0, Safepoint::kNoLazyDeopt); |
4155 } else if (IsFastSmiElementsKind(from_kind) && | 4194 } else if (IsFastSmiElementsKind(from_kind) && |
4156 IsFastDoubleElementsKind(to_kind)) { | 4195 IsFastDoubleElementsKind(to_kind)) { |
4157 Register fixed_object_reg = ToRegister(instr->temp()); | 4196 Register fixed_object_reg = ToRegister(instr->temp()); |
4158 ASSERT(fixed_object_reg.is(rdx)); | 4197 ASSERT(fixed_object_reg.is(rdx)); |
| 4198 Register new_map_reg = ToRegister(instr->new_map_temp()); |
4159 ASSERT(new_map_reg.is(rbx)); | 4199 ASSERT(new_map_reg.is(rbx)); |
| 4200 __ movq(new_map_reg, to_map, RelocInfo::EMBEDDED_OBJECT); |
4160 __ movq(fixed_object_reg, object_reg); | 4201 __ movq(fixed_object_reg, object_reg); |
4161 CallCode(isolate()->builtins()->TransitionElementsSmiToDouble(), | 4202 CallCode(isolate()->builtins()->TransitionElementsSmiToDouble(), |
4162 RelocInfo::CODE_TARGET, instr); | 4203 RelocInfo::CODE_TARGET, instr); |
4163 } else if (IsFastDoubleElementsKind(from_kind) && | 4204 } else if (IsFastDoubleElementsKind(from_kind) && |
4164 IsFastObjectElementsKind(to_kind)) { | 4205 IsFastObjectElementsKind(to_kind)) { |
4165 Register fixed_object_reg = ToRegister(instr->temp()); | 4206 Register fixed_object_reg = ToRegister(instr->temp()); |
4166 ASSERT(fixed_object_reg.is(rdx)); | 4207 ASSERT(fixed_object_reg.is(rdx)); |
| 4208 Register new_map_reg = ToRegister(instr->new_map_temp()); |
4167 ASSERT(new_map_reg.is(rbx)); | 4209 ASSERT(new_map_reg.is(rbx)); |
| 4210 __ movq(new_map_reg, to_map, RelocInfo::EMBEDDED_OBJECT); |
4168 __ movq(fixed_object_reg, object_reg); | 4211 __ movq(fixed_object_reg, object_reg); |
4169 CallCode(isolate()->builtins()->TransitionElementsDoubleToObject(), | 4212 CallCode(isolate()->builtins()->TransitionElementsDoubleToObject(), |
4170 RelocInfo::CODE_TARGET, instr); | 4213 RelocInfo::CODE_TARGET, instr); |
4171 } else { | 4214 } else { |
4172 UNREACHABLE(); | 4215 UNREACHABLE(); |
4173 } | 4216 } |
4174 __ bind(&not_applicable); | 4217 __ bind(&not_applicable); |
4175 } | 4218 } |
4176 | 4219 |
4177 | 4220 |
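DoTransitionElementsKind now dispatches among four paths. Flattened into plain C++ for clarity; the names are stand-ins, only the dispatch order matches the code above:

    enum TransitionPath {
      kSimpleMapSwap,    // store the new map in place + write barrier
      kGenericStub,      // TransitionElementsKindStub, under FLAG_compiled_transitions
      kSmiToDouble,      // TransitionElementsSmiToDouble builtin
      kDoubleToObject,   // TransitionElementsDoubleToObject builtin
      kUnreachable
    };

    TransitionPath ChoosePath(bool simple_map_change, bool compiled_transitions,
                              bool from_fast_smi, bool to_fast_double,
                              bool from_fast_double, bool to_fast_object) {
      if (simple_map_change) return kSimpleMapSwap;
      if (compiled_transitions) return kGenericStub;
      if (from_fast_smi && to_fast_double) return kSmiToDouble;
      if (from_fast_double && to_fast_object) return kDoubleToObject;
      return kUnreachable;
    }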
| 4221 void LCodeGen::DoTrapAllocationMemento(LTrapAllocationMemento* instr) { |
| 4222 Register object = ToRegister(instr->object()); |
| 4223 Register temp = ToRegister(instr->temp()); |
| 4224 __ TestJSArrayForAllocationSiteInfo(object, temp); |
| 4225 DeoptimizeIf(equal, instr->environment()); |
| 4226 } |
| 4227 |
| 4228 |
4178 void LCodeGen::DoStringAdd(LStringAdd* instr) { | 4229 void LCodeGen::DoStringAdd(LStringAdd* instr) { |
4179 EmitPushTaggedOperand(instr->left()); | 4230 EmitPushTaggedOperand(instr->left()); |
4180 EmitPushTaggedOperand(instr->right()); | 4231 EmitPushTaggedOperand(instr->right()); |
4181 StringAddStub stub(NO_STRING_CHECK_IN_STUB); | 4232 StringAddStub stub(NO_STRING_CHECK_IN_STUB); |
4182 CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr); | 4233 CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr); |
4183 } | 4234 } |
4184 | 4235 |
4185 | 4236 |
4186 void LCodeGen::DoStringCharCodeAt(LStringCharCodeAt* instr) { | 4237 void LCodeGen::DoStringCharCodeAt(LStringCharCodeAt* instr) { |
4187 class DeferredStringCharCodeAt: public LDeferredCode { | 4238 class DeferredStringCharCodeAt: public LDeferredCode { |
(...skipping 206 matching lines...)
4394 virtual void Generate() { codegen()->DoDeferredNumberTagD(instr_); } | 4445 virtual void Generate() { codegen()->DoDeferredNumberTagD(instr_); } |
4395 virtual LInstruction* instr() { return instr_; } | 4446 virtual LInstruction* instr() { return instr_; } |
4396 private: | 4447 private: |
4397 LNumberTagD* instr_; | 4448 LNumberTagD* instr_; |
4398 }; | 4449 }; |
4399 | 4450 |
4400 XMMRegister input_reg = ToDoubleRegister(instr->value()); | 4451 XMMRegister input_reg = ToDoubleRegister(instr->value()); |
4401 Register reg = ToRegister(instr->result()); | 4452 Register reg = ToRegister(instr->result()); |
4402 Register tmp = ToRegister(instr->temp()); | 4453 Register tmp = ToRegister(instr->temp()); |
4403 | 4454 |
| 4455 bool convert_hole = false; |
| 4456 HValue* change_input = instr->hydrogen()->value(); |
| 4457 if (change_input->IsLoadKeyed()) { |
| 4458 HLoadKeyed* load = HLoadKeyed::cast(change_input); |
| 4459 convert_hole = load->UsesMustHandleHole(); |
| 4460 } |
| 4461 |
| 4462 Label no_special_nan_handling; |
| 4463 Label done; |
| 4464 if (convert_hole) { |
| 4465 XMMRegister input_reg = ToDoubleRegister(instr->value()); |
| 4466 __ ucomisd(input_reg, input_reg); |
| 4467 __ j(parity_odd, &no_special_nan_handling); |
| 4468 __ subq(rsp, Immediate(kDoubleSize)); |
| 4469 __ movsd(MemOperand(rsp, 0), input_reg); |
| 4470 __ cmpl(MemOperand(rsp, sizeof(kHoleNanLower32)), |
| 4471 Immediate(kHoleNanUpper32)); |
| 4472 Label canonicalize; |
| 4473 __ j(not_equal, &canonicalize); |
| 4474 __ addq(rsp, Immediate(kDoubleSize)); |
| 4475 __ Move(reg, factory()->the_hole_value()); |
| 4476 __ jmp(&done); |
| 4477 __ bind(&canonicalize); |
| 4478 __ addq(rsp, Immediate(kDoubleSize)); |
| 4479 __ Set(kScratchRegister, BitCast<uint64_t>( |
| 4480 FixedDoubleArray::canonical_not_the_hole_nan_as_double())); |
| 4481 __ movq(input_reg, kScratchRegister); |
| 4482 } |
| 4483 |
| 4484 __ bind(&no_special_nan_handling); |
4404 DeferredNumberTagD* deferred = new(zone()) DeferredNumberTagD(this, instr); | 4485 DeferredNumberTagD* deferred = new(zone()) DeferredNumberTagD(this, instr); |
4405 if (FLAG_inline_new) { | 4486 if (FLAG_inline_new) { |
4406 __ AllocateHeapNumber(reg, tmp, deferred->entry()); | 4487 __ AllocateHeapNumber(reg, tmp, deferred->entry()); |
4407 } else { | 4488 } else { |
4408 __ jmp(deferred->entry()); | 4489 __ jmp(deferred->entry()); |
4409 } | 4490 } |
4410 __ bind(deferred->exit()); | 4491 __ bind(deferred->exit()); |
4411 __ movsd(FieldOperand(reg, HeapNumber::kValueOffset), input_reg); | 4492 __ movsd(FieldOperand(reg, HeapNumber::kValueOffset), input_reg); |
| 4493 |
| 4494 __ bind(&done); |
4412 } | 4495 } |
4413 | 4496 |
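Background for the hole handling above: the hole is a NaN with one specific bit pattern, and ucomisd can only report "is some NaN" (parity flag set), so the code spills the value to the stack and compares its upper 32 bits against kHoleNanUpper32. A standalone sketch; the constant is an assumption for illustration, not V8's definition:

    #include <cstdint>
    #include <cstring>

    constexpr uint32_t kHoleNanUpper32 = 0x7FF7FFFF;  // assumed encoding

    // The generated code has already established "is a NaN" via ucomisd, so
    // comparing the upper half is enough to distinguish the hole.
    bool IsHoleNan(double d) {
      uint64_t bits;
      std::memcpy(&bits, &d, sizeof bits);  // models the movsd spill to rsp
      return static_cast<uint32_t>(bits >> 32) == kHoleNanUpper32;
    }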
4414 | 4497 |
4415 void LCodeGen::DoDeferredNumberTagD(LNumberTagD* instr) { | 4498 void LCodeGen::DoDeferredNumberTagD(LNumberTagD* instr) { |
4416 // TODO(3095996): Get rid of this. For now, we need to make the | 4499 // TODO(3095996): Get rid of this. For now, we need to make the |
4417 // result register contain a valid pointer because it is already | 4500 // result register contain a valid pointer because it is already |
4418 // contained in the register pointer map. | 4501 // contained in the register pointer map. |
4419 Register reg = ToRegister(instr->result()); | 4502 Register reg = ToRegister(instr->result()); |
4420 __ Move(reg, Smi::FromInt(0)); | 4503 __ Move(reg, Smi::FromInt(0)); |
4421 | 4504 |
(...skipping 25 matching lines...)
4447 __ AssertSmi(input); | 4530 __ AssertSmi(input); |
4448 } | 4531 } |
4449 __ SmiToInteger32(input, input); | 4532 __ SmiToInteger32(input, input); |
4450 } | 4533 } |
4451 | 4534 |
4452 | 4535 |
4453 void LCodeGen::EmitNumberUntagD(Register input_reg, | 4536 void LCodeGen::EmitNumberUntagD(Register input_reg, |
4454 XMMRegister result_reg, | 4537 XMMRegister result_reg, |
4455 bool deoptimize_on_undefined, | 4538 bool deoptimize_on_undefined, |
4456 bool deoptimize_on_minus_zero, | 4539 bool deoptimize_on_minus_zero, |
4457 LEnvironment* env) { | 4540 LEnvironment* env, |
| 4541 NumberUntagDMode mode) { |
4458 Label load_smi, done; | 4542 Label load_smi, done; |
4459 | 4543 |
4460 // Smi check. | 4544 if (mode == NUMBER_CANDIDATE_IS_ANY_TAGGED) { |
4461 __ JumpIfSmi(input_reg, &load_smi, Label::kNear); | 4545 // Smi check. |
| 4546 __ JumpIfSmi(input_reg, &load_smi, Label::kNear); |
4462 | 4547 |
4463 // Heap number map check. | 4548 // Heap number map check. |
4464 __ CompareRoot(FieldOperand(input_reg, HeapObject::kMapOffset), | 4549 __ CompareRoot(FieldOperand(input_reg, HeapObject::kMapOffset), |
4465 Heap::kHeapNumberMapRootIndex); | 4550 Heap::kHeapNumberMapRootIndex); |
4466 if (deoptimize_on_undefined) { | 4551 if (deoptimize_on_undefined) { |
4467 DeoptimizeIf(not_equal, env); | 4552 DeoptimizeIf(not_equal, env); |
| 4553 } else { |
| 4554 Label heap_number; |
| 4555 __ j(equal, &heap_number, Label::kNear); |
| 4556 |
| 4557 __ CompareRoot(input_reg, Heap::kUndefinedValueRootIndex); |
| 4558 DeoptimizeIf(not_equal, env); |
| 4559 |
| 4560 // Convert undefined to NaN. Compute NaN as 0/0. |
| 4561 __ xorps(result_reg, result_reg); |
| 4562 __ divsd(result_reg, result_reg); |
| 4563 __ jmp(&done, Label::kNear); |
| 4564 |
| 4565 __ bind(&heap_number); |
| 4566 } |
| 4567 // Heap number to XMM conversion. |
| 4568 __ movsd(result_reg, FieldOperand(input_reg, HeapNumber::kValueOffset)); |
| 4569 if (deoptimize_on_minus_zero) { |
| 4570 XMMRegister xmm_scratch = xmm0; |
| 4571 __ xorps(xmm_scratch, xmm_scratch); |
| 4572 __ ucomisd(xmm_scratch, result_reg); |
| 4573 __ j(not_equal, &done, Label::kNear); |
| 4574 __ movmskpd(kScratchRegister, result_reg); |
| 4575 __ testq(kScratchRegister, Immediate(1)); |
| 4576 DeoptimizeIf(not_zero, env); |
| 4577 } |
| 4578 __ jmp(&done, Label::kNear); |
| 4579 } else if (mode == NUMBER_CANDIDATE_IS_SMI_OR_HOLE) { |
| 4580 __ testq(input_reg, Immediate(kSmiTagMask)); |
| 4581 DeoptimizeIf(not_equal, env); |
| 4582 } else if (mode == NUMBER_CANDIDATE_IS_SMI_CONVERT_HOLE) { |
| 4583 __ testq(input_reg, Immediate(kSmiTagMask)); |
| 4584 __ j(zero, &load_smi); |
| 4585 __ Set(kScratchRegister, BitCast<uint64_t>( |
| 4586 FixedDoubleArray::hole_nan_as_double())); |
| 4587 __ movq(result_reg, kScratchRegister); |
| 4588 __ jmp(&done, Label::kNear); |
4468 } else { | 4589 } else { |
4469 Label heap_number; | 4590 ASSERT(mode == NUMBER_CANDIDATE_IS_SMI); |
4470 __ j(equal, &heap_number, Label::kNear); | |
4471 | |
4472 __ CompareRoot(input_reg, Heap::kUndefinedValueRootIndex); | |
4473 DeoptimizeIf(not_equal, env); | |
4474 | |
4475 // Convert undefined to NaN. Compute NaN as 0/0. | |
4476 __ xorps(result_reg, result_reg); | |
4477 __ divsd(result_reg, result_reg); | |
4478 __ jmp(&done, Label::kNear); | |
4479 | |
4480 __ bind(&heap_number); | |
4481 } | 4591 } |
4482 // Heap number to XMM conversion. | |
4483 __ movsd(result_reg, FieldOperand(input_reg, HeapNumber::kValueOffset)); | |
4484 if (deoptimize_on_minus_zero) { | |
4485 XMMRegister xmm_scratch = xmm0; | |
4486 __ xorps(xmm_scratch, xmm_scratch); | |
4487 __ ucomisd(xmm_scratch, result_reg); | |
4488 __ j(not_equal, &done, Label::kNear); | |
4489 __ movmskpd(kScratchRegister, result_reg); | |
4490 __ testq(kScratchRegister, Immediate(1)); | |
4491 DeoptimizeIf(not_zero, env); | |
4492 } | |
4493 __ jmp(&done, Label::kNear); | |
4494 | 4592 |
4495 // Smi to XMM conversion | 4593 // Smi to XMM conversion |
4496 __ bind(&load_smi); | 4594 __ bind(&load_smi); |
4497 __ SmiToInteger32(kScratchRegister, input_reg); | 4595 __ SmiToInteger32(kScratchRegister, input_reg); |
4498 __ cvtlsi2sd(result_reg, kScratchRegister); | 4596 __ cvtlsi2sd(result_reg, kScratchRegister); |
4499 __ bind(&done); | 4597 __ bind(&done); |
4500 } | 4598 } |
4501 | 4599 |
4502 | 4600 |
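On the minus-zero path above, -0.0 compares equal to +0.0 under ucomisd, so the code extracts the sign bit with movmskpd and tests bit 0. The equivalent standalone check:

    #include <cstdint>
    #include <cstring>

    bool IsMinusZero(double d) {
      uint64_t bits;
      std::memcpy(&bits, &d, sizeof bits);
      return bits == (uint64_t{1} << 63);  // sign bit set, all other bits zero
    }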
4503 void LCodeGen::DoDeferredTaggedToI(LTaggedToI* instr) { | 4601 void LCodeGen::DoDeferredTaggedToI(LTaggedToI* instr) { |
(...skipping 68 matching lines...)
4572 | 4670 |
4573 void LCodeGen::DoNumberUntagD(LNumberUntagD* instr) { | 4671 void LCodeGen::DoNumberUntagD(LNumberUntagD* instr) { |
4574 LOperand* input = instr->value(); | 4672 LOperand* input = instr->value(); |
4575 ASSERT(input->IsRegister()); | 4673 ASSERT(input->IsRegister()); |
4576 LOperand* result = instr->result(); | 4674 LOperand* result = instr->result(); |
4577 ASSERT(result->IsDoubleRegister()); | 4675 ASSERT(result->IsDoubleRegister()); |
4578 | 4676 |
4579 Register input_reg = ToRegister(input); | 4677 Register input_reg = ToRegister(input); |
4580 XMMRegister result_reg = ToDoubleRegister(result); | 4678 XMMRegister result_reg = ToDoubleRegister(result); |
4581 | 4679 |
| 4680 NumberUntagDMode mode = NUMBER_CANDIDATE_IS_ANY_TAGGED; |
| 4681 HValue* value = instr->hydrogen()->value(); |
| 4682 if (value->type().IsSmi()) { |
| 4683 if (value->IsLoadKeyed()) { |
| 4684 HLoadKeyed* load = HLoadKeyed::cast(value); |
| 4685 if (load->UsesMustHandleHole()) { |
| 4686 if (load->hole_mode() == ALLOW_RETURN_HOLE) { |
| 4687 mode = NUMBER_CANDIDATE_IS_SMI_CONVERT_HOLE; |
| 4688 } else { |
| 4689 mode = NUMBER_CANDIDATE_IS_SMI_OR_HOLE; |
| 4690 } |
| 4691 } else { |
| 4692 mode = NUMBER_CANDIDATE_IS_SMI; |
| 4693 } |
| 4694 } |
| 4695 } |
| 4696 |
4582 EmitNumberUntagD(input_reg, result_reg, | 4697 EmitNumberUntagD(input_reg, result_reg, |
4583 instr->hydrogen()->deoptimize_on_undefined(), | 4698 instr->hydrogen()->deoptimize_on_undefined(), |
4584 instr->hydrogen()->deoptimize_on_minus_zero(), | 4699 instr->hydrogen()->deoptimize_on_minus_zero(), |
4585 instr->environment()); | 4700 instr->environment(), |
| 4701 mode); |
4586 } | 4702 } |
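The mode selection above, restated as a plain function with the Hydrogen predicates reduced to booleans. Note that a smi-typed value that is not a keyed load still takes the fully tagged path:

    enum NumberUntagDMode {
      NUMBER_CANDIDATE_IS_ANY_TAGGED,
      NUMBER_CANDIDATE_IS_SMI,
      NUMBER_CANDIDATE_IS_SMI_OR_HOLE,
      NUMBER_CANDIDATE_IS_SMI_CONVERT_HOLE
    };

    NumberUntagDMode ChooseMode(bool smi_typed, bool is_keyed_load,
                                bool must_handle_hole, bool allow_return_hole) {
      if (smi_typed && is_keyed_load) {
        if (must_handle_hole) {
          return allow_return_hole ? NUMBER_CANDIDATE_IS_SMI_CONVERT_HOLE
                                   : NUMBER_CANDIDATE_IS_SMI_OR_HOLE;
        }
        return NUMBER_CANDIDATE_IS_SMI;
      }
      return NUMBER_CANDIDATE_IS_ANY_TAGGED;
    }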
4587 | 4703 |
4588 | 4704 |
4589 void LCodeGen::DoDoubleToI(LDoubleToI* instr) { | 4705 void LCodeGen::DoDoubleToI(LDoubleToI* instr) { |
4590 LOperand* input = instr->value(); | 4706 LOperand* input = instr->value(); |
4591 ASSERT(input->IsDoubleRegister()); | 4707 ASSERT(input->IsDoubleRegister()); |
4592 LOperand* result = instr->result(); | 4708 LOperand* result = instr->result(); |
4593 ASSERT(result->IsRegister()); | 4709 ASSERT(result->IsRegister()); |
4594 | 4710 |
4595 XMMRegister input_reg = ToDoubleRegister(input); | 4711 XMMRegister input_reg = ToDoubleRegister(input); |
(...skipping 291 matching lines...)
4887 // contained in the register pointer map. | 5003 // contained in the register pointer map. |
4888 __ Set(result, 0); | 5004 __ Set(result, 0); |
4889 | 5005 |
4890 PushSafepointRegistersScope scope(this); | 5006 PushSafepointRegistersScope scope(this); |
4891 __ Push(Smi::FromInt(instance_size)); | 5007 __ Push(Smi::FromInt(instance_size)); |
4892 CallRuntimeFromDeferred(Runtime::kAllocateInNewSpace, 1, instr); | 5008 CallRuntimeFromDeferred(Runtime::kAllocateInNewSpace, 1, instr); |
4893 __ StoreToSafepointRegisterSlot(result, rax); | 5009 __ StoreToSafepointRegisterSlot(result, rax); |
4894 } | 5010 } |
4895 | 5011 |
4896 | 5012 |
| 5013 void LCodeGen::DoAllocate(LAllocate* instr) { |
| 5014 class DeferredAllocate: public LDeferredCode { |
| 5015 public: |
| 5016 DeferredAllocate(LCodeGen* codegen, LAllocate* instr) |
| 5017 : LDeferredCode(codegen), instr_(instr) { } |
| 5018 virtual void Generate() { codegen()->DoDeferredAllocate(instr_); } |
| 5019 virtual LInstruction* instr() { return instr_; } |
| 5020 private: |
| 5021 LAllocate* instr_; |
| 5022 }; |
| 5023 |
| 5024 DeferredAllocate* deferred = |
| 5025 new(zone()) DeferredAllocate(this, instr); |
| 5026 |
| 5027 Register size = ToRegister(instr->size()); |
| 5028 Register result = ToRegister(instr->result()); |
| 5029 Register temp = ToRegister(instr->temp()); |
| 5030 |
| 5031 HAllocate* original_instr = instr->hydrogen(); |
| 5032 if (original_instr->size()->IsConstant()) { |
| 5033 UNREACHABLE(); |
| 5034 } else { |
| 5035 // Allocate memory for the object. |
| 5036 AllocationFlags flags = TAG_OBJECT; |
| 5037 if (original_instr->MustAllocateDoubleAligned()) { |
| 5038 flags = static_cast<AllocationFlags>(flags | DOUBLE_ALIGNMENT); |
| 5039 } |
| 5040 __ AllocateInNewSpace(size, result, temp, no_reg, |
| 5041 deferred->entry(), flags); |
| 5042 } |
| 5043 |
| 5044 __ bind(deferred->exit()); |
| 5045 } |
| 5046 |
| 5047 |
| 5048 void LCodeGen::DoDeferredAllocate(LAllocate* instr) { |
| 5049 Register size = ToRegister(instr->size()); |
| 5050 Register result = ToRegister(instr->result()); |
| 5051 |
| 5052 // TODO(3095996): Get rid of this. For now, we need to make the |
| 5053 // result register contain a valid pointer because it is already |
| 5054 // contained in the register pointer map. |
| 5055 __ Set(result, 0); |
| 5056 |
| 5057 PushSafepointRegistersScope scope(this); |
| 5058 __ Integer32ToSmi(size, size); |
| 5059 __ push(size); |
| 5060 CallRuntimeFromDeferred(Runtime::kAllocateInNewSpace, 1, instr); |
| 5061 __ StoreToSafepointRegisterSlot(result, rax); |
| 5062 } |
| 5063 |
| 5064 |
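DoAllocate follows the usual deferred-code shape: an inline bump allocation in new space, with the deferred path pushing the size as a smi and calling Runtime::kAllocateInNewSpace when the fast path fails. The shape in plain C++, with illustrative names:

    #include <cstddef>
    #include <cstdint>

    struct NewSpace { uint8_t* top; uint8_t* limit; };

    // Fast path: bump-allocate `size` bytes; nullptr means "take the
    // deferred path" (the runtime call), as deferred->entry() does above.
    void* TryInlineAllocate(NewSpace* space, size_t size) {
      if (space->limit - space->top >= static_cast<ptrdiff_t>(size)) {
        uint8_t* result = space->top;
        space->top += size;
        return result;
      }
      return nullptr;
    }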
4897 void LCodeGen::DoArrayLiteral(LArrayLiteral* instr) { | 5065 void LCodeGen::DoArrayLiteral(LArrayLiteral* instr) { |
4898 Handle<FixedArray> literals(instr->environment()->closure()->literals()); | 5066 Handle<FixedArray> literals(instr->environment()->closure()->literals()); |
4899 ElementsKind boilerplate_elements_kind = | 5067 ElementsKind boilerplate_elements_kind = |
4900 instr->hydrogen()->boilerplate_elements_kind(); | 5068 instr->hydrogen()->boilerplate_elements_kind(); |
4901 AllocationSiteMode allocation_site_mode = | 5069 AllocationSiteMode allocation_site_mode = |
4902 instr->hydrogen()->allocation_site_mode(); | 5070 instr->hydrogen()->allocation_site_mode(); |
4903 | 5071 |
4904 // Deopt if the array literal boilerplate ElementsKind is of a type different | 5072 // Deopt if the array literal boilerplate ElementsKind is of a type different |
4905 // than the expected one. The check isn't necessary if the boilerplate has | 5073 // than the expected one. The check isn't necessary if the boilerplate has |
4906 // already been converted to TERMINAL_FAST_ELEMENTS_KIND. | 5074 // already been converted to TERMINAL_FAST_ELEMENTS_KIND. |
(...skipping 667 matching lines...)
5574 FixedArray::kHeaderSize - kPointerSize)); | 5742 FixedArray::kHeaderSize - kPointerSize)); |
5575 __ bind(&done); | 5743 __ bind(&done); |
5576 } | 5744 } |
5577 | 5745 |
5578 | 5746 |
5579 #undef __ | 5747 #undef __ |
5580 | 5748 |
5581 } } // namespace v8::internal | 5749 } } // namespace v8::internal |
5582 | 5750 |
5583 #endif // V8_TARGET_ARCH_X64 | 5751 #endif // V8_TARGET_ARCH_X64 |