OLD | NEW |
1 // Copyright 2012 the V8 project authors. All rights reserved. | 1 // Copyright 2012 the V8 project authors. All rights reserved. |
2 // Use of this source code is governed by a BSD-style license that can be | 2 // Use of this source code is governed by a BSD-style license that can be |
3 // found in the LICENSE file. | 3 // found in the LICENSE file. |
4 | 4 |
5 #include "v8.h" | 5 #include "v8.h" |
6 | 6 |
7 #if V8_TARGET_ARCH_IA32 | 7 #if V8_TARGET_ARCH_X87 |
8 | 8 |
9 #include "ia32/lithium-codegen-ia32.h" | 9 #include "x87/lithium-codegen-x87.h" |
10 #include "ic.h" | 10 #include "ic.h" |
11 #include "code-stubs.h" | 11 #include "code-stubs.h" |
12 #include "deoptimizer.h" | 12 #include "deoptimizer.h" |
13 #include "stub-cache.h" | 13 #include "stub-cache.h" |
14 #include "codegen.h" | 14 #include "codegen.h" |
15 #include "hydrogen-osr.h" | 15 #include "hydrogen-osr.h" |
16 | 16 |
17 namespace v8 { | 17 namespace v8 { |
18 namespace internal { | 18 namespace internal { |
19 | 19 |
| 20 |
20 // When invoking builtins, we need to record the safepoint in the middle of | 21 // When invoking builtins, we need to record the safepoint in the middle of |
21 // the invoke instruction sequence generated by the macro assembler. | 22 // the invoke instruction sequence generated by the macro assembler. |
22 class SafepointGenerator V8_FINAL : public CallWrapper { | 23 class SafepointGenerator V8_FINAL : public CallWrapper { |
23 public: | 24 public: |
24 SafepointGenerator(LCodeGen* codegen, | 25 SafepointGenerator(LCodeGen* codegen, |
25 LPointerMap* pointers, | 26 LPointerMap* pointers, |
26 Safepoint::DeoptMode mode) | 27 Safepoint::DeoptMode mode) |
27 : codegen_(codegen), | 28 : codegen_(codegen), |
28 pointers_(pointers), | 29 pointers_(pointers), |
29 deopt_mode_(mode) {} | 30 deopt_mode_(mode) {} |
(...skipping 54 matching lines...) |
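A typical call site for this wrapper, sketched from how the lithium codegen drives invokes elsewhere in this file (the exact instruction handler is outside this hunk):

    SafepointGenerator generator(this, instr->pointer_map(), Safepoint::kLazyDeopt);
    ParameterCount count(instr->arity());
    __ InvokeFunction(edi, count, CALL_FUNCTION, generator);

The wrapper's AfterCall hook is what records the safepoint once the call inside the invoke sequence has been emitted.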
84 #ifdef _MSC_VER | 85 #ifdef _MSC_VER |
85 void LCodeGen::MakeSureStackPagesMapped(int offset) { | 86 void LCodeGen::MakeSureStackPagesMapped(int offset) { |
86 const int kPageSize = 4 * KB; | 87 const int kPageSize = 4 * KB; |
87 for (offset -= kPageSize; offset > 0; offset -= kPageSize) { | 88 for (offset -= kPageSize; offset > 0; offset -= kPageSize) { |
88 __ mov(Operand(esp, offset), eax); | 89 __ mov(Operand(esp, offset), eax); |
89 } | 90 } |
90 } | 91 } |
91 #endif | 92 #endif |
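MakeSureStackPagesMapped exists because Windows commits stack memory lazily, one guard page at a time; writing one word per 4 KB page walks the guard page down before the prologue stores at large offsets from esp. A standalone C-level sketch of the same probing idea (illustrative only, not V8 code):

    // Touch one byte in every page of a freshly reserved frame, from the
    // highest offset down, so no single store lands more than one page
    // beyond the committed region.
    void ProbeStackPages(volatile char* frame_base, int frame_size) {
      const int kPageSize = 4 * 1024;
      for (int off = frame_size - kPageSize; off > 0; off -= kPageSize) {
        frame_base[off] = 0;  // Any write commits the page.
      }
    }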
92 | 93 |
93 | 94 |
94 void LCodeGen::SaveCallerDoubles() { | |
95 ASSERT(info()->saves_caller_doubles()); | |
96 ASSERT(NeedsEagerFrame()); | |
97 Comment(";;; Save clobbered callee double registers"); | |
98 int count = 0; | |
99 BitVector* doubles = chunk()->allocated_double_registers(); | |
100 BitVector::Iterator save_iterator(doubles); | |
101 while (!save_iterator.Done()) { | |
102 __ movsd(MemOperand(esp, count * kDoubleSize), | |
103 XMMRegister::FromAllocationIndex(save_iterator.Current())); | |
104 save_iterator.Advance(); | |
105 count++; | |
106 } | |
107 } | |
108 | |
109 | |
110 void LCodeGen::RestoreCallerDoubles() { | |
111 ASSERT(info()->saves_caller_doubles()); | |
112 ASSERT(NeedsEagerFrame()); | |
113 Comment(";;; Restore clobbered callee double registers"); | |
114 BitVector* doubles = chunk()->allocated_double_registers(); | |
115 BitVector::Iterator save_iterator(doubles); | |
116 int count = 0; | |
117 while (!save_iterator.Done()) { | |
118 __ movsd(XMMRegister::FromAllocationIndex(save_iterator.Current()), | |
119 MemOperand(esp, count * kDoubleSize)); | |
120 save_iterator.Advance(); | |
121 count++; | |
122 } | |
123 } | |
124 | |
125 | |
126 bool LCodeGen::GeneratePrologue() { | 95 bool LCodeGen::GeneratePrologue() { |
127 ASSERT(is_generating()); | 96 ASSERT(is_generating()); |
128 | 97 |
129 if (info()->IsOptimizing()) { | 98 if (info()->IsOptimizing()) { |
130 ProfileEntryHookStub::MaybeCallEntryHook(masm_); | 99 ProfileEntryHookStub::MaybeCallEntryHook(masm_); |
131 | 100 |
132 #ifdef DEBUG | 101 #ifdef DEBUG |
133 if (strlen(FLAG_stop_at) > 0 && | 102 if (strlen(FLAG_stop_at) > 0 && |
134 info_->function()->name()->IsUtf8EqualTo(CStrVector(FLAG_stop_at))) { | 103 info_->function()->name()->IsUtf8EqualTo(CStrVector(FLAG_stop_at))) { |
135 __ int3(); | 104 __ int3(); |
(...skipping 103 matching lines...) |
239 Comment(";;; Store dynamic frame alignment tag for spilled doubles"); | 208 Comment(";;; Store dynamic frame alignment tag for spilled doubles"); |
240 // Store dynamic frame alignment state in the first local. | 209 // Store dynamic frame alignment state in the first local. |
241 int offset = JavaScriptFrameConstants::kDynamicAlignmentStateOffset; | 210 int offset = JavaScriptFrameConstants::kDynamicAlignmentStateOffset; |
242 if (dynamic_frame_alignment_) { | 211 if (dynamic_frame_alignment_) { |
243 __ mov(Operand(ebp, offset), edx); | 212 __ mov(Operand(ebp, offset), edx); |
244 } else { | 213 } else { |
245 __ mov(Operand(ebp, offset), Immediate(kNoAlignmentPadding)); | 214 __ mov(Operand(ebp, offset), Immediate(kNoAlignmentPadding)); |
246 } | 215 } |
247 } | 216 } |
248 } | 217 } |
249 | |
250 if (info()->saves_caller_doubles()) SaveCallerDoubles(); | |
251 } | 218 } |
252 | 219 |
253 // Possibly allocate a local context. | 220 // Possibly allocate a local context. |
254 int heap_slots = info_->num_heap_slots() - Context::MIN_CONTEXT_SLOTS; | 221 int heap_slots = info_->num_heap_slots() - Context::MIN_CONTEXT_SLOTS; |
255 if (heap_slots > 0) { | 222 if (heap_slots > 0) { |
256 Comment(";;; Allocate local context"); | 223 Comment(";;; Allocate local context"); |
257 // Argument to NewContext is the function, which is still in edi. | 224 // Argument to NewContext is the function, which is still in edi. |
258 if (heap_slots <= FastNewContextStub::kMaximumSlots) { | 225 if (heap_slots <= FastNewContextStub::kMaximumSlots) { |
259 FastNewContextStub stub(isolate(), heap_slots); | 226 FastNewContextStub stub(isolate(), heap_slots); |
260 __ CallStub(&stub); | 227 __ CallStub(&stub); |
(...skipping 16 matching lines...) |
277 (num_parameters - 1 - i) * kPointerSize; | 244 (num_parameters - 1 - i) * kPointerSize; |
278 // Load parameter from stack. | 245 // Load parameter from stack. |
279 __ mov(eax, Operand(ebp, parameter_offset)); | 246 __ mov(eax, Operand(ebp, parameter_offset)); |
280 // Store it in the context. | 247 // Store it in the context. |
281 int context_offset = Context::SlotOffset(var->index()); | 248 int context_offset = Context::SlotOffset(var->index()); |
282 __ mov(Operand(esi, context_offset), eax); | 249 __ mov(Operand(esi, context_offset), eax); |
283 // Update the write barrier. This clobbers eax and ebx. | 250 // Update the write barrier. This clobbers eax and ebx. |
284 __ RecordWriteContextSlot(esi, | 251 __ RecordWriteContextSlot(esi, |
285 context_offset, | 252 context_offset, |
286 eax, | 253 eax, |
287 ebx, | 254 ebx); |
288 kDontSaveFPRegs); | |
289 } | 255 } |
290 } | 256 } |
291 Comment(";;; End allocate local context"); | 257 Comment(";;; End allocate local context"); |
292 } | 258 } |
293 | 259 |
294 // Trace the call. | 260 // Trace the call. |
295 if (FLAG_trace && info()->IsOptimizing()) { | 261 if (FLAG_trace && info()->IsOptimizing()) { |
296 // We have not executed any compiled code yet, so esi still holds the | 262 // We have not executed any compiled code yet, so esi still holds the |
297 // incoming context. | 263 // incoming context. |
298 __ CallRuntime(Runtime::kTraceEnter, 0); | 264 __ CallRuntime(Runtime::kTraceEnter, 0); |
(...skipping 53 matching lines...) |
352 } | 318 } |
353 | 319 |
354 | 320 |
355 void LCodeGen::GenerateBodyInstructionPre(LInstruction* instr) { | 321 void LCodeGen::GenerateBodyInstructionPre(LInstruction* instr) { |
356 if (instr->IsCall()) { | 322 if (instr->IsCall()) { |
357 EnsureSpaceForLazyDeopt(Deoptimizer::patch_size()); | 323 EnsureSpaceForLazyDeopt(Deoptimizer::patch_size()); |
358 } | 324 } |
359 if (!instr->IsLazyBailout() && !instr->IsGap()) { | 325 if (!instr->IsLazyBailout() && !instr->IsGap()) { |
360 safepoints_.BumpLastLazySafepointIndex(); | 326 safepoints_.BumpLastLazySafepointIndex(); |
361 } | 327 } |
| 328 FlushX87StackIfNecessary(instr); |
| 329 } |
| 330 |
| 331 |
| 332 void LCodeGen::GenerateBodyInstructionPost(LInstruction* instr) { |
| 333 if (instr->IsGoto()) { |
| 334 x87_stack_.LeavingBlock(current_block_, LGoto::cast(instr)); |
| 335 } else if (FLAG_debug_code && FLAG_enable_slow_asserts && |
| 336 !instr->IsGap() && !instr->IsReturn()) { |
| 337 if (instr->ClobbersDoubleRegisters(isolate())) { |
| 338 if (instr->HasDoubleRegisterResult()) { |
| 339 ASSERT_EQ(1, x87_stack_.depth()); |
| 340 } else { |
| 341 ASSERT_EQ(0, x87_stack_.depth()); |
| 342 } |
| 343 } |
| 344 __ VerifyX87StackDepth(x87_stack_.depth()); |
| 345 } |
362 } | 346 } |
363 | 347 |
364 | 348 |
365 void LCodeGen::GenerateBodyInstructionPost(LInstruction* instr) { } | |
366 | |
367 | |
368 bool LCodeGen::GenerateJumpTable() { | 349 bool LCodeGen::GenerateJumpTable() { |
369 Label needs_frame; | 350 Label needs_frame; |
370 if (jump_table_.length() > 0) { | 351 if (jump_table_.length() > 0) { |
371 Comment(";;; -------------------- Jump table --------------------"); | 352 Comment(";;; -------------------- Jump table --------------------"); |
372 } | 353 } |
373 for (int i = 0; i < jump_table_.length(); i++) { | 354 for (int i = 0; i < jump_table_.length(); i++) { |
374 __ bind(&jump_table_[i].label); | 355 __ bind(&jump_table_[i].label); |
375 Address entry = jump_table_[i].address; | 356 Address entry = jump_table_[i].address; |
376 Deoptimizer::BailoutType type = jump_table_[i].bailout_type; | 357 Deoptimizer::BailoutType type = jump_table_[i].bailout_type; |
377 int id = Deoptimizer::GetDeoptimizationId(isolate(), entry, type); | 358 int id = Deoptimizer::GetDeoptimizationId(isolate(), entry, type); |
(...skipping 23 matching lines...) |
401 __ call(&push_approx_pc); | 382 __ call(&push_approx_pc); |
402 __ bind(&push_approx_pc); | 383 __ bind(&push_approx_pc); |
403 // Push the continuation which was stashed where the ebp should | 384 // Push the continuation which was stashed where the ebp should |
404 // be. Replace it with the saved ebp. | 385 // be. Replace it with the saved ebp. |
405 __ push(MemOperand(esp, 3 * kPointerSize)); | 386 __ push(MemOperand(esp, 3 * kPointerSize)); |
406 __ mov(MemOperand(esp, 4 * kPointerSize), ebp); | 387 __ mov(MemOperand(esp, 4 * kPointerSize), ebp); |
407 __ lea(ebp, MemOperand(esp, 4 * kPointerSize)); | 388 __ lea(ebp, MemOperand(esp, 4 * kPointerSize)); |
408 __ ret(0); // Call the continuation without clobbering registers. | 389 __ ret(0); // Call the continuation without clobbering registers. |
409 } | 390 } |
410 } else { | 391 } else { |
411 if (info()->saves_caller_doubles()) RestoreCallerDoubles(); | |
412 __ call(entry, RelocInfo::RUNTIME_ENTRY); | 392 __ call(entry, RelocInfo::RUNTIME_ENTRY); |
413 } | 393 } |
414 } | 394 } |
415 return !is_aborted(); | 395 return !is_aborted(); |
416 } | 396 } |
417 | 397 |
418 | 398 |
419 bool LCodeGen::GenerateDeferredCode() { | 399 bool LCodeGen::GenerateDeferredCode() { |
420 ASSERT(is_generating()); | 400 ASSERT(is_generating()); |
421 if (deferred_.length() > 0) { | 401 if (deferred_.length() > 0) { |
422 for (int i = 0; !is_aborted() && i < deferred_.length(); i++) { | 402 for (int i = 0; !is_aborted() && i < deferred_.length(); i++) { |
423 LDeferredCode* code = deferred_[i]; | 403 LDeferredCode* code = deferred_[i]; |
| 404 X87Stack copy(code->x87_stack()); |
| 405 x87_stack_ = copy; |
424 | 406 |
425 HValue* value = | 407 HValue* value = |
426 instructions_->at(code->instruction_index())->hydrogen_value(); | 408 instructions_->at(code->instruction_index())->hydrogen_value(); |
427 RecordAndWritePosition( | 409 RecordAndWritePosition( |
428 chunk()->graph()->SourcePositionToScriptPosition(value->position())); | 410 chunk()->graph()->SourcePositionToScriptPosition(value->position())); |
429 | 411 |
430 Comment(";;; <@%d,#%d> " | 412 Comment(";;; <@%d,#%d> " |
431 "-------------------- Deferred %s --------------------", | 413 "-------------------- Deferred %s --------------------", |
432 code->instruction_index(), | 414 code->instruction_index(), |
433 code->instr()->hydrogen_value()->id(), | 415 code->instr()->hydrogen_value()->id(), |
(...skipping 45 matching lines...) |
479 safepoints_.Emit(masm(), GetStackSlotCount()); | 461 safepoints_.Emit(masm(), GetStackSlotCount()); |
480 return !is_aborted(); | 462 return !is_aborted(); |
481 } | 463 } |
482 | 464 |
483 | 465 |
484 Register LCodeGen::ToRegister(int index) const { | 466 Register LCodeGen::ToRegister(int index) const { |
485 return Register::FromAllocationIndex(index); | 467 return Register::FromAllocationIndex(index); |
486 } | 468 } |
487 | 469 |
488 | 470 |
489 XMMRegister LCodeGen::ToDoubleRegister(int index) const { | 471 X87Register LCodeGen::ToX87Register(int index) const { |
490 return XMMRegister::FromAllocationIndex(index); | 472 return X87Register::FromAllocationIndex(index); |
491 } | 473 } |
492 | 474 |
493 | 475 |
| 476 void LCodeGen::X87LoadForUsage(X87Register reg) { |
| 477 ASSERT(x87_stack_.Contains(reg)); |
| 478 x87_stack_.Fxch(reg); |
| 479 x87_stack_.pop(); |
| 480 } |
| 481 |
| 482 |
| 483 void LCodeGen::X87LoadForUsage(X87Register reg1, X87Register reg2) { |
| 484 ASSERT(x87_stack_.Contains(reg1)); |
| 485 ASSERT(x87_stack_.Contains(reg2)); |
| 486 x87_stack_.Fxch(reg1, 1); |
| 487 x87_stack_.Fxch(reg2); |
| 488 x87_stack_.pop(); |
| 489 x87_stack_.pop(); |
| 490 } |
| 491 |
| 492 |
| 493 void LCodeGen::X87Stack::Fxch(X87Register reg, int other_slot) { |
| 494 ASSERT(is_mutable_); |
| 495 ASSERT(Contains(reg) && stack_depth_ > other_slot); |
| 496 int i = ArrayIndex(reg); |
| 497 int st = st2idx(i); |
| 498 if (st != other_slot) { |
| 499 int other_i = st2idx(other_slot); |
| 500 X87Register other = stack_[other_i]; |
| 501 stack_[other_i] = reg; |
| 502 stack_[i] = other; |
| 503 if (st == 0) { |
| 504 __ fxch(other_slot); |
| 505 } else if (other_slot == 0) { |
| 506 __ fxch(st); |
| 507 } else { |
| 508 __ fxch(st); |
| 509 __ fxch(other_slot); |
| 510 __ fxch(st); |
| 511 } |
| 512 } |
| 513 } |
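Because the hardware fxch can only exchange a slot with st(0), swapping two slots when neither is the top takes the three exchanges emitted in the final else branch above. A minimal simulation of that sequence, with the register stack modeled as a plain array whose index 0 is the top (a sketch, assuming #include <utility>):

    // Swap st[i] and st[j] (i, j != 0) using only exchanges with st[0],
    // mirroring the three fxch instructions emitted above.
    void SwapNonTop(double st[], int i, int j) {
      std::swap(st[0], st[i]);  // fxch(i): old st(i) is now on top.
      std::swap(st[0], st[j]);  // fxch(j): old st(i) lands in st(j).
      std::swap(st[0], st[i]);  // fxch(i): old st(j) lands in st(i); top restored.
    }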
| 514 |
| 515 |
| 516 int LCodeGen::X87Stack::st2idx(int pos) { |
| 517 return stack_depth_ - pos - 1; |
| 518 } |
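A quick worked example of the mapping: the virtual stack_ array grows upward while x87 positions count down from the top, and the formula is its own inverse, so it converts in both directions. With stack_depth_ == 3:

    st2idx(0) == 3 - 0 - 1 == 2   // st(0) is stack_[2], the newest value.
    st2idx(2) == 3 - 2 - 1 == 0   // st(2) is stack_[0], the oldest value.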
| 519 |
| 520 |
| 521 int LCodeGen::X87Stack::ArrayIndex(X87Register reg) { |
| 522 for (int i = 0; i < stack_depth_; i++) { |
| 523 if (stack_[i].is(reg)) return i; |
| 524 } |
| 525 UNREACHABLE(); |
| 526 return -1; |
| 527 } |
| 528 |
| 529 |
| 530 bool LCodeGen::X87Stack::Contains(X87Register reg) { |
| 531 for (int i = 0; i < stack_depth_; i++) { |
| 532 if (stack_[i].is(reg)) return true; |
| 533 } |
| 534 return false; |
| 535 } |
| 536 |
| 537 |
| 538 void LCodeGen::X87Stack::Free(X87Register reg) { |
| 539 ASSERT(is_mutable_); |
| 540 ASSERT(Contains(reg)); |
| 541 int i = ArrayIndex(reg); |
| 542 int st = st2idx(i); |
| 543 if (st > 0) { |
| 544 // Keep track of how fstp(i) changes the order of elements. |
| 545 int tos_i = st2idx(0); |
| 546 stack_[i] = stack_[tos_i]; |
| 547 } |
| 548 pop(); |
| 549 __ fstp(st); |
| 550 } |
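Free relies on the fact that fstp st(i) first copies st(0) into st(i) and then pops, so the old top of stack lands in the freed slot; the array is patched before the pop to keep the virtual view in sync. For example, freeing b at depth 3:

    // Before:  st(0)=c  st(1)=b  st(2)=a     stack_ == {a, b, c}
    // fstp st(1): b is overwritten by c, then the top is popped.
    // After:   st(0)=c  st(1)=a              stack_ == {a, c}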
| 551 |
| 552 |
| 553 void LCodeGen::X87Mov(X87Register dst, Operand src, X87OperandType opts) { |
| 554 if (x87_stack_.Contains(dst)) { |
| 555 x87_stack_.Fxch(dst); |
| 556 __ fstp(0); |
| 557 } else { |
| 558 x87_stack_.push(dst); |
| 559 } |
| 560 X87Fld(src, opts); |
| 561 } |
| 562 |
| 563 |
| 564 void LCodeGen::X87Fld(Operand src, X87OperandType opts) { |
| 565 ASSERT(!src.is_reg_only()); |
| 566 switch (opts) { |
| 567 case kX87DoubleOperand: |
| 568 __ fld_d(src); |
| 569 break; |
| 570 case kX87FloatOperand: |
| 571 __ fld_s(src); |
| 572 break; |
| 573 case kX87IntOperand: |
| 574 __ fild_s(src); |
| 575 break; |
| 576 default: |
| 577 UNREACHABLE(); |
| 578 } |
| 579 } |
| 580 |
| 581 |
| 582 void LCodeGen::X87Mov(Operand dst, X87Register src, X87OperandType opts) { |
| 583 ASSERT(!dst.is_reg_only()); |
| 584 x87_stack_.Fxch(src); |
| 585 switch (opts) { |
| 586 case kX87DoubleOperand: |
| 587 __ fst_d(dst); |
| 588 break; |
| 589 case kX87IntOperand: |
| 590 __ fist_s(dst); |
| 591 break; |
| 592 default: |
| 593 UNREACHABLE(); |
| 594 } |
| 595 } |
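Taken together, these helpers let instruction handlers move doubles between memory and tracked x87 slots without emitting fld/fstp by hand. A typical pair, mirroring uses later in this patch (object and offset are whatever the instruction provides):

    X87Mov(ToX87Register(instr->result()), FieldOperand(object, offset));  // fld_d + virtual push.
    X87Mov(FieldOperand(object, offset), ToX87Register(instr->value()));   // fxch to top + fst_d.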
| 596 |
| 597 |
| 598 void LCodeGen::X87Stack::PrepareToWrite(X87Register reg) { |
| 599 ASSERT(is_mutable_); |
| 600 if (Contains(reg)) { |
| 601 Free(reg); |
| 602 } |
| 603 // Mark this register as the next register to write to |
| 604 stack_[stack_depth_] = reg; |
| 605 } |
| 606 |
| 607 |
| 608 void LCodeGen::X87Stack::CommitWrite(X87Register reg) { |
| 609 ASSERT(is_mutable_); |
| 610 // Assert the reg is prepared to write, but not on the virtual stack yet |
| 611 ASSERT(!Contains(reg) && stack_[stack_depth_].is(reg) && |
| 612 stack_depth_ < X87Register::kMaxNumAllocatableRegisters); |
| 613 stack_depth_++; |
| 614 } |
| 615 |
| 616 |
| 617 void LCodeGen::X87PrepareBinaryOp( |
| 618 X87Register left, X87Register right, X87Register result) { |
| 619 // You need to use DefineSameAsFirst for x87 instructions |
| 620 ASSERT(result.is(left)); |
| 621 x87_stack_.Fxch(right, 1); |
| 622 x87_stack_.Fxch(left); |
| 623 } |
| 624 |
| 625 |
| 626 void LCodeGen::X87Stack::FlushIfNecessary(LInstruction* instr, LCodeGen* cgen) { |
| 627 if (stack_depth_ > 0 && instr->ClobbersDoubleRegisters(isolate())) { |
| 628 bool double_inputs = instr->HasDoubleRegisterInput(); |
| 629 |
| 630 // Flush the stack from tos down, since Free() will mess with tos. |
| 631 for (int i = stack_depth_-1; i >= 0; i--) { |
| 632 X87Register reg = stack_[i]; |
| 633 // Skip registers which contain the inputs for the next instruction |
| 634 // when flushing the stack |
| 635 if (double_inputs && instr->IsDoubleInput(reg, cgen)) { |
| 636 continue; |
| 637 } |
| 638 Free(reg); |
| 639 if (i < stack_depth_-1) i++; |
| 640 } |
| 641 } |
| 642 if (instr->IsReturn()) { |
| 643 while (stack_depth_ > 0) { |
| 644 __ fstp(0); |
| 645 stack_depth_--; |
| 646 } |
| 647 if (FLAG_debug_code && FLAG_enable_slow_asserts) __ VerifyX87StackDepth(0); |
| 648 } |
| 649 } |
| 650 |
| 651 |
| 652 void LCodeGen::X87Stack::LeavingBlock(int current_block_id, LGoto* goto_instr) { |
| 653 ASSERT(stack_depth_ <= 1); |
| 654 // If this is ever used for new stubs producing two pairs of doubles joined |
| 655 // into two phis, this assert will hit. That situation is not handled, since |
| 656 // the two stacks might have st0 and st1 swapped. |
| 657 if (current_block_id + 1 != goto_instr->block_id()) { |
| 658 // If we have a value on the x87 stack on leaving a block, it must be a |
| 659 // phi input. If the next block we compile is not the join block, we have |
| 660 // to discard the stack state. |
| 661 stack_depth_ = 0; |
| 662 } |
| 663 } |
| 664 |
| 665 |
| 666 void LCodeGen::EmitFlushX87ForDeopt() { |
| 667 // The deoptimizer does not support X87 registers. But as long as we |
| 668 // deopt from a stub it's not a problem, since we will re-materialize the |
| 669 // original stub inputs, which can't be double registers. |
| 670 ASSERT(info()->IsStub()); |
| 671 if (FLAG_debug_code && FLAG_enable_slow_asserts) { |
| 672 __ pushfd(); |
| 673 __ VerifyX87StackDepth(x87_stack_.depth()); |
| 674 __ popfd(); |
| 675 } |
| 676 for (int i = 0; i < x87_stack_.depth(); i++) __ fstp(0); |
| 677 } |
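The flush itself is just repeated discards: fstp(0) stores st(0) onto itself and pops, dropping the top of the physical stack. For a depth of 2 the emitted sequence is simply:

    __ fstp(0);  // Discard st(0); the old st(1) becomes the new top.
    __ fstp(0);  // FPU stack now empty, as the deoptimizer expects.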
| 678 |
| 679 |
494 Register LCodeGen::ToRegister(LOperand* op) const { | 680 Register LCodeGen::ToRegister(LOperand* op) const { |
495 ASSERT(op->IsRegister()); | 681 ASSERT(op->IsRegister()); |
496 return ToRegister(op->index()); | 682 return ToRegister(op->index()); |
497 } | 683 } |
498 | 684 |
499 | 685 |
500 XMMRegister LCodeGen::ToDoubleRegister(LOperand* op) const { | 686 X87Register LCodeGen::ToX87Register(LOperand* op) const { |
501 ASSERT(op->IsDoubleRegister()); | 687 ASSERT(op->IsDoubleRegister()); |
502 return ToDoubleRegister(op->index()); | 688 return ToX87Register(op->index()); |
503 } | 689 } |
504 | 690 |
505 | 691 |
506 int32_t LCodeGen::ToInteger32(LConstantOperand* op) const { | 692 int32_t LCodeGen::ToInteger32(LConstantOperand* op) const { |
507 return ToRepresentation(op, Representation::Integer32()); | 693 return ToRepresentation(op, Representation::Integer32()); |
508 } | 694 } |
509 | 695 |
510 | 696 |
511 int32_t LCodeGen::ToRepresentation(LConstantOperand* op, | 697 int32_t LCodeGen::ToRepresentation(LConstantOperand* op, |
512 const Representation& r) const { | 698 const Representation& r) const { |
(...skipping 37 matching lines...) |
550 | 736 |
551 | 737 |
552 static int ArgumentsOffsetWithoutFrame(int index) { | 738 static int ArgumentsOffsetWithoutFrame(int index) { |
553 ASSERT(index < 0); | 739 ASSERT(index < 0); |
554 return -(index + 1) * kPointerSize + kPCOnStackSize; | 740 return -(index + 1) * kPointerSize + kPCOnStackSize; |
555 } | 741 } |
556 | 742 |
557 | 743 |
558 Operand LCodeGen::ToOperand(LOperand* op) const { | 744 Operand LCodeGen::ToOperand(LOperand* op) const { |
559 if (op->IsRegister()) return Operand(ToRegister(op)); | 745 if (op->IsRegister()) return Operand(ToRegister(op)); |
560 if (op->IsDoubleRegister()) return Operand(ToDoubleRegister(op)); | 746 ASSERT(!op->IsDoubleRegister()); |
561 ASSERT(op->IsStackSlot() || op->IsDoubleStackSlot()); | 747 ASSERT(op->IsStackSlot() || op->IsDoubleStackSlot()); |
562 if (NeedsEagerFrame()) { | 748 if (NeedsEagerFrame()) { |
563 return Operand(ebp, StackSlotOffset(op->index())); | 749 return Operand(ebp, StackSlotOffset(op->index())); |
564 } else { | 750 } else { |
565 // Retrieve parameter without eager stack-frame relative to the | 751 // Retrieve parameter without eager stack-frame relative to the |
566 // stack-pointer. | 752 // stack-pointer. |
567 return Operand(esp, ArgumentsOffsetWithoutFrame(op->index())); | 753 return Operand(esp, ArgumentsOffsetWithoutFrame(op->index())); |
568 } | 754 } |
569 } | 755 } |
570 | 756 |
(...skipping 116 matching lines...) |
687 translation->StoreDoubleStackSlot(op->index()); | 873 translation->StoreDoubleStackSlot(op->index()); |
688 } else if (op->IsRegister()) { | 874 } else if (op->IsRegister()) { |
689 Register reg = ToRegister(op); | 875 Register reg = ToRegister(op); |
690 if (is_tagged) { | 876 if (is_tagged) { |
691 translation->StoreRegister(reg); | 877 translation->StoreRegister(reg); |
692 } else if (is_uint32) { | 878 } else if (is_uint32) { |
693 translation->StoreUint32Register(reg); | 879 translation->StoreUint32Register(reg); |
694 } else { | 880 } else { |
695 translation->StoreInt32Register(reg); | 881 translation->StoreInt32Register(reg); |
696 } | 882 } |
697 } else if (op->IsDoubleRegister()) { | |
698 XMMRegister reg = ToDoubleRegister(op); | |
699 translation->StoreDoubleRegister(reg); | |
700 } else if (op->IsConstantOperand()) { | 883 } else if (op->IsConstantOperand()) { |
701 HConstant* constant = chunk()->LookupConstant(LConstantOperand::cast(op)); | 884 HConstant* constant = chunk()->LookupConstant(LConstantOperand::cast(op)); |
702 int src_index = DefineDeoptimizationLiteral(constant->handle(isolate())); | 885 int src_index = DefineDeoptimizationLiteral(constant->handle(isolate())); |
703 translation->StoreLiteral(src_index); | 886 translation->StoreLiteral(src_index); |
704 } else { | 887 } else { |
705 UNREACHABLE(); | 888 UNREACHABLE(); |
706 } | 889 } |
707 } | 890 } |
708 | 891 |
709 | 892 |
(...skipping 16 matching lines...) |
726 | 909 |
727 void LCodeGen::CallCode(Handle<Code> code, | 910 void LCodeGen::CallCode(Handle<Code> code, |
728 RelocInfo::Mode mode, | 911 RelocInfo::Mode mode, |
729 LInstruction* instr) { | 912 LInstruction* instr) { |
730 CallCodeGeneric(code, mode, instr, RECORD_SIMPLE_SAFEPOINT); | 913 CallCodeGeneric(code, mode, instr, RECORD_SIMPLE_SAFEPOINT); |
731 } | 914 } |
732 | 915 |
733 | 916 |
734 void LCodeGen::CallRuntime(const Runtime::Function* fun, | 917 void LCodeGen::CallRuntime(const Runtime::Function* fun, |
735 int argc, | 918 int argc, |
736 LInstruction* instr, | 919 LInstruction* instr) { |
737 SaveFPRegsMode save_doubles) { | |
738 ASSERT(instr != NULL); | 920 ASSERT(instr != NULL); |
739 ASSERT(instr->HasPointerMap()); | 921 ASSERT(instr->HasPointerMap()); |
740 | 922 |
741 __ CallRuntime(fun, argc, save_doubles); | 923 __ CallRuntime(fun, argc); |
742 | 924 |
743 RecordSafepointWithLazyDeopt(instr, RECORD_SIMPLE_SAFEPOINT); | 925 RecordSafepointWithLazyDeopt(instr, RECORD_SIMPLE_SAFEPOINT); |
744 | 926 |
745 ASSERT(info()->is_calling()); | 927 ASSERT(info()->is_calling()); |
746 } | 928 } |
747 | 929 |
748 | 930 |
749 void LCodeGen::LoadContextFromDeferred(LOperand* context) { | 931 void LCodeGen::LoadContextFromDeferred(LOperand* context) { |
750 if (context->IsRegister()) { | 932 if (context->IsRegister()) { |
751 if (!ToRegister(context).is(esi)) { | 933 if (!ToRegister(context).is(esi)) { |
752 __ mov(esi, ToRegister(context)); | 934 __ mov(esi, ToRegister(context)); |
753 } | 935 } |
754 } else if (context->IsStackSlot()) { | 936 } else if (context->IsStackSlot()) { |
755 __ mov(esi, ToOperand(context)); | 937 __ mov(esi, ToOperand(context)); |
756 } else if (context->IsConstantOperand()) { | 938 } else if (context->IsConstantOperand()) { |
757 HConstant* constant = | 939 HConstant* constant = |
758 chunk_->LookupConstant(LConstantOperand::cast(context)); | 940 chunk_->LookupConstant(LConstantOperand::cast(context)); |
759 __ LoadObject(esi, Handle<Object>::cast(constant->handle(isolate()))); | 941 __ LoadObject(esi, Handle<Object>::cast(constant->handle(isolate()))); |
760 } else { | 942 } else { |
761 UNREACHABLE(); | 943 UNREACHABLE(); |
762 } | 944 } |
763 } | 945 } |
764 | 946 |
765 void LCodeGen::CallRuntimeFromDeferred(Runtime::FunctionId id, | 947 void LCodeGen::CallRuntimeFromDeferred(Runtime::FunctionId id, |
766 int argc, | 948 int argc, |
767 LInstruction* instr, | 949 LInstruction* instr, |
768 LOperand* context) { | 950 LOperand* context) { |
769 LoadContextFromDeferred(context); | 951 LoadContextFromDeferred(context); |
770 | 952 |
771 __ CallRuntimeSaveDoubles(id); | 953 __ CallRuntime(id); |
772 RecordSafepointWithRegisters( | 954 RecordSafepointWithRegisters( |
773 instr->pointer_map(), argc, Safepoint::kNoLazyDeopt); | 955 instr->pointer_map(), argc, Safepoint::kNoLazyDeopt); |
774 | 956 |
775 ASSERT(info()->is_calling()); | 957 ASSERT(info()->is_calling()); |
776 } | 958 } |
777 | 959 |
778 | 960 |
779 void LCodeGen::RegisterEnvironmentForDeoptimization( | 961 void LCodeGen::RegisterEnvironmentForDeoptimization( |
780 LEnvironment* environment, Safepoint::DeoptMode mode) { | 962 LEnvironment* environment, Safepoint::DeoptMode mode) { |
781 environment->set_has_been_used(); | 963 environment->set_has_been_used(); |
(...skipping 59 matching lines...) |
841 __ pop(eax); | 1023 __ pop(eax); |
842 __ popfd(); | 1024 __ popfd(); |
843 ASSERT(frame_is_built_); | 1025 ASSERT(frame_is_built_); |
844 __ call(entry, RelocInfo::RUNTIME_ENTRY); | 1026 __ call(entry, RelocInfo::RUNTIME_ENTRY); |
845 __ bind(&no_deopt); | 1027 __ bind(&no_deopt); |
846 __ mov(Operand::StaticVariable(count), eax); | 1028 __ mov(Operand::StaticVariable(count), eax); |
847 __ pop(eax); | 1029 __ pop(eax); |
848 __ popfd(); | 1030 __ popfd(); |
849 } | 1031 } |
850 | 1032 |
| 1033 // Before instructions which can deopt, we normally flush the x87 stack. But |
| 1034 // we can have inputs or outputs of the current instruction on the stack, |
| 1035 // thus we need to flush them here from the physical stack to leave it in a |
| 1036 // consistent state. |
| 1037 if (x87_stack_.depth() > 0) { |
| 1038 Label done; |
| 1039 if (cc != no_condition) __ j(NegateCondition(cc), &done, Label::kNear); |
| 1040 EmitFlushX87ForDeopt(); |
| 1041 __ bind(&done); |
| 1042 } |
| 1043 |
851 if (info()->ShouldTrapOnDeopt()) { | 1044 if (info()->ShouldTrapOnDeopt()) { |
852 Label done; | 1045 Label done; |
853 if (cc != no_condition) __ j(NegateCondition(cc), &done, Label::kNear); | 1046 if (cc != no_condition) __ j(NegateCondition(cc), &done, Label::kNear); |
854 __ int3(); | 1047 __ int3(); |
855 __ bind(&done); | 1048 __ bind(&done); |
856 } | 1049 } |
857 | 1050 |
858 ASSERT(info()->IsStub() || frame_is_built_); | 1051 ASSERT(info()->IsStub() || frame_is_built_); |
859 if (cc == no_condition && frame_is_built_) { | 1052 if (cc == no_condition && frame_is_built_) { |
860 __ call(entry, RelocInfo::RUNTIME_ENTRY); | 1053 __ call(entry, RelocInfo::RUNTIME_ENTRY); |
(...skipping 840 matching lines...) |
1701 } | 1894 } |
1702 | 1895 |
1703 | 1896 |
1704 void LCodeGen::DoConstantD(LConstantD* instr) { | 1897 void LCodeGen::DoConstantD(LConstantD* instr) { |
1705 double v = instr->value(); | 1898 double v = instr->value(); |
1706 uint64_t int_val = BitCast<uint64_t, double>(v); | 1899 uint64_t int_val = BitCast<uint64_t, double>(v); |
1707 int32_t lower = static_cast<int32_t>(int_val); | 1900 int32_t lower = static_cast<int32_t>(int_val); |
1708 int32_t upper = static_cast<int32_t>(int_val >> (kBitsPerInt)); | 1901 int32_t upper = static_cast<int32_t>(int_val >> (kBitsPerInt)); |
1709 ASSERT(instr->result()->IsDoubleRegister()); | 1902 ASSERT(instr->result()->IsDoubleRegister()); |
1710 | 1903 |
1711 XMMRegister res = ToDoubleRegister(instr->result()); | 1904 __ push(Immediate(upper)); |
1712 if (int_val == 0) { | 1905 __ push(Immediate(lower)); |
1713 __ xorps(res, res); | 1906 X87Register reg = ToX87Register(instr->result()); |
1714 } else { | 1907 X87Mov(reg, Operand(esp, 0)); |
1715 Register temp = ToRegister(instr->temp()); | 1908 __ add(Operand(esp), Immediate(kDoubleSize)); |
1716 if (CpuFeatures::IsSupported(SSE4_1)) { | |
1717 CpuFeatureScope scope2(masm(), SSE4_1); | |
1718 if (lower != 0) { | |
1719 __ Move(temp, Immediate(lower)); | |
1720 __ movd(res, Operand(temp)); | |
1721 __ Move(temp, Immediate(upper)); | |
1722 __ pinsrd(res, Operand(temp), 1); | |
1723 } else { | |
1724 __ xorps(res, res); | |
1725 __ Move(temp, Immediate(upper)); | |
1726 __ pinsrd(res, Operand(temp), 1); | |
1727 } | |
1728 } else { | |
1729 __ Move(temp, Immediate(upper)); | |
1730 __ movd(res, Operand(temp)); | |
1731 __ psllq(res, 32); | |
1732 if (lower != 0) { | |
1733 XMMRegister xmm_scratch = double_scratch0(); | |
1734 __ Move(temp, Immediate(lower)); | |
1735 __ movd(xmm_scratch, Operand(temp)); | |
1736 __ orps(res, xmm_scratch); | |
1737 } | |
1738 } | |
1739 } | |
1740 } | 1909 } |
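A worked example of the new sequence: for v == 1.5 the IEEE-754 bits are 0x3FF8000000000000, so upper == 0x3FF80000 and lower == 0x00000000. Pushing upper first leaves the low word at the lower address, which is the little-endian layout fld_d expects:

    __ push(Immediate(0x3FF80000));  // Upper half, ends up at esp+4.
    __ push(Immediate(0x00000000));  // Lower half, ends up at esp+0.
    X87Mov(reg, Operand(esp, 0));    // fld_d [esp]: 1.5 now on the x87 stack.
    __ add(Operand(esp), Immediate(kDoubleSize));  // Release the scratch slot.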
1741 | 1910 |
1742 | 1911 |
1743 void LCodeGen::DoConstantE(LConstantE* instr) { | 1912 void LCodeGen::DoConstantE(LConstantE* instr) { |
1744 __ lea(ToRegister(instr->result()), Operand::StaticVariable(instr->value())); | 1913 __ lea(ToRegister(instr->result()), Operand::StaticVariable(instr->value())); |
1745 } | 1914 } |
1746 | 1915 |
1747 | 1916 |
1748 void LCodeGen::DoConstantT(LConstantT* instr) { | 1917 void LCodeGen::DoConstantT(LConstantT* instr) { |
1749 Register reg = ToRegister(instr->result()); | 1918 Register reg = ToRegister(instr->result()); |
(...skipping 177 matching lines...) |
1927 __ mov(left_op, immediate); | 2096 __ mov(left_op, immediate); |
1928 } else { | 2097 } else { |
1929 Register left_reg = ToRegister(left); | 2098 Register left_reg = ToRegister(left); |
1930 Operand right_op = ToOperand(right); | 2099 Operand right_op = ToOperand(right); |
1931 __ cmp(left_reg, right_op); | 2100 __ cmp(left_reg, right_op); |
1932 __ j(condition, &return_left, Label::kNear); | 2101 __ j(condition, &return_left, Label::kNear); |
1933 __ mov(left_reg, right_op); | 2102 __ mov(left_reg, right_op); |
1934 } | 2103 } |
1935 __ bind(&return_left); | 2104 __ bind(&return_left); |
1936 } else { | 2105 } else { |
1937 ASSERT(instr->hydrogen()->representation().IsDouble()); | 2106 // TODO(weiliang) use X87 for double representation. |
1938 Label check_nan_left, check_zero, return_left, return_right; | 2107 UNIMPLEMENTED(); |
1939 Condition condition = (operation == HMathMinMax::kMathMin) ? below : above; | |
1940 XMMRegister left_reg = ToDoubleRegister(left); | |
1941 XMMRegister right_reg = ToDoubleRegister(right); | |
1942 __ ucomisd(left_reg, right_reg); | |
1943 __ j(parity_even, &check_nan_left, Label::kNear); // At least one NaN. | |
1944 __ j(equal, &check_zero, Label::kNear); // left == right. | |
1945 __ j(condition, &return_left, Label::kNear); | |
1946 __ jmp(&return_right, Label::kNear); | |
1947 | |
1948 __ bind(&check_zero); | |
1949 XMMRegister xmm_scratch = double_scratch0(); | |
1950 __ xorps(xmm_scratch, xmm_scratch); | |
1951 __ ucomisd(left_reg, xmm_scratch); | |
1952 __ j(not_equal, &return_left, Label::kNear); // left == right != 0. | |
1953 // At this point, both left and right are either 0 or -0. | |
1954 if (operation == HMathMinMax::kMathMin) { | |
1955 __ orpd(left_reg, right_reg); | |
1956 } else { | |
1957 // Since we operate on +0 and/or -0, addsd and andsd have the same effect. | |
1958 __ addsd(left_reg, right_reg); | |
1959 } | |
1960 __ jmp(&return_left, Label::kNear); | |
1961 | |
1962 __ bind(&check_nan_left); | |
1963 __ ucomisd(left_reg, left_reg); // NaN check. | |
1964 __ j(parity_even, &return_left, Label::kNear); // left == NaN. | |
1965 __ bind(&return_right); | |
1966 __ movaps(left_reg, right_reg); | |
1967 | |
1968 __ bind(&return_left); | |
1969 } | 2108 } |
1970 } | 2109 } |
1971 | 2110 |
1972 | 2111 |
1973 void LCodeGen::DoArithmeticD(LArithmeticD* instr) { | 2112 void LCodeGen::DoArithmeticD(LArithmeticD* instr) { |
1974 XMMRegister left = ToDoubleRegister(instr->left()); | 2113 X87Register left = ToX87Register(instr->left()); |
1975 XMMRegister right = ToDoubleRegister(instr->right()); | 2114 X87Register right = ToX87Register(instr->right()); |
1976 XMMRegister result = ToDoubleRegister(instr->result()); | 2115 X87Register result = ToX87Register(instr->result()); |
| 2116 if (instr->op() != Token::MOD) { |
| 2117 X87PrepareBinaryOp(left, right, result); |
| 2118 } |
1977 switch (instr->op()) { | 2119 switch (instr->op()) { |
1978 case Token::ADD: | 2120 case Token::ADD: |
1979 __ addsd(left, right); | 2121 __ fadd_i(1); |
1980 break; | 2122 break; |
1981 case Token::SUB: | 2123 case Token::SUB: |
1982 __ subsd(left, right); | 2124 __ fsub_i(1); |
1983 break; | 2125 break; |
1984 case Token::MUL: | 2126 case Token::MUL: |
1985 __ mulsd(left, right); | 2127 __ fmul_i(1); |
1986 break; | 2128 break; |
1987 case Token::DIV: | 2129 case Token::DIV: |
1988 __ divsd(left, right); | 2130 __ fdiv_i(1); |
1989 // Don't delete this mov. It may improve performance on some CPUs, | |
1990 // when there is a mulsd depending on the result | |
1991 __ movaps(left, left); | |
1992 break; | 2131 break; |
1993 case Token::MOD: { | 2132 case Token::MOD: { |
1994 // Pass two doubles as arguments on the stack. | 2133 // Pass two doubles as arguments on the stack. |
1995 __ PrepareCallCFunction(4, eax); | 2134 __ PrepareCallCFunction(4, eax); |
1996 __ movsd(Operand(esp, 0 * kDoubleSize), left); | 2135 X87Mov(Operand(esp, 1 * kDoubleSize), right); |
1997 __ movsd(Operand(esp, 1 * kDoubleSize), right); | 2136 X87Mov(Operand(esp, 0), left); |
| 2137 X87Free(right); |
| 2138 ASSERT(left.is(result)); |
| 2139 X87PrepareToWrite(result); |
1998 __ CallCFunction( | 2140 __ CallCFunction( |
1999 ExternalReference::mod_two_doubles_operation(isolate()), | 2141 ExternalReference::mod_two_doubles_operation(isolate()), |
2000 4); | 2142 4); |
2001 | 2143 |
2002 // Return value is in st(0) on ia32. | 2144 // Return value is in st(0) on ia32. |
2003 // Store it into the result register. | 2145 X87CommitWrite(result); |
2004 __ sub(Operand(esp), Immediate(kDoubleSize)); | |
2005 __ fstp_d(Operand(esp, 0)); | |
2006 __ movsd(result, Operand(esp, 0)); | |
2007 __ add(Operand(esp), Immediate(kDoubleSize)); | |
2008 break; | 2146 break; |
2009 } | 2147 } |
2010 default: | 2148 default: |
2011 UNREACHABLE(); | 2149 UNREACHABLE(); |
2012 break; | 2150 break; |
2013 } | 2151 } |
2014 } | 2152 } |
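For Token::MOD both operands are spilled and the work is delegated to a C helper, which by the ia32 calling convention returns its double result in st(0), exactly where X87CommitWrite expects it. Conceptually the helper behaves like this (a sketch, not the actual runtime source):

    #include <math.h>
    double mod_two_doubles(double dividend, double divisor) {
      return fmod(dividend, divisor);  // Result keeps the dividend's sign.
    }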
2015 | 2153 |
2016 | 2154 |
2017 void LCodeGen::DoArithmeticT(LArithmeticT* instr) { | 2155 void LCodeGen::DoArithmeticT(LArithmeticT* instr) { |
(...skipping 38 matching lines...) |
2056 } | 2194 } |
2057 | 2195 |
2058 | 2196 |
2059 void LCodeGen::DoBranch(LBranch* instr) { | 2197 void LCodeGen::DoBranch(LBranch* instr) { |
2060 Representation r = instr->hydrogen()->value()->representation(); | 2198 Representation r = instr->hydrogen()->value()->representation(); |
2061 if (r.IsSmiOrInteger32()) { | 2199 if (r.IsSmiOrInteger32()) { |
2062 Register reg = ToRegister(instr->value()); | 2200 Register reg = ToRegister(instr->value()); |
2063 __ test(reg, Operand(reg)); | 2201 __ test(reg, Operand(reg)); |
2064 EmitBranch(instr, not_zero); | 2202 EmitBranch(instr, not_zero); |
2065 } else if (r.IsDouble()) { | 2203 } else if (r.IsDouble()) { |
2066 ASSERT(!info()->IsStub()); | 2204 UNREACHABLE(); |
2067 XMMRegister reg = ToDoubleRegister(instr->value()); | |
2068 XMMRegister xmm_scratch = double_scratch0(); | |
2069 __ xorps(xmm_scratch, xmm_scratch); | |
2070 __ ucomisd(reg, xmm_scratch); | |
2071 EmitBranch(instr, not_equal); | |
2072 } else { | 2205 } else { |
2073 ASSERT(r.IsTagged()); | 2206 ASSERT(r.IsTagged()); |
2074 Register reg = ToRegister(instr->value()); | 2207 Register reg = ToRegister(instr->value()); |
2075 HType type = instr->hydrogen()->value()->type(); | 2208 HType type = instr->hydrogen()->value()->type(); |
2076 if (type.IsBoolean()) { | 2209 if (type.IsBoolean()) { |
2077 ASSERT(!info()->IsStub()); | 2210 ASSERT(!info()->IsStub()); |
2078 __ cmp(reg, factory()->true_value()); | 2211 __ cmp(reg, factory()->true_value()); |
2079 EmitBranch(instr, equal); | 2212 EmitBranch(instr, equal); |
2080 } else if (type.IsSmi()) { | 2213 } else if (type.IsSmi()) { |
2081 ASSERT(!info()->IsStub()); | 2214 ASSERT(!info()->IsStub()); |
2082 __ test(reg, Operand(reg)); | 2215 __ test(reg, Operand(reg)); |
2083 EmitBranch(instr, not_equal); | 2216 EmitBranch(instr, not_equal); |
2084 } else if (type.IsJSArray()) { | 2217 } else if (type.IsJSArray()) { |
2085 ASSERT(!info()->IsStub()); | 2218 ASSERT(!info()->IsStub()); |
2086 EmitBranch(instr, no_condition); | 2219 EmitBranch(instr, no_condition); |
2087 } else if (type.IsHeapNumber()) { | 2220 } else if (type.IsHeapNumber()) { |
2088 ASSERT(!info()->IsStub()); | 2221 UNREACHABLE(); |
2089 XMMRegister xmm_scratch = double_scratch0(); | |
2090 __ xorps(xmm_scratch, xmm_scratch); | |
2091 __ ucomisd(xmm_scratch, FieldOperand(reg, HeapNumber::kValueOffset)); | |
2092 EmitBranch(instr, not_equal); | |
2093 } else if (type.IsString()) { | 2222 } else if (type.IsString()) { |
2094 ASSERT(!info()->IsStub()); | 2223 ASSERT(!info()->IsStub()); |
2095 __ cmp(FieldOperand(reg, String::kLengthOffset), Immediate(0)); | 2224 __ cmp(FieldOperand(reg, String::kLengthOffset), Immediate(0)); |
2096 EmitBranch(instr, not_equal); | 2225 EmitBranch(instr, not_equal); |
2097 } else { | 2226 } else { |
2098 ToBooleanStub::Types expected = instr->hydrogen()->expected_input_types(); | 2227 ToBooleanStub::Types expected = instr->hydrogen()->expected_input_types(); |
2099 if (expected.IsEmpty()) expected = ToBooleanStub::Types::Generic(); | 2228 if (expected.IsEmpty()) expected = ToBooleanStub::Types::Generic(); |
2100 | 2229 |
2101 if (expected.Contains(ToBooleanStub::UNDEFINED)) { | 2230 if (expected.Contains(ToBooleanStub::UNDEFINED)) { |
2102 // undefined -> false. | 2231 // undefined -> false. |
(...skipping 61 matching lines...) |
2164 __ CmpInstanceType(map, SYMBOL_TYPE); | 2293 __ CmpInstanceType(map, SYMBOL_TYPE); |
2165 __ j(equal, instr->TrueLabel(chunk_)); | 2294 __ j(equal, instr->TrueLabel(chunk_)); |
2166 } | 2295 } |
2167 | 2296 |
2168 if (expected.Contains(ToBooleanStub::HEAP_NUMBER)) { | 2297 if (expected.Contains(ToBooleanStub::HEAP_NUMBER)) { |
2169 // heap number -> false iff +0, -0, or NaN. | 2298 // heap number -> false iff +0, -0, or NaN. |
2170 Label not_heap_number; | 2299 Label not_heap_number; |
2171 __ cmp(FieldOperand(reg, HeapObject::kMapOffset), | 2300 __ cmp(FieldOperand(reg, HeapObject::kMapOffset), |
2172 factory()->heap_number_map()); | 2301 factory()->heap_number_map()); |
2173 __ j(not_equal, ¬_heap_number, Label::kNear); | 2302 __ j(not_equal, ¬_heap_number, Label::kNear); |
2174 XMMRegister xmm_scratch = double_scratch0(); | 2303 __ fldz(); |
2175 __ xorps(xmm_scratch, xmm_scratch); | 2304 __ fld_d(FieldOperand(reg, HeapNumber::kValueOffset)); |
2176 __ ucomisd(xmm_scratch, FieldOperand(reg, HeapNumber::kValueOffset)); | 2305 __ FCmp(); |
2177 __ j(zero, instr->FalseLabel(chunk_)); | 2306 __ j(zero, instr->FalseLabel(chunk_)); |
2178 __ jmp(instr->TrueLabel(chunk_)); | 2307 __ jmp(instr->TrueLabel(chunk_)); |
2179 __ bind(¬_heap_number); | 2308 __ bind(¬_heap_number); |
2180 } | 2309 } |
2181 | 2310 |
2182 if (!expected.IsGeneric()) { | 2311 if (!expected.IsGeneric()) { |
2183 // We've seen something for the first time -> deopt. | 2312 // We've seen something for the first time -> deopt. |
2184 // This can only happen if we are not generic already. | 2313 // This can only happen if we are not generic already. |
2185 DeoptimizeIf(no_condition, instr->environment()); | 2314 DeoptimizeIf(no_condition, instr->environment()); |
2186 } | 2315 } |
2187 } | 2316 } |
2188 } | 2317 } |
2189 } | 2318 } |
2190 | 2319 |
2191 | 2320 |
2192 void LCodeGen::EmitGoto(int block) { | 2321 void LCodeGen::EmitGoto(int block) { |
2193 if (!IsNextEmittedBlock(block)) { | 2322 if (!IsNextEmittedBlock(block)) { |
2194 __ jmp(chunk_->GetAssemblyLabel(LookupDestination(block))); | 2323 __ jmp(chunk_->GetAssemblyLabel(LookupDestination(block))); |
2195 } | 2324 } |
2196 } | 2325 } |
2197 | 2326 |
2198 | 2327 |
| 2328 void LCodeGen::DoClobberDoubles(LClobberDoubles* instr) { |
| 2329 } |
| 2330 |
| 2331 |
2199 void LCodeGen::DoGoto(LGoto* instr) { | 2332 void LCodeGen::DoGoto(LGoto* instr) { |
2200 EmitGoto(instr->block_id()); | 2333 EmitGoto(instr->block_id()); |
2201 } | 2334 } |
2202 | 2335 |
2203 | 2336 |
2204 Condition LCodeGen::TokenToCondition(Token::Value op, bool is_unsigned) { | 2337 Condition LCodeGen::TokenToCondition(Token::Value op, bool is_unsigned) { |
2205 Condition cond = no_condition; | 2338 Condition cond = no_condition; |
2206 switch (op) { | 2339 switch (op) { |
2207 case Token::EQ: | 2340 case Token::EQ: |
2208 case Token::EQ_STRICT: | 2341 case Token::EQ_STRICT: |
(...skipping 33 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... |
2242 | 2375 |
2243 if (left->IsConstantOperand() && right->IsConstantOperand()) { | 2376 if (left->IsConstantOperand() && right->IsConstantOperand()) { |
2244 // We can statically evaluate the comparison. | 2377 // We can statically evaluate the comparison. |
2245 double left_val = ToDouble(LConstantOperand::cast(left)); | 2378 double left_val = ToDouble(LConstantOperand::cast(left)); |
2246 double right_val = ToDouble(LConstantOperand::cast(right)); | 2379 double right_val = ToDouble(LConstantOperand::cast(right)); |
2247 int next_block = EvalComparison(instr->op(), left_val, right_val) ? | 2380 int next_block = EvalComparison(instr->op(), left_val, right_val) ? |
2248 instr->TrueDestination(chunk_) : instr->FalseDestination(chunk_); | 2381 instr->TrueDestination(chunk_) : instr->FalseDestination(chunk_); |
2249 EmitGoto(next_block); | 2382 EmitGoto(next_block); |
2250 } else { | 2383 } else { |
2251 if (instr->is_double()) { | 2384 if (instr->is_double()) { |
2252 __ ucomisd(ToDoubleRegister(left), ToDoubleRegister(right)); | 2385 X87LoadForUsage(ToX87Register(right), ToX87Register(left)); |
| 2386 __ FCmp(); |
2253 // Don't base result on EFLAGS when a NaN is involved. Instead | 2387 // Don't base result on EFLAGS when a NaN is involved. Instead |
2254 // jump to the false block. | 2388 // jump to the false block. |
2255 __ j(parity_even, instr->FalseLabel(chunk_)); | 2389 __ j(parity_even, instr->FalseLabel(chunk_)); |
2256 } else { | 2390 } else { |
2257 if (right->IsConstantOperand()) { | 2391 if (right->IsConstantOperand()) { |
2258 __ cmp(ToOperand(left), | 2392 __ cmp(ToOperand(left), |
2259 ToImmediate(right, instr->hydrogen()->representation())); | 2393 ToImmediate(right, instr->hydrogen()->representation())); |
2260 } else if (left->IsConstantOperand()) { | 2394 } else if (left->IsConstantOperand()) { |
2261 __ cmp(ToOperand(right), | 2395 __ cmp(ToOperand(right), |
2262 ToImmediate(left, instr->hydrogen()->representation())); | 2396 ToImmediate(left, instr->hydrogen()->representation())); |
(...skipping 23 matching lines...) |
2286 | 2420 |
2287 | 2421 |
2288 void LCodeGen::DoCmpHoleAndBranch(LCmpHoleAndBranch* instr) { | 2422 void LCodeGen::DoCmpHoleAndBranch(LCmpHoleAndBranch* instr) { |
2289 if (instr->hydrogen()->representation().IsTagged()) { | 2423 if (instr->hydrogen()->representation().IsTagged()) { |
2290 Register input_reg = ToRegister(instr->object()); | 2424 Register input_reg = ToRegister(instr->object()); |
2291 __ cmp(input_reg, factory()->the_hole_value()); | 2425 __ cmp(input_reg, factory()->the_hole_value()); |
2292 EmitBranch(instr, equal); | 2426 EmitBranch(instr, equal); |
2293 return; | 2427 return; |
2294 } | 2428 } |
2295 | 2429 |
2296 XMMRegister input_reg = ToDoubleRegister(instr->object()); | 2430 // Put the value on top of the stack. |
2297 __ ucomisd(input_reg, input_reg); | 2431 X87Register src = ToX87Register(instr->object()); |
2298 EmitFalseBranch(instr, parity_odd); | 2432 X87LoadForUsage(src); |
| 2433 __ fld(0); |
| 2434 __ fld(0); |
| 2435 __ FCmp(); |
| 2436 Label ok; |
| 2437 __ j(parity_even, &ok, Label::kNear); |
| 2438 __ fstp(0); |
| 2439 EmitFalseBranch(instr, no_condition); |
| 2440 __ bind(&ok); |
| 2441 |
2299 | 2442 |
2300 __ sub(esp, Immediate(kDoubleSize)); | 2443 __ sub(esp, Immediate(kDoubleSize)); |
2301 __ movsd(MemOperand(esp, 0), input_reg); | 2444 __ fstp_d(MemOperand(esp, 0)); |
2302 | 2445 |
2303 __ add(esp, Immediate(kDoubleSize)); | 2446 __ add(esp, Immediate(kDoubleSize)); |
2304 int offset = sizeof(kHoleNanUpper32); | 2447 int offset = sizeof(kHoleNanUpper32); |
2305 __ cmp(MemOperand(esp, -offset), Immediate(kHoleNanUpper32)); | 2448 __ cmp(MemOperand(esp, -offset), Immediate(kHoleNanUpper32)); |
2306 EmitBranch(instr, equal); | 2449 EmitBranch(instr, equal); |
2307 } | 2450 } |
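The hole is encoded as a NaN with a distinguished upper word, which is why the code above first filters out ordinary doubles with FCmp (only a NaN compares unordered and sets the parity flag) and then spills the value to compare its upper 32 bits. A conceptual C++ equivalent (a sketch, using BitCast as the surrounding code does):

    uint64_t bits = BitCast<uint64_t, double>(value);
    bool is_hole = (static_cast<uint32_t>(bits >> 32) == kHoleNanUpper32);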
2308 | 2451 |
2309 | 2452 |
2310 void LCodeGen::DoCompareMinusZeroAndBranch(LCompareMinusZeroAndBranch* instr) { | 2453 void LCodeGen::DoCompareMinusZeroAndBranch(LCompareMinusZeroAndBranch* instr) { |
2311 Representation rep = instr->hydrogen()->value()->representation(); | 2454 Representation rep = instr->hydrogen()->value()->representation(); |
2312 ASSERT(!rep.IsInteger32()); | 2455 ASSERT(!rep.IsInteger32()); |
2313 Register scratch = ToRegister(instr->temp()); | |
2314 | 2456 |
2315 if (rep.IsDouble()) { | 2457 if (rep.IsDouble()) { |
2316 XMMRegister value = ToDoubleRegister(instr->value()); | 2458 UNREACHABLE(); |
2317 XMMRegister xmm_scratch = double_scratch0(); | |
2318 __ xorps(xmm_scratch, xmm_scratch); | |
2319 __ ucomisd(xmm_scratch, value); | |
2320 EmitFalseBranch(instr, not_equal); | |
2321 __ movmskpd(scratch, value); | |
2322 __ test(scratch, Immediate(1)); | |
2323 EmitBranch(instr, not_zero); | |
2324 } else { | 2459 } else { |
2325 Register value = ToRegister(instr->value()); | 2460 Register value = ToRegister(instr->value()); |
2326 Handle<Map> map = masm()->isolate()->factory()->heap_number_map(); | 2461 Handle<Map> map = masm()->isolate()->factory()->heap_number_map(); |
2327 __ CheckMap(value, map, instr->FalseLabel(chunk()), DO_SMI_CHECK); | 2462 __ CheckMap(value, map, instr->FalseLabel(chunk()), DO_SMI_CHECK); |
2328 __ cmp(FieldOperand(value, HeapNumber::kExponentOffset), | 2463 __ cmp(FieldOperand(value, HeapNumber::kExponentOffset), |
2329 Immediate(0x1)); | 2464 Immediate(0x1)); |
2330 EmitFalseBranch(instr, no_overflow); | 2465 EmitFalseBranch(instr, no_overflow); |
2331 __ cmp(FieldOperand(value, HeapNumber::kMantissaOffset), | 2466 __ cmp(FieldOperand(value, HeapNumber::kMantissaOffset), |
2332 Immediate(0x00000000)); | 2467 Immediate(0x00000000)); |
2333 EmitBranch(instr, equal); | 2468 EmitBranch(instr, equal); |
(...skipping 274 matching lines...) |
2608 __ bind(&true_value); | 2743 __ bind(&true_value); |
2609 __ mov(ToRegister(instr->result()), factory()->true_value()); | 2744 __ mov(ToRegister(instr->result()), factory()->true_value()); |
2610 __ bind(&done); | 2745 __ bind(&done); |
2611 } | 2746 } |
2612 | 2747 |
2613 | 2748 |
2614 void LCodeGen::DoInstanceOfKnownGlobal(LInstanceOfKnownGlobal* instr) { | 2749 void LCodeGen::DoInstanceOfKnownGlobal(LInstanceOfKnownGlobal* instr) { |
2615 class DeferredInstanceOfKnownGlobal V8_FINAL : public LDeferredCode { | 2750 class DeferredInstanceOfKnownGlobal V8_FINAL : public LDeferredCode { |
2616 public: | 2751 public: |
2617 DeferredInstanceOfKnownGlobal(LCodeGen* codegen, | 2752 DeferredInstanceOfKnownGlobal(LCodeGen* codegen, |
2618 LInstanceOfKnownGlobal* instr) | 2753 LInstanceOfKnownGlobal* instr, |
2619 : LDeferredCode(codegen), instr_(instr) { } | 2754 const X87Stack& x87_stack) |
| 2755 : LDeferredCode(codegen, x87_stack), instr_(instr) { } |
2620 virtual void Generate() V8_OVERRIDE { | 2756 virtual void Generate() V8_OVERRIDE { |
2621 codegen()->DoDeferredInstanceOfKnownGlobal(instr_, &map_check_); | 2757 codegen()->DoDeferredInstanceOfKnownGlobal(instr_, &map_check_); |
2622 } | 2758 } |
2623 virtual LInstruction* instr() V8_OVERRIDE { return instr_; } | 2759 virtual LInstruction* instr() V8_OVERRIDE { return instr_; } |
2624 Label* map_check() { return &map_check_; } | 2760 Label* map_check() { return &map_check_; } |
2625 private: | 2761 private: |
2626 LInstanceOfKnownGlobal* instr_; | 2762 LInstanceOfKnownGlobal* instr_; |
2627 Label map_check_; | 2763 Label map_check_; |
2628 }; | 2764 }; |
2629 | 2765 |
2630 DeferredInstanceOfKnownGlobal* deferred; | 2766 DeferredInstanceOfKnownGlobal* deferred; |
2631 deferred = new(zone()) DeferredInstanceOfKnownGlobal(this, instr); | 2767 deferred = new(zone()) DeferredInstanceOfKnownGlobal(this, instr, x87_stack_); |
2632 | 2768 |
2633 Label done, false_result; | 2769 Label done, false_result; |
2634 Register object = ToRegister(instr->value()); | 2770 Register object = ToRegister(instr->value()); |
2635 Register temp = ToRegister(instr->temp()); | 2771 Register temp = ToRegister(instr->temp()); |
2636 | 2772 |
2637 // A Smi is not an instance of anything. | 2773 // A Smi is not an instance of anything. |
2638 __ JumpIfSmi(object, &false_result, Label::kNear); | 2774 __ JumpIfSmi(object, &false_result, Label::kNear); |
2639 | 2775 |
2640 // This is the inlined call site instanceof cache. The two occurrences of the | 2776 // This is the inlined call site instanceof cache. The two occurrences of the |
2641 // hole value will be patched to the last map/result pair generated by the | 2777 // hole value will be patched to the last map/result pair generated by the |
(...skipping 128 matching lines...) |
2770 void LCodeGen::DoReturn(LReturn* instr) { | 2906 void LCodeGen::DoReturn(LReturn* instr) { |
2771 if (FLAG_trace && info()->IsOptimizing()) { | 2907 if (FLAG_trace && info()->IsOptimizing()) { |
2772 // Preserve the return value on the stack and rely on the runtime call | 2908 // Preserve the return value on the stack and rely on the runtime call |
2773 // to return the value in the same register. We're leaving the code | 2909 // to return the value in the same register. We're leaving the code |
2774 // managed by the register allocator and tearing down the frame, it's | 2910 // managed by the register allocator and tearing down the frame, it's |
2775 // safe to write to the context register. | 2911 // safe to write to the context register. |
2776 __ push(eax); | 2912 __ push(eax); |
2777 __ mov(esi, Operand(ebp, StandardFrameConstants::kContextOffset)); | 2913 __ mov(esi, Operand(ebp, StandardFrameConstants::kContextOffset)); |
2778 __ CallRuntime(Runtime::kTraceExit, 1); | 2914 __ CallRuntime(Runtime::kTraceExit, 1); |
2779 } | 2915 } |
2780 if (info()->saves_caller_doubles()) RestoreCallerDoubles(); | |
2781 if (dynamic_frame_alignment_) { | 2916 if (dynamic_frame_alignment_) { |
2782 // Fetch the state of the dynamic frame alignment. | 2917 // Fetch the state of the dynamic frame alignment. |
2783 __ mov(edx, Operand(ebp, | 2918 __ mov(edx, Operand(ebp, |
2784 JavaScriptFrameConstants::kDynamicAlignmentStateOffset)); | 2919 JavaScriptFrameConstants::kDynamicAlignmentStateOffset)); |
2785 } | 2920 } |
2786 int no_frame_start = -1; | 2921 int no_frame_start = -1; |
2787 if (NeedsEagerFrame()) { | 2922 if (NeedsEagerFrame()) { |
2788 __ mov(esp, ebp); | 2923 __ mov(esp, ebp); |
2789 __ pop(ebp); | 2924 __ pop(ebp); |
2790 no_frame_start = masm_->pc_offset(); | 2925 no_frame_start = masm_->pc_offset(); |
(...skipping 94 matching lines...) |
2885 if (instr->hydrogen()->NeedsWriteBarrier()) { | 3020 if (instr->hydrogen()->NeedsWriteBarrier()) { |
2886 SmiCheck check_needed = | 3021 SmiCheck check_needed = |
2887 instr->hydrogen()->value()->IsHeapObject() | 3022 instr->hydrogen()->value()->IsHeapObject() |
2888 ? OMIT_SMI_CHECK : INLINE_SMI_CHECK; | 3023 ? OMIT_SMI_CHECK : INLINE_SMI_CHECK; |
2889 Register temp = ToRegister(instr->temp()); | 3024 Register temp = ToRegister(instr->temp()); |
2890 int offset = Context::SlotOffset(instr->slot_index()); | 3025 int offset = Context::SlotOffset(instr->slot_index()); |
2891 __ RecordWriteContextSlot(context, | 3026 __ RecordWriteContextSlot(context, |
2892 offset, | 3027 offset, |
2893 value, | 3028 value, |
2894 temp, | 3029 temp, |
2895 kSaveFPRegs, | |
2896 EMIT_REMEMBERED_SET, | 3030 EMIT_REMEMBERED_SET, |
2897 check_needed); | 3031 check_needed); |
2898 } | 3032 } |
2899 | 3033 |
2900 __ bind(&skip_assignment); | 3034 __ bind(&skip_assignment); |
2901 } | 3035 } |
2902 | 3036 |
2903 | 3037 |
2904 void LCodeGen::DoLoadNamedField(LLoadNamedField* instr) { | 3038 void LCodeGen::DoLoadNamedField(LLoadNamedField* instr) { |
2905 HObjectAccess access = instr->hydrogen()->access(); | 3039 HObjectAccess access = instr->hydrogen()->access(); |
2906 int offset = access.offset(); | 3040 int offset = access.offset(); |
2907 | 3041 |
2908 if (access.IsExternalMemory()) { | 3042 if (access.IsExternalMemory()) { |
2909 Register result = ToRegister(instr->result()); | 3043 Register result = ToRegister(instr->result()); |
2910 MemOperand operand = instr->object()->IsConstantOperand() | 3044 MemOperand operand = instr->object()->IsConstantOperand() |
2911 ? MemOperand::StaticVariable(ToExternalReference( | 3045 ? MemOperand::StaticVariable(ToExternalReference( |
2912 LConstantOperand::cast(instr->object()))) | 3046 LConstantOperand::cast(instr->object()))) |
2913 : MemOperand(ToRegister(instr->object()), offset); | 3047 : MemOperand(ToRegister(instr->object()), offset); |
2914 __ Load(result, operand, access.representation()); | 3048 __ Load(result, operand, access.representation()); |
2915 return; | 3049 return; |
2916 } | 3050 } |
2917 | 3051 |
2918 Register object = ToRegister(instr->object()); | 3052 Register object = ToRegister(instr->object()); |
2919 if (instr->hydrogen()->representation().IsDouble()) { | 3053 if (instr->hydrogen()->representation().IsDouble()) { |
2920 XMMRegister result = ToDoubleRegister(instr->result()); | 3054 X87Mov(ToX87Register(instr->result()), FieldOperand(object, offset)); |
2921 __ movsd(result, FieldOperand(object, offset)); | |
2922 return; | 3055 return; |
2923 } | 3056 } |
2924 | 3057 |
2925 Register result = ToRegister(instr->result()); | 3058 Register result = ToRegister(instr->result()); |
2926 if (!access.IsInobject()) { | 3059 if (!access.IsInobject()) { |
2927 __ mov(result, FieldOperand(object, JSObject::kPropertiesOffset)); | 3060 __ mov(result, FieldOperand(object, JSObject::kPropertiesOffset)); |
2928 object = result; | 3061 object = result; |
2929 } | 3062 } |
2930 __ Load(result, FieldOperand(object, offset), access.representation()); | 3063 __ Load(result, FieldOperand(object, offset), access.representation()); |
2931 } | 3064 } |
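Note: with no XMM registers in this port, a double field travels over the FPU register stack. A minimal sketch of what the X87Mov call above is assumed to expand to (the real helper also updates the simulated x87 stack so later spills stay consistent):

    __ fld_d(FieldOperand(object, offset));  // push the 64-bit field onto st(0)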
(...skipping 106 matching lines...)
3038 } | 3171 } |
3039 Operand operand(BuildFastArrayOperand( | 3172 Operand operand(BuildFastArrayOperand( |
3040 instr->elements(), | 3173 instr->elements(), |
3041 key, | 3174 key, |
3042 instr->hydrogen()->key()->representation(), | 3175 instr->hydrogen()->key()->representation(), |
3043 elements_kind, | 3176 elements_kind, |
3044 0, | 3177 0, |
3045 instr->additional_index())); | 3178 instr->additional_index())); |
3046 if (elements_kind == EXTERNAL_FLOAT32_ELEMENTS || | 3179 if (elements_kind == EXTERNAL_FLOAT32_ELEMENTS || |
3047 elements_kind == FLOAT32_ELEMENTS) { | 3180 elements_kind == FLOAT32_ELEMENTS) { |
3048 XMMRegister result(ToDoubleRegister(instr->result())); | 3181 X87Mov(ToX87Register(instr->result()), operand, kX87FloatOperand); |
3049 __ movss(result, operand); | |
3050 __ cvtss2sd(result, result); | |
3051 } else if (elements_kind == EXTERNAL_FLOAT64_ELEMENTS || | 3182 } else if (elements_kind == EXTERNAL_FLOAT64_ELEMENTS || |
3052 elements_kind == FLOAT64_ELEMENTS) { | 3183 elements_kind == FLOAT64_ELEMENTS) { |
3053 __ movsd(ToDoubleRegister(instr->result()), operand); | 3184 X87Mov(ToX87Register(instr->result()), operand); |
3054 } else { | 3185 } else { |
3055 Register result(ToRegister(instr->result())); | 3186 Register result(ToRegister(instr->result())); |
3056 switch (elements_kind) { | 3187 switch (elements_kind) { |
3057 case EXTERNAL_INT8_ELEMENTS: | 3188 case EXTERNAL_INT8_ELEMENTS: |
3058 case INT8_ELEMENTS: | 3189 case INT8_ELEMENTS: |
3059 __ movsx_b(result, operand); | 3190 __ movsx_b(result, operand); |
3060 break; | 3191 break; |
3061 case EXTERNAL_UINT8_CLAMPED_ELEMENTS: | 3192 case EXTERNAL_UINT8_CLAMPED_ELEMENTS: |
3062 case EXTERNAL_UINT8_ELEMENTS: | 3193 case EXTERNAL_UINT8_ELEMENTS: |
3063 case UINT8_ELEMENTS: | 3194 case UINT8_ELEMENTS: |
(...skipping 53 matching lines...)
3117 DeoptimizeIf(equal, instr->environment()); | 3248 DeoptimizeIf(equal, instr->environment()); |
3118 } | 3249 } |
3119 | 3250 |
3120 Operand double_load_operand = BuildFastArrayOperand( | 3251 Operand double_load_operand = BuildFastArrayOperand( |
3121 instr->elements(), | 3252 instr->elements(), |
3122 instr->key(), | 3253 instr->key(), |
3123 instr->hydrogen()->key()->representation(), | 3254 instr->hydrogen()->key()->representation(), |
3124 FAST_DOUBLE_ELEMENTS, | 3255 FAST_DOUBLE_ELEMENTS, |
3125 FixedDoubleArray::kHeaderSize - kHeapObjectTag, | 3256 FixedDoubleArray::kHeaderSize - kHeapObjectTag, |
3126 instr->additional_index()); | 3257 instr->additional_index()); |
3127 XMMRegister result = ToDoubleRegister(instr->result()); | 3258 X87Mov(ToX87Register(instr->result()), double_load_operand); |
3128 __ movsd(result, double_load_operand); | |
3129 } | 3259 } |
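Note on the keyed double loads above: an x87 fld widens any memory operand to the FPU's internal format on load, so the float32 path no longer needs the separate cvtss2sd of the SSE2 version; the kX87FloatOperand flag is assumed to select only the 4-byte load width. Illustrative expansions:

    __ fld_s(operand);              // FLOAT32 case: 4-byte load, widened for free
    __ fld_d(operand);              // FLOAT64 case: 8-byte load
    __ fld_d(double_load_operand);  // FAST_DOUBLE_ELEMENTS: the same 8-byte load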
3130 | 3260 |
3131 | 3261 |
3132 void LCodeGen::DoLoadKeyedFixedArray(LLoadKeyed* instr) { | 3262 void LCodeGen::DoLoadKeyedFixedArray(LLoadKeyed* instr) { |
3133 Register result = ToRegister(instr->result()); | 3263 Register result = ToRegister(instr->result()); |
3134 | 3264 |
3135 // Load the result. | 3265 // Load the result. |
3136 __ mov(result, | 3266 __ mov(result, |
3137 BuildFastArrayOperand(instr->elements(), | 3267 BuildFastArrayOperand(instr->elements(), |
3138 instr->key(), | 3268 instr->key(), |
(...skipping 401 matching lines...)
3540 DeoptimizeIf(negative, instr->environment()); | 3670 DeoptimizeIf(negative, instr->environment()); |
3541 __ bind(&is_positive); | 3671 __ bind(&is_positive); |
3542 } | 3672 } |
3543 | 3673 |
3544 | 3674 |
3545 void LCodeGen::DoMathAbs(LMathAbs* instr) { | 3675 void LCodeGen::DoMathAbs(LMathAbs* instr) { |
3546 // Class for deferred case. | 3676 // Class for deferred case. |
3547 class DeferredMathAbsTaggedHeapNumber V8_FINAL : public LDeferredCode { | 3677 class DeferredMathAbsTaggedHeapNumber V8_FINAL : public LDeferredCode { |
3548 public: | 3678 public: |
3549 DeferredMathAbsTaggedHeapNumber(LCodeGen* codegen, | 3679 DeferredMathAbsTaggedHeapNumber(LCodeGen* codegen, |
3550 LMathAbs* instr) | 3680 LMathAbs* instr, |
3551 : LDeferredCode(codegen), instr_(instr) { } | 3681 const X87Stack& x87_stack) |
| 3682 : LDeferredCode(codegen, x87_stack), instr_(instr) { } |
3552 virtual void Generate() V8_OVERRIDE { | 3683 virtual void Generate() V8_OVERRIDE { |
3553 codegen()->DoDeferredMathAbsTaggedHeapNumber(instr_); | 3684 codegen()->DoDeferredMathAbsTaggedHeapNumber(instr_); |
3554 } | 3685 } |
3555 virtual LInstruction* instr() V8_OVERRIDE { return instr_; } | 3686 virtual LInstruction* instr() V8_OVERRIDE { return instr_; } |
3556 private: | 3687 private: |
3557 LMathAbs* instr_; | 3688 LMathAbs* instr_; |
3558 }; | 3689 }; |
3559 | 3690 |
3560 ASSERT(instr->value()->Equals(instr->result())); | 3691 ASSERT(instr->value()->Equals(instr->result())); |
3561 Representation r = instr->hydrogen()->value()->representation(); | 3692 Representation r = instr->hydrogen()->value()->representation(); |
3562 | 3693 |
3563 if (r.IsDouble()) { | 3694 if (r.IsDouble()) { |
3564 XMMRegister scratch = double_scratch0(); | 3695 UNIMPLEMENTED(); |
3565 XMMRegister input_reg = ToDoubleRegister(instr->value()); | |
3566 __ xorps(scratch, scratch); | |
3567 __ subsd(scratch, input_reg); | |
3568 __ andps(input_reg, scratch); | |
3569 } else if (r.IsSmiOrInteger32()) { | 3696 } else if (r.IsSmiOrInteger32()) { |
3570 EmitIntegerMathAbs(instr); | 3697 EmitIntegerMathAbs(instr); |
3571 } else { // Tagged case. | 3698 } else { // Tagged case. |
3572 DeferredMathAbsTaggedHeapNumber* deferred = | 3699 DeferredMathAbsTaggedHeapNumber* deferred = |
3573 new(zone()) DeferredMathAbsTaggedHeapNumber(this, instr); | 3700 new(zone()) DeferredMathAbsTaggedHeapNumber(this, instr, x87_stack_); |
3574 Register input_reg = ToRegister(instr->value()); | 3701 Register input_reg = ToRegister(instr->value()); |
3575 // Smi check. | 3702 // Smi check. |
3576 __ JumpIfNotSmi(input_reg, deferred->entry()); | 3703 __ JumpIfNotSmi(input_reg, deferred->entry()); |
3577 EmitIntegerMathAbs(instr); | 3704 EmitIntegerMathAbs(instr); |
3578 __ bind(deferred->exit()); | 3705 __ bind(deferred->exit()); |
3579 } | 3706 } |
3580 } | 3707 } |
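Note: every LDeferredCode constructed in this patch now also receives x87_stack_. Deferred code is emitted out of line, so it has to start from the FPU stack layout that was live at the jump site. A sketch of the assumed base-class shape (illustrative only; the real class also carries entry/exit labels):

    class LDeferredCode : public ZoneObject {
     public:
      LDeferredCode(LCodeGen* codegen, const LCodeGen::X87Stack& x87_stack)
          : codegen_(codegen), x87_stack_(x87_stack) {}
      const LCodeGen::X87Stack& x87_stack() const { return x87_stack_; }
     private:
      LCodeGen* codegen_;
      LCodeGen::X87Stack x87_stack_;  // snapshot taken at the jump site
    };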
3581 | 3708 |
3582 | 3709 |
3583 void LCodeGen::DoMathFloor(LMathFloor* instr) { | 3710 void LCodeGen::DoMathFloor(LMathFloor* instr) { |
3584 XMMRegister xmm_scratch = double_scratch0(); | 3711 UNIMPLEMENTED(); |
3585 Register output_reg = ToRegister(instr->result()); | |
3586 XMMRegister input_reg = ToDoubleRegister(instr->value()); | |
3587 | |
3588 if (CpuFeatures::IsSupported(SSE4_1)) { | |
3589 CpuFeatureScope scope(masm(), SSE4_1); | |
3590 if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) { | |
3591 // Deoptimize on negative zero. | |
3592 Label non_zero; | |
3593 __ xorps(xmm_scratch, xmm_scratch); // Zero the register. | |
3594 __ ucomisd(input_reg, xmm_scratch); | |
3595 __ j(not_equal, &non_zero, Label::kNear); | |
3596 __ movmskpd(output_reg, input_reg); | |
3597 __ test(output_reg, Immediate(1)); | |
3598 DeoptimizeIf(not_zero, instr->environment()); | |
3599 __ bind(&non_zero); | |
3600 } | |
3601 __ roundsd(xmm_scratch, input_reg, Assembler::kRoundDown); | |
3602 __ cvttsd2si(output_reg, Operand(xmm_scratch)); | |
3603 // Overflow is signalled with minint. | |
3604 __ cmp(output_reg, 0x1); | |
3605 DeoptimizeIf(overflow, instr->environment()); | |
3606 } else { | |
3607 Label negative_sign, done; | |
3608 // Deoptimize on unordered. | |
3609 __ xorps(xmm_scratch, xmm_scratch); // Zero the register. | |
3610 __ ucomisd(input_reg, xmm_scratch); | |
3611 DeoptimizeIf(parity_even, instr->environment()); | |
3612 __ j(below, &negative_sign, Label::kNear); | |
3613 | |
3614 if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) { | |
3615 // Check for negative zero. | |
3616 Label positive_sign; | |
3617 __ j(above, &positive_sign, Label::kNear); | |
3618 __ movmskpd(output_reg, input_reg); | |
3619 __ test(output_reg, Immediate(1)); | |
3620 DeoptimizeIf(not_zero, instr->environment()); | |
3621 __ Move(output_reg, Immediate(0)); | |
3622 __ jmp(&done, Label::kNear); | |
3623 __ bind(&positive_sign); | |
3624 } | |
3625 | |
3626 // Use truncating instruction (OK because input is positive). | |
3627 __ cvttsd2si(output_reg, Operand(input_reg)); | |
3628 // Overflow is signalled with minint. | |
3629 __ cmp(output_reg, 0x1); | |
3630 DeoptimizeIf(overflow, instr->environment()); | |
3631 __ jmp(&done, Label::kNear); | |
3632 | |
3633 // Non-zero negative reaches here. | |
3634 __ bind(&negative_sign); | |
3635 // Truncate, then compare and compensate. | |
3636 __ cvttsd2si(output_reg, Operand(input_reg)); | |
3637 __ Cvtsi2sd(xmm_scratch, output_reg); | |
3638 __ ucomisd(input_reg, xmm_scratch); | |
3639 __ j(equal, &done, Label::kNear); | |
3640 __ sub(output_reg, Immediate(1)); | |
3641 DeoptimizeIf(overflow, instr->environment()); | |
3642 | |
3643 __ bind(&done); | |
3644 } | |
3645 } | 3712 } |
3646 | 3713 |
3647 | 3714 |
3648 void LCodeGen::DoMathRound(LMathRound* instr) { | 3715 void LCodeGen::DoMathRound(LMathRound* instr) { |
3649 Register output_reg = ToRegister(instr->result()); | 3716 UNIMPLEMENTED(); |
3650 XMMRegister input_reg = ToDoubleRegister(instr->value()); | |
3651 XMMRegister xmm_scratch = double_scratch0(); | |
3652 XMMRegister input_temp = ToDoubleRegister(instr->temp()); | |
3653 ExternalReference one_half = ExternalReference::address_of_one_half(); | |
3654 ExternalReference minus_one_half = | |
3655 ExternalReference::address_of_minus_one_half(); | |
3656 | |
3657 Label done, round_to_zero, below_one_half, do_not_compensate; | |
3658 Label::Distance dist = DeoptEveryNTimes() ? Label::kFar : Label::kNear; | |
3659 | |
3660 __ movsd(xmm_scratch, Operand::StaticVariable(one_half)); | |
3661 __ ucomisd(xmm_scratch, input_reg); | |
3662 __ j(above, &below_one_half, Label::kNear); | |
3663 | |
3664 // CVTTSD2SI rounds towards zero, since 0.5 <= x, we use floor(0.5 + x). | |
3665 __ addsd(xmm_scratch, input_reg); | |
3666 __ cvttsd2si(output_reg, Operand(xmm_scratch)); | |
3667 // Overflow is signalled with minint. | |
3668 __ cmp(output_reg, 0x1); | |
3669 __ RecordComment("D2I conversion overflow"); | |
3670 DeoptimizeIf(overflow, instr->environment()); | |
3671 __ jmp(&done, dist); | |
3672 | |
3673 __ bind(&below_one_half); | |
3674 __ movsd(xmm_scratch, Operand::StaticVariable(minus_one_half)); | |
3675 __ ucomisd(xmm_scratch, input_reg); | |
3676 __ j(below_equal, &round_to_zero, Label::kNear); | |
3677 | |
3678 // CVTTSD2SI rounds towards zero, we use ceil(x - (-0.5)) and then | |
3679 // compare and compensate. | |
3680 __ movaps(input_temp, input_reg); // Do not alter input_reg. | |
3681 __ subsd(input_temp, xmm_scratch); | |
3682 __ cvttsd2si(output_reg, Operand(input_temp)); | |
3683 // Catch minint due to overflow, and to prevent overflow when compensating. | |
3684 __ cmp(output_reg, 0x1); | |
3685 __ RecordComment("D2I conversion overflow"); | |
3686 DeoptimizeIf(overflow, instr->environment()); | |
3687 | |
3688 __ Cvtsi2sd(xmm_scratch, output_reg); | |
3689 __ ucomisd(xmm_scratch, input_temp); | |
3690 __ j(equal, &done, dist); | |
3691 __ sub(output_reg, Immediate(1)); | |
3692 // No overflow because we already ruled out minint. | |
3693 __ jmp(&done, dist); | |
3694 | |
3695 __ bind(&round_to_zero); | |
3696 // We return 0 for the input range [+0, 0.5[, or [-0.5, 0.5[ if | |
3697 // we can ignore the difference between a result of -0 and +0. | |
3698 if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) { | |
3699 // If the sign is positive, we return +0. | |
3700 __ movmskpd(output_reg, input_reg); | |
3701 __ test(output_reg, Immediate(1)); | |
3702 __ RecordComment("Minus zero"); | |
3703 DeoptimizeIf(not_zero, instr->environment()); | |
3704 } | |
3705 __ Move(output_reg, Immediate(0)); | |
3706 __ bind(&done); | |
3707 } | 3717 } |
3708 | 3718 |
3709 | 3719 |
3710 void LCodeGen::DoMathSqrt(LMathSqrt* instr) { | 3720 void LCodeGen::DoMathSqrt(LMathSqrt* instr) { |
3711 XMMRegister input_reg = ToDoubleRegister(instr->value()); | 3721 UNIMPLEMENTED(); |
3712 ASSERT(ToDoubleRegister(instr->result()).is(input_reg)); | |
3713 __ sqrtsd(input_reg, input_reg); | |
3714 } | 3722 } |
3715 | 3723 |
3716 | 3724 |
3717 void LCodeGen::DoMathPowHalf(LMathPowHalf* instr) { | 3725 void LCodeGen::DoMathPowHalf(LMathPowHalf* instr) { |
3718 XMMRegister xmm_scratch = double_scratch0(); | 3726 UNIMPLEMENTED(); |
3719 XMMRegister input_reg = ToDoubleRegister(instr->value()); | |
3720 Register scratch = ToRegister(instr->temp()); | |
3721 ASSERT(ToDoubleRegister(instr->result()).is(input_reg)); | |
3722 | |
3723 // Note that according to ECMA-262 15.8.2.13: | |
3724 // Math.pow(-Infinity, 0.5) == Infinity | |
3725 // Math.sqrt(-Infinity) == NaN | |
3726 Label done, sqrt; | |
3727 // Check base for -Infinity. According to IEEE-754, single-precision | |
3728 // -Infinity has the highest 9 bits set and the lowest 23 bits cleared. | |
3729 __ mov(scratch, 0xFF800000); | |
3730 __ movd(xmm_scratch, scratch); | |
3731 __ cvtss2sd(xmm_scratch, xmm_scratch); | |
3732 __ ucomisd(input_reg, xmm_scratch); | |
3733 // Comparing -Infinity with NaN results in "unordered", which sets the | |
3734 // zero flag as if both were equal. However, it also sets the carry flag. | |
3735 __ j(not_equal, &sqrt, Label::kNear); | |
3736 __ j(carry, &sqrt, Label::kNear); | |
3737 // If input is -Infinity, return Infinity. | |
3738 __ xorps(input_reg, input_reg); | |
3739 __ subsd(input_reg, xmm_scratch); | |
3740 __ jmp(&done, Label::kNear); | |
3741 | |
3742 // Square root. | |
3743 __ bind(&sqrt); | |
3744 __ xorps(xmm_scratch, xmm_scratch); | |
3745 __ addsd(input_reg, xmm_scratch); // Convert -0 to +0. | |
3746 __ sqrtsd(input_reg, input_reg); | |
3747 __ bind(&done); | |
3748 } | 3727 } |
3749 | 3728 |
3750 | 3729 |
3751 void LCodeGen::DoPower(LPower* instr) { | 3730 void LCodeGen::DoPower(LPower* instr) { |
3752 Representation exponent_type = instr->hydrogen()->right()->representation(); | 3731 UNIMPLEMENTED(); |
3753 // Having marked this as a call, we can use any registers. | |
3754 // Just make sure that the input/output registers are the expected ones. | |
3755 ASSERT(!instr->right()->IsDoubleRegister() || | |
3756 ToDoubleRegister(instr->right()).is(xmm1)); | |
3757 ASSERT(!instr->right()->IsRegister() || | |
3758 ToRegister(instr->right()).is(eax)); | |
3759 ASSERT(ToDoubleRegister(instr->left()).is(xmm2)); | |
3760 ASSERT(ToDoubleRegister(instr->result()).is(xmm3)); | |
3761 | |
3762 if (exponent_type.IsSmi()) { | |
3763 MathPowStub stub(isolate(), MathPowStub::TAGGED); | |
3764 __ CallStub(&stub); | |
3765 } else if (exponent_type.IsTagged()) { | |
3766 Label no_deopt; | |
3767 __ JumpIfSmi(eax, &no_deopt); | |
3768 __ CmpObjectType(eax, HEAP_NUMBER_TYPE, ecx); | |
3769 DeoptimizeIf(not_equal, instr->environment()); | |
3770 __ bind(&no_deopt); | |
3771 MathPowStub stub(isolate(), MathPowStub::TAGGED); | |
3772 __ CallStub(&stub); | |
3773 } else if (exponent_type.IsInteger32()) { | |
3774 MathPowStub stub(isolate(), MathPowStub::INTEGER); | |
3775 __ CallStub(&stub); | |
3776 } else { | |
3777 ASSERT(exponent_type.IsDouble()); | |
3778 MathPowStub stub(isolate(), MathPowStub::DOUBLE); | |
3779 __ CallStub(&stub); | |
3780 } | |
3781 } | 3732 } |
3782 | 3733 |
3783 | 3734 |
3784 void LCodeGen::DoMathLog(LMathLog* instr) { | 3735 void LCodeGen::DoMathLog(LMathLog* instr) { |
3785 ASSERT(instr->value()->Equals(instr->result())); | 3736 UNIMPLEMENTED(); |
3786 XMMRegister input_reg = ToDoubleRegister(instr->value()); | |
3787 XMMRegister xmm_scratch = double_scratch0(); | |
3788 Label positive, done, zero; | |
3789 __ xorps(xmm_scratch, xmm_scratch); | |
3790 __ ucomisd(input_reg, xmm_scratch); | |
3791 __ j(above, &positive, Label::kNear); | |
3792 __ j(not_carry, &zero, Label::kNear); | |
3793 ExternalReference nan = | |
3794 ExternalReference::address_of_canonical_non_hole_nan(); | |
3795 __ movsd(input_reg, Operand::StaticVariable(nan)); | |
3796 __ jmp(&done, Label::kNear); | |
3797 __ bind(&zero); | |
3798 ExternalReference ninf = | |
3799 ExternalReference::address_of_negative_infinity(); | |
3800 __ movsd(input_reg, Operand::StaticVariable(ninf)); | |
3801 __ jmp(&done, Label::kNear); | |
3802 __ bind(&positive); | |
3803 __ fldln2(); | |
3804 __ sub(Operand(esp), Immediate(kDoubleSize)); | |
3805 __ movsd(Operand(esp, 0), input_reg); | |
3806 __ fld_d(Operand(esp, 0)); | |
3807 __ fyl2x(); | |
3808 __ fstp_d(Operand(esp, 0)); | |
3809 __ movsd(input_reg, Operand(esp, 0)); | |
3810 __ add(Operand(esp), Immediate(kDoubleSize)); | |
3811 __ bind(&done); | |
3812 } | 3737 } |
3813 | 3738 |
3814 | 3739 |
3815 void LCodeGen::DoMathClz32(LMathClz32* instr) { | 3740 void LCodeGen::DoMathClz32(LMathClz32* instr) { |
3816 Register input = ToRegister(instr->value()); | 3741 UNIMPLEMENTED(); |
3817 Register result = ToRegister(instr->result()); | |
3818 Label not_zero_input; | |
3819 __ bsr(result, input); | |
3820 | |
3821 __ j(not_zero, &not_zero_input); | |
3822 __ Move(result, Immediate(63)); // 63^31 == 32 | |
3823 | |
3824 __ bind(&not_zero_input); | |
3825 __ xor_(result, Immediate(31)); // for x in [0..31], 31^x == 31-x. | |
3826 } | 3742 } |
3827 | 3743 |
3828 | 3744 |
3829 void LCodeGen::DoMathExp(LMathExp* instr) { | 3745 void LCodeGen::DoMathExp(LMathExp* instr) { |
3830 XMMRegister input = ToDoubleRegister(instr->value()); | 3746 UNIMPLEMENTED(); |
3831 XMMRegister result = ToDoubleRegister(instr->result()); | |
3832 XMMRegister temp0 = double_scratch0(); | |
3833 Register temp1 = ToRegister(instr->temp1()); | |
3834 Register temp2 = ToRegister(instr->temp2()); | |
3835 | |
3836 MathExpGenerator::EmitMathExp(masm(), input, result, temp0, temp1, temp2); | |
3837 } | 3747 } |
3838 | 3748 |
3839 | 3749 |
3840 void LCodeGen::DoInvokeFunction(LInvokeFunction* instr) { | 3750 void LCodeGen::DoInvokeFunction(LInvokeFunction* instr) { |
3841 ASSERT(ToRegister(instr->context()).is(esi)); | 3751 ASSERT(ToRegister(instr->context()).is(esi)); |
3842 ASSERT(ToRegister(instr->function()).is(edi)); | 3752 ASSERT(ToRegister(instr->function()).is(edi)); |
3843 ASSERT(instr->HasPointerMap()); | 3753 ASSERT(instr->HasPointerMap()); |
3844 | 3754 |
3845 Handle<JSFunction> known_function = instr->hydrogen()->known_function(); | 3755 Handle<JSFunction> known_function = instr->hydrogen()->known_function(); |
3846 if (known_function.is_null()) { | 3756 if (known_function.is_null()) { |
(...skipping 76 matching lines...)
3923 __ bind(&done); | 3833 __ bind(&done); |
3924 } else { | 3834 } else { |
3925 ArrayNArgumentsConstructorStub stub(isolate(), kind, override_mode); | 3835 ArrayNArgumentsConstructorStub stub(isolate(), kind, override_mode); |
3926 CallCode(stub.GetCode(), RelocInfo::CONSTRUCT_CALL, instr); | 3836 CallCode(stub.GetCode(), RelocInfo::CONSTRUCT_CALL, instr); |
3927 } | 3837 } |
3928 } | 3838 } |
3929 | 3839 |
3930 | 3840 |
3931 void LCodeGen::DoCallRuntime(LCallRuntime* instr) { | 3841 void LCodeGen::DoCallRuntime(LCallRuntime* instr) { |
3932 ASSERT(ToRegister(instr->context()).is(esi)); | 3842 ASSERT(ToRegister(instr->context()).is(esi)); |
3933 CallRuntime(instr->function(), instr->arity(), instr, instr->save_doubles()); | 3843 CallRuntime(instr->function(), instr->arity(), instr); |
3934 } | 3844 } |
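Note: dropping save_doubles here is part of a pattern that recurs throughout the patch: the kSaveFPRegs/kDontSaveFPRegs arguments to the RecordWrite* helpers and the CallRuntimeSaveDoubles calls disappear as well. With no allocatable XMM registers there are no caller-saved double registers to preserve around calls. Assumed signature change, for illustration only:

    // before (ia32): __ RecordWriteField(obj, offset, value, temp, kSaveFPRegs,
    //                                    EMIT_REMEMBERED_SET, check_needed);
    // after (x87):   __ RecordWriteField(obj, offset, value, temp,
    //                                    EMIT_REMEMBERED_SET, check_needed);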
3935 | 3845 |
3936 | 3846 |
3937 void LCodeGen::DoStoreCodeEntry(LStoreCodeEntry* instr) { | 3847 void LCodeGen::DoStoreCodeEntry(LStoreCodeEntry* instr) { |
3938 Register function = ToRegister(instr->function()); | 3848 Register function = ToRegister(instr->function()); |
3939 Register code_object = ToRegister(instr->code_object()); | 3849 Register code_object = ToRegister(instr->code_object()); |
3940 __ lea(code_object, FieldOperand(code_object, Code::kHeaderSize)); | 3850 __ lea(code_object, FieldOperand(code_object, Code::kHeaderSize)); |
3941 __ mov(FieldOperand(function, JSFunction::kCodeEntryOffset), code_object); | 3851 __ mov(FieldOperand(function, JSFunction::kCodeEntryOffset), code_object); |
3942 } | 3852 } |
3943 | 3853 |
(...skipping 54 matching lines...)
3998 DeoptimizeIf(zero, instr->environment()); | 3908 DeoptimizeIf(zero, instr->environment()); |
3999 | 3909 |
4000 // We know now that value is not a smi, so we can omit the check below. | 3910 // We know now that value is not a smi, so we can omit the check below. |
4001 check_needed = OMIT_SMI_CHECK; | 3911 check_needed = OMIT_SMI_CHECK; |
4002 } | 3912 } |
4003 } | 3913 } |
4004 } else if (representation.IsDouble()) { | 3914 } else if (representation.IsDouble()) { |
4005 ASSERT(access.IsInobject()); | 3915 ASSERT(access.IsInobject()); |
4006 ASSERT(!instr->hydrogen()->has_transition()); | 3916 ASSERT(!instr->hydrogen()->has_transition()); |
4007 ASSERT(!instr->hydrogen()->NeedsWriteBarrier()); | 3917 ASSERT(!instr->hydrogen()->NeedsWriteBarrier()); |
4008 XMMRegister value = ToDoubleRegister(instr->value()); | 3918 X87Register value = ToX87Register(instr->value()); |
4009 __ movsd(FieldOperand(object, offset), value); | 3919 X87Mov(FieldOperand(object, offset), value); |
4010 return; | 3920 return; |
4011 } | 3921 } |
4012 | 3922 |
4013 if (instr->hydrogen()->has_transition()) { | 3923 if (instr->hydrogen()->has_transition()) { |
4014 Handle<Map> transition = instr->hydrogen()->transition_map(); | 3924 Handle<Map> transition = instr->hydrogen()->transition_map(); |
4015 AddDeprecationDependency(transition); | 3925 AddDeprecationDependency(transition); |
4016 if (!instr->hydrogen()->NeedsWriteBarrierForMap()) { | 3926 if (!instr->hydrogen()->NeedsWriteBarrierForMap()) { |
4017 __ mov(FieldOperand(object, HeapObject::kMapOffset), transition); | 3927 __ mov(FieldOperand(object, HeapObject::kMapOffset), transition); |
4018 } else { | 3928 } else { |
4019 Register temp = ToRegister(instr->temp()); | 3929 Register temp = ToRegister(instr->temp()); |
4020 Register temp_map = ToRegister(instr->temp_map()); | 3930 Register temp_map = ToRegister(instr->temp_map()); |
4021 __ mov(temp_map, transition); | 3931 __ mov(temp_map, transition); |
4022 __ mov(FieldOperand(object, HeapObject::kMapOffset), temp_map); | 3932 __ mov(FieldOperand(object, HeapObject::kMapOffset), temp_map); |
4023 // Update the write barrier for the map field. | 3933 // Update the write barrier for the map field. |
4024 __ RecordWriteField(object, | 3934 __ RecordWriteField(object, |
4025 HeapObject::kMapOffset, | 3935 HeapObject::kMapOffset, |
4026 temp_map, | 3936 temp_map, |
4027 temp, | 3937 temp, |
4028 kSaveFPRegs, | |
4029 OMIT_REMEMBERED_SET, | 3938 OMIT_REMEMBERED_SET, |
4030 OMIT_SMI_CHECK); | 3939 OMIT_SMI_CHECK); |
4031 } | 3940 } |
4032 } | 3941 } |
4033 | 3942 |
4034 // Do the store. | 3943 // Do the store. |
4035 Register write_register = object; | 3944 Register write_register = object; |
4036 if (!access.IsInobject()) { | 3945 if (!access.IsInobject()) { |
4037 write_register = ToRegister(instr->temp()); | 3946 write_register = ToRegister(instr->temp()); |
4038 __ mov(write_register, FieldOperand(object, JSObject::kPropertiesOffset)); | 3947 __ mov(write_register, FieldOperand(object, JSObject::kPropertiesOffset)); |
(...skipping 20 matching lines...)
4059 } | 3968 } |
4060 | 3969 |
4061 if (instr->hydrogen()->NeedsWriteBarrier()) { | 3970 if (instr->hydrogen()->NeedsWriteBarrier()) { |
4062 Register value = ToRegister(instr->value()); | 3971 Register value = ToRegister(instr->value()); |
4063 Register temp = access.IsInobject() ? ToRegister(instr->temp()) : object; | 3972 Register temp = access.IsInobject() ? ToRegister(instr->temp()) : object; |
4064 // Update the write barrier for the object for in-object properties. | 3973 // Update the write barrier for the object for in-object properties. |
4065 __ RecordWriteField(write_register, | 3974 __ RecordWriteField(write_register, |
4066 offset, | 3975 offset, |
4067 value, | 3976 value, |
4068 temp, | 3977 temp, |
4069 kSaveFPRegs, | |
4070 EMIT_REMEMBERED_SET, | 3978 EMIT_REMEMBERED_SET, |
4071 check_needed); | 3979 check_needed); |
4072 } | 3980 } |
4073 } | 3981 } |
4074 | 3982 |
4075 | 3983 |
4076 void LCodeGen::DoStoreNamedGeneric(LStoreNamedGeneric* instr) { | 3984 void LCodeGen::DoStoreNamedGeneric(LStoreNamedGeneric* instr) { |
4077 ASSERT(ToRegister(instr->context()).is(esi)); | 3985 ASSERT(ToRegister(instr->context()).is(esi)); |
4078 ASSERT(ToRegister(instr->object()).is(edx)); | 3986 ASSERT(ToRegister(instr->object()).is(edx)); |
4079 ASSERT(ToRegister(instr->value()).is(eax)); | 3987 ASSERT(ToRegister(instr->value()).is(eax)); |
(...skipping 39 matching lines...)
4119 } | 4027 } |
4120 Operand operand(BuildFastArrayOperand( | 4028 Operand operand(BuildFastArrayOperand( |
4121 instr->elements(), | 4029 instr->elements(), |
4122 key, | 4030 key, |
4123 instr->hydrogen()->key()->representation(), | 4031 instr->hydrogen()->key()->representation(), |
4124 elements_kind, | 4032 elements_kind, |
4125 0, | 4033 0, |
4126 instr->additional_index())); | 4034 instr->additional_index())); |
4127 if (elements_kind == EXTERNAL_FLOAT32_ELEMENTS || | 4035 if (elements_kind == EXTERNAL_FLOAT32_ELEMENTS || |
4128 elements_kind == FLOAT32_ELEMENTS) { | 4036 elements_kind == FLOAT32_ELEMENTS) { |
4129 XMMRegister xmm_scratch = double_scratch0(); | 4037 __ fld(0); |
4130 __ cvtsd2ss(xmm_scratch, ToDoubleRegister(instr->value())); | 4038 __ fstp_s(operand); |
4131 __ movss(operand, xmm_scratch); | |
4132 } else if (elements_kind == EXTERNAL_FLOAT64_ELEMENTS || | 4039 } else if (elements_kind == EXTERNAL_FLOAT64_ELEMENTS || |
4133 elements_kind == FLOAT64_ELEMENTS) { | 4040 elements_kind == FLOAT64_ELEMENTS) { |
4134 __ movsd(operand, ToDoubleRegister(instr->value())); | 4041 X87Mov(operand, ToX87Register(instr->value())); |
4135 } else { | 4042 } else { |
4136 Register value = ToRegister(instr->value()); | 4043 Register value = ToRegister(instr->value()); |
4137 switch (elements_kind) { | 4044 switch (elements_kind) { |
4138 case EXTERNAL_UINT8_CLAMPED_ELEMENTS: | 4045 case EXTERNAL_UINT8_CLAMPED_ELEMENTS: |
4139 case EXTERNAL_UINT8_ELEMENTS: | 4046 case EXTERNAL_UINT8_ELEMENTS: |
4140 case EXTERNAL_INT8_ELEMENTS: | 4047 case EXTERNAL_INT8_ELEMENTS: |
4141 case UINT8_ELEMENTS: | 4048 case UINT8_ELEMENTS: |
4142 case INT8_ELEMENTS: | 4049 case INT8_ELEMENTS: |
4143 case UINT8_CLAMPED_ELEMENTS: | 4050 case UINT8_CLAMPED_ELEMENTS: |
4144 __ mov_b(operand, value); | 4051 __ mov_b(operand, value); |
(...skipping 33 matching lines...)
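Note on the float32 store in the hunk above: the value to store sits in st(0), fld(0) pushes a duplicate so the tracked value is not consumed, and fstp_s rounds that copy to single precision as it stores and pops it. Sketch:

    __ fld(0);           // duplicate st(0); stack is now: value, value
    __ fstp_s(operand);  // store 4 bytes with round-to-single, pop the copy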
4178 ExternalReference canonical_nan_reference = | 4085 ExternalReference canonical_nan_reference = |
4179 ExternalReference::address_of_canonical_non_hole_nan(); | 4086 ExternalReference::address_of_canonical_non_hole_nan(); |
4180 Operand double_store_operand = BuildFastArrayOperand( | 4087 Operand double_store_operand = BuildFastArrayOperand( |
4181 instr->elements(), | 4088 instr->elements(), |
4182 instr->key(), | 4089 instr->key(), |
4183 instr->hydrogen()->key()->representation(), | 4090 instr->hydrogen()->key()->representation(), |
4184 FAST_DOUBLE_ELEMENTS, | 4091 FAST_DOUBLE_ELEMENTS, |
4185 FixedDoubleArray::kHeaderSize - kHeapObjectTag, | 4092 FixedDoubleArray::kHeaderSize - kHeapObjectTag, |
4186 instr->additional_index()); | 4093 instr->additional_index()); |
4187 | 4094 |
4188 XMMRegister value = ToDoubleRegister(instr->value()); | 4095 // SSE2 is unavailable (e.g. in the serializer), so store the hole with integer moves |
| 4096 if (instr->hydrogen()->IsConstantHoleStore()) { |
| 4097 // This means we should store the (double) hole. No floating point |
| 4098 // registers required. |
| 4099 double nan_double = FixedDoubleArray::hole_nan_as_double(); |
| 4100 uint64_t int_val = BitCast<uint64_t, double>(nan_double); |
| 4101 int32_t lower = static_cast<int32_t>(int_val); |
| 4102 int32_t upper = static_cast<int32_t>(int_val >> (kBitsPerInt)); |
4189 | 4103 |
4190 if (instr->NeedsCanonicalization()) { | 4104 __ mov(double_store_operand, Immediate(lower)); |
4191 Label have_value; | 4105 Operand double_store_operand2 = BuildFastArrayOperand( |
| 4106 instr->elements(), |
| 4107 instr->key(), |
| 4108 instr->hydrogen()->key()->representation(), |
| 4109 FAST_DOUBLE_ELEMENTS, |
| 4110 FixedDoubleArray::kHeaderSize - kHeapObjectTag + kPointerSize, |
| 4111 instr->additional_index()); |
| 4112 __ mov(double_store_operand2, Immediate(upper)); |
| 4113 } else { |
| 4114 Label no_special_nan_handling; |
| 4115 X87Register value = ToX87Register(instr->value()); |
| 4116 X87Fxch(value); |
4192 | 4117 |
4193 __ ucomisd(value, value); | 4118 if (instr->NeedsCanonicalization()) { |
4194 __ j(parity_odd, &have_value, Label::kNear); // NaN. | 4119 __ fld(0); |
| 4120 __ fld(0); |
| 4121 __ FCmp(); |
4195 | 4122 |
4196 __ movsd(value, Operand::StaticVariable(canonical_nan_reference)); | 4123 __ j(parity_odd, &no_special_nan_handling, Label::kNear); |
4197 __ bind(&have_value); | 4124 __ sub(esp, Immediate(kDoubleSize)); |
| 4125 __ fst_d(MemOperand(esp, 0)); |
| 4126 __ cmp(MemOperand(esp, sizeof(kHoleNanLower32)), |
| 4127 Immediate(kHoleNanUpper32)); |
| 4128 __ add(esp, Immediate(kDoubleSize)); |
| 4129 Label canonicalize; |
| 4130 __ j(not_equal, &canonicalize, Label::kNear); |
| 4131 __ jmp(&no_special_nan_handling, Label::kNear); |
| 4132 __ bind(&canonicalize); |
| 4133 __ fstp(0); |
| 4134 __ fld_d(Operand::StaticVariable(canonical_nan_reference)); |
| 4135 } |
| 4136 |
| 4137 __ bind(&no_special_nan_handling); |
| 4138 __ fst_d(double_store_operand); |
4198 } | 4139 } |
4199 | |
4200 __ movsd(double_store_operand, value); | |
4201 } | 4140 } |
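Note: the constant-hole path above writes the 64-bit hole NaN as two 32-bit immediates at offsets 0 and kPointerSize, never touching the FPU. A standalone sketch of the split (plain C++; memcpy is the same well-defined bit copy as V8's BitCast):

    #include <stdint.h>
    #include <string.h>

    static void SplitDouble(double value, int32_t* lower, int32_t* upper) {
      uint64_t bits;
      memcpy(&bits, &value, sizeof(bits));        // reinterpret all 64 bits
      *lower = static_cast<int32_t>(bits);        // stored at offset 0
      *upper = static_cast<int32_t>(bits >> 32);  // stored at offset 4
    }

In the canonicalization path, the two fld(0) copies feed FCmp, which pops both operands, so comparing the value with itself leaves the stack intact; only a NaN compares unordered, and a NaN whose upper word already matches kHoleNanUpper32 is stored untouched while any other NaN is replaced by the canonical NaN.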
4202 | 4141 |
4203 | 4142 |
4204 void LCodeGen::DoStoreKeyedFixedArray(LStoreKeyed* instr) { | 4143 void LCodeGen::DoStoreKeyedFixedArray(LStoreKeyed* instr) { |
4205 Register elements = ToRegister(instr->elements()); | 4144 Register elements = ToRegister(instr->elements()); |
4206 Register key = instr->key()->IsRegister() ? ToRegister(instr->key()) : no_reg; | 4145 Register key = instr->key()->IsRegister() ? ToRegister(instr->key()) : no_reg; |
4207 | 4146 |
4208 Operand operand = BuildFastArrayOperand( | 4147 Operand operand = BuildFastArrayOperand( |
4209 instr->elements(), | 4148 instr->elements(), |
4210 instr->key(), | 4149 instr->key(), |
(...skipping 20 matching lines...)
4231 Register value = ToRegister(instr->value()); | 4170 Register value = ToRegister(instr->value()); |
4232 ASSERT(!instr->key()->IsConstantOperand()); | 4171 ASSERT(!instr->key()->IsConstantOperand()); |
4233 SmiCheck check_needed = | 4172 SmiCheck check_needed = |
4234 instr->hydrogen()->value()->IsHeapObject() | 4173 instr->hydrogen()->value()->IsHeapObject() |
4235 ? OMIT_SMI_CHECK : INLINE_SMI_CHECK; | 4174 ? OMIT_SMI_CHECK : INLINE_SMI_CHECK; |
4236 // Compute address of modified element and store it into key register. | 4175 // Compute address of modified element and store it into key register. |
4237 __ lea(key, operand); | 4176 __ lea(key, operand); |
4238 __ RecordWrite(elements, | 4177 __ RecordWrite(elements, |
4239 key, | 4178 key, |
4240 value, | 4179 value, |
4241 kSaveFPRegs, | |
4242 EMIT_REMEMBERED_SET, | 4180 EMIT_REMEMBERED_SET, |
4243 check_needed); | 4181 check_needed); |
4244 } | 4182 } |
4245 } | 4183 } |
4246 | 4184 |
4247 | 4185 |
4248 void LCodeGen::DoStoreKeyed(LStoreKeyed* instr) { | 4186 void LCodeGen::DoStoreKeyed(LStoreKeyed* instr) { |
4249 // By cases...external, fast-double, fast | 4187 // By cases...external, fast-double, fast |
4250 if (instr->is_typed_elements()) { | 4188 if (instr->is_typed_elements()) { |
4251 DoStoreKeyedExternalArray(instr); | 4189 DoStoreKeyedExternalArray(instr); |
(...skipping 43 matching lines...)
4295 is_simple_map_transition ? Label::kNear : Label::kFar; | 4233 is_simple_map_transition ? Label::kNear : Label::kFar; |
4296 __ cmp(FieldOperand(object_reg, HeapObject::kMapOffset), from_map); | 4234 __ cmp(FieldOperand(object_reg, HeapObject::kMapOffset), from_map); |
4297 __ j(not_equal, &not_applicable, branch_distance); | 4235 __ j(not_equal, &not_applicable, branch_distance); |
4298 if (is_simple_map_transition) { | 4236 if (is_simple_map_transition) { |
4299 Register new_map_reg = ToRegister(instr->new_map_temp()); | 4237 Register new_map_reg = ToRegister(instr->new_map_temp()); |
4300 __ mov(FieldOperand(object_reg, HeapObject::kMapOffset), | 4238 __ mov(FieldOperand(object_reg, HeapObject::kMapOffset), |
4301 Immediate(to_map)); | 4239 Immediate(to_map)); |
4302 // Write barrier. | 4240 // Write barrier. |
4303 ASSERT_NE(instr->temp(), NULL); | 4241 ASSERT_NE(instr->temp(), NULL); |
4304 __ RecordWriteForMap(object_reg, to_map, new_map_reg, | 4242 __ RecordWriteForMap(object_reg, to_map, new_map_reg, |
4305 ToRegister(instr->temp()), | 4243 ToRegister(instr->temp())); |
4306 kDontSaveFPRegs); | |
4307 } else { | 4244 } else { |
4308 ASSERT(ToRegister(instr->context()).is(esi)); | 4245 ASSERT(ToRegister(instr->context()).is(esi)); |
4309 ASSERT(object_reg.is(eax)); | 4246 ASSERT(object_reg.is(eax)); |
4310 PushSafepointRegistersScope scope(this); | 4247 PushSafepointRegistersScope scope(this); |
4311 __ mov(ebx, to_map); | 4248 __ mov(ebx, to_map); |
4312 bool is_js_array = from_map->instance_type() == JS_ARRAY_TYPE; | 4249 bool is_js_array = from_map->instance_type() == JS_ARRAY_TYPE; |
4313 TransitionElementsKindStub stub(isolate(), from_kind, to_kind, is_js_array); | 4250 TransitionElementsKindStub stub(isolate(), from_kind, to_kind, is_js_array); |
4314 __ CallStub(&stub); | 4251 __ CallStub(&stub); |
4315 RecordSafepointWithLazyDeopt(instr, | 4252 RecordSafepointWithLazyDeopt(instr, |
4316 RECORD_SAFEPOINT_WITH_REGISTERS_AND_NO_ARGUMENTS); | 4253 RECORD_SAFEPOINT_WITH_REGISTERS_AND_NO_ARGUMENTS); |
4317 } | 4254 } |
4318 __ bind(&not_applicable); | 4255 __ bind(&not_applicable); |
4319 } | 4256 } |
4320 | 4257 |
4321 | 4258 |
4322 void LCodeGen::DoArrayShift(LArrayShift* instr) { | 4259 void LCodeGen::DoArrayShift(LArrayShift* instr) { |
4323 ASSERT(ToRegister(instr->context()).is(esi)); | 4260 ASSERT(ToRegister(instr->context()).is(esi)); |
4324 ASSERT(ToRegister(instr->object()).is(eax)); | 4261 ASSERT(ToRegister(instr->object()).is(eax)); |
4325 ASSERT(ToRegister(instr->result()).is(eax)); | 4262 ASSERT(ToRegister(instr->result()).is(eax)); |
4326 ArrayShiftStub stub(isolate(), instr->hydrogen()->kind()); | 4263 ArrayShiftStub stub(isolate(), instr->hydrogen()->kind()); |
4327 CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr); | 4264 CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr); |
4328 } | 4265 } |
4329 | 4266 |
4330 | 4267 |
4331 void LCodeGen::DoStringCharCodeAt(LStringCharCodeAt* instr) { | 4268 void LCodeGen::DoStringCharCodeAt(LStringCharCodeAt* instr) { |
4332 class DeferredStringCharCodeAt V8_FINAL : public LDeferredCode { | 4269 class DeferredStringCharCodeAt V8_FINAL : public LDeferredCode { |
4333 public: | 4270 public: |
4334 DeferredStringCharCodeAt(LCodeGen* codegen, | 4271 DeferredStringCharCodeAt(LCodeGen* codegen, |
4335 LStringCharCodeAt* instr) | 4272 LStringCharCodeAt* instr, |
4336 : LDeferredCode(codegen), instr_(instr) { } | 4273 const X87Stack& x87_stack) |
| 4274 : LDeferredCode(codegen, x87_stack), instr_(instr) { } |
4337 virtual void Generate() V8_OVERRIDE { | 4275 virtual void Generate() V8_OVERRIDE { |
4338 codegen()->DoDeferredStringCharCodeAt(instr_); | 4276 codegen()->DoDeferredStringCharCodeAt(instr_); |
4339 } | 4277 } |
4340 virtual LInstruction* instr() V8_OVERRIDE { return instr_; } | 4278 virtual LInstruction* instr() V8_OVERRIDE { return instr_; } |
4341 private: | 4279 private: |
4342 LStringCharCodeAt* instr_; | 4280 LStringCharCodeAt* instr_; |
4343 }; | 4281 }; |
4344 | 4282 |
4345 DeferredStringCharCodeAt* deferred = | 4283 DeferredStringCharCodeAt* deferred = |
4346 new(zone()) DeferredStringCharCodeAt(this, instr); | 4284 new(zone()) DeferredStringCharCodeAt(this, instr, x87_stack_); |
4347 | 4285 |
4348 StringCharLoadGenerator::Generate(masm(), | 4286 StringCharLoadGenerator::Generate(masm(), |
4349 factory(), | 4287 factory(), |
4350 ToRegister(instr->string()), | 4288 ToRegister(instr->string()), |
4351 ToRegister(instr->index()), | 4289 ToRegister(instr->index()), |
4352 ToRegister(instr->result()), | 4290 ToRegister(instr->result()), |
4353 deferred->entry()); | 4291 deferred->entry()); |
4354 __ bind(deferred->exit()); | 4292 __ bind(deferred->exit()); |
4355 } | 4293 } |
4356 | 4294 |
(...skipping 26 matching lines...)
4383 __ AssertSmi(eax); | 4321 __ AssertSmi(eax); |
4384 __ SmiUntag(eax); | 4322 __ SmiUntag(eax); |
4385 __ StoreToSafepointRegisterSlot(result, eax); | 4323 __ StoreToSafepointRegisterSlot(result, eax); |
4386 } | 4324 } |
4387 | 4325 |
4388 | 4326 |
4389 void LCodeGen::DoStringCharFromCode(LStringCharFromCode* instr) { | 4327 void LCodeGen::DoStringCharFromCode(LStringCharFromCode* instr) { |
4390 class DeferredStringCharFromCode V8_FINAL : public LDeferredCode { | 4328 class DeferredStringCharFromCode V8_FINAL : public LDeferredCode { |
4391 public: | 4329 public: |
4392 DeferredStringCharFromCode(LCodeGen* codegen, | 4330 DeferredStringCharFromCode(LCodeGen* codegen, |
4393 LStringCharFromCode* instr) | 4331 LStringCharFromCode* instr, |
4394 : LDeferredCode(codegen), instr_(instr) { } | 4332 const X87Stack& x87_stack) |
| 4333 : LDeferredCode(codegen, x87_stack), instr_(instr) { } |
4395 virtual void Generate() V8_OVERRIDE { | 4334 virtual void Generate() V8_OVERRIDE { |
4396 codegen()->DoDeferredStringCharFromCode(instr_); | 4335 codegen()->DoDeferredStringCharFromCode(instr_); |
4397 } | 4336 } |
4398 virtual LInstruction* instr() V8_OVERRIDE { return instr_; } | 4337 virtual LInstruction* instr() V8_OVERRIDE { return instr_; } |
4399 private: | 4338 private: |
4400 LStringCharFromCode* instr_; | 4339 LStringCharFromCode* instr_; |
4401 }; | 4340 }; |
4402 | 4341 |
4403 DeferredStringCharFromCode* deferred = | 4342 DeferredStringCharFromCode* deferred = |
4404 new(zone()) DeferredStringCharFromCode(this, instr); | 4343 new(zone()) DeferredStringCharFromCode(this, instr, x87_stack_); |
4405 | 4344 |
4406 ASSERT(instr->hydrogen()->value()->representation().IsInteger32()); | 4345 ASSERT(instr->hydrogen()->value()->representation().IsInteger32()); |
4407 Register char_code = ToRegister(instr->char_code()); | 4346 Register char_code = ToRegister(instr->char_code()); |
4408 Register result = ToRegister(instr->result()); | 4347 Register result = ToRegister(instr->result()); |
4409 ASSERT(!char_code.is(result)); | 4348 ASSERT(!char_code.is(result)); |
4410 | 4349 |
4411 __ cmp(char_code, String::kMaxOneByteCharCode); | 4350 __ cmp(char_code, String::kMaxOneByteCharCode); |
4412 __ j(above, deferred->entry()); | 4351 __ j(above, deferred->entry()); |
4413 __ Move(result, Immediate(factory()->single_character_string_cache())); | 4352 __ Move(result, Immediate(factory()->single_character_string_cache())); |
4414 __ mov(result, FieldOperand(result, | 4353 __ mov(result, FieldOperand(result, |
(...skipping 31 matching lines...)
4446 instr->hydrogen()->pretenure_flag()); | 4385 instr->hydrogen()->pretenure_flag()); |
4447 CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr); | 4386 CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr); |
4448 } | 4387 } |
4449 | 4388 |
4450 | 4389 |
4451 void LCodeGen::DoInteger32ToDouble(LInteger32ToDouble* instr) { | 4390 void LCodeGen::DoInteger32ToDouble(LInteger32ToDouble* instr) { |
4452 LOperand* input = instr->value(); | 4391 LOperand* input = instr->value(); |
4453 LOperand* output = instr->result(); | 4392 LOperand* output = instr->result(); |
4454 ASSERT(input->IsRegister() || input->IsStackSlot()); | 4393 ASSERT(input->IsRegister() || input->IsStackSlot()); |
4455 ASSERT(output->IsDoubleRegister()); | 4394 ASSERT(output->IsDoubleRegister()); |
4456 __ Cvtsi2sd(ToDoubleRegister(output), ToOperand(input)); | 4395 if (input->IsRegister()) { |
| 4396 Register input_reg = ToRegister(input); |
| 4397 __ push(input_reg); |
| 4398 X87Mov(ToX87Register(output), Operand(esp, 0), kX87IntOperand); |
| 4399 __ pop(input_reg); |
| 4400 } else { |
| 4401 X87Mov(ToX87Register(output), ToOperand(input), kX87IntOperand); |
| 4402 } |
4457 } | 4403 } |
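Note: x87 has no direct GPR-to-FPU move, so the int32-to-double conversion bounces through memory. Assumed expansion of the X87Mov(..., kX87IntOperand) path above:

    __ push(input_reg);          // spill the int32 to the stack
    __ fild_s(Operand(esp, 0));  // load-and-convert signed int32 into st(0)
    __ pop(input_reg);           // rebalance esp; the register is unchanged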
4458 | 4404 |
4459 | 4405 |
4460 void LCodeGen::DoUint32ToDouble(LUint32ToDouble* instr) { | 4406 void LCodeGen::DoUint32ToDouble(LUint32ToDouble* instr) { |
4461 LOperand* input = instr->value(); | 4407 LOperand* input = instr->value(); |
4462 LOperand* output = instr->result(); | 4408 LOperand* output = instr->result(); |
4463 LOperand* temp = instr->temp(); | 4409 X87Register res = ToX87Register(output); |
4464 __ LoadUint32(ToDoubleRegister(output), | 4410 X87PrepareToWrite(res); |
4465 ToRegister(input), | 4411 __ LoadUint32NoSSE2(ToRegister(input)); |
4466 ToDoubleRegister(temp)); | 4412 X87CommitWrite(res); |
4467 } | 4413 } |
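Note: X87PrepareToWrite/X87CommitWrite are assumed to bracket any sequence that defines a fresh x87 value: the first frees the virtual stack slot for the result, the second binds whatever ended up in st(0) to the named register in the tracked stack model. Illustrative use:

    X87PrepareToWrite(res);  // reserve the slot for 'res'
    __ fld1();               // any sequence that leaves the result in st(0)
    X87CommitWrite(res);     // record that st(0) is now 'res'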
4468 | 4414 |
4469 | 4415 |
4470 void LCodeGen::DoNumberTagI(LNumberTagI* instr) { | 4416 void LCodeGen::DoNumberTagI(LNumberTagI* instr) { |
4471 class DeferredNumberTagI V8_FINAL : public LDeferredCode { | 4417 class DeferredNumberTagI V8_FINAL : public LDeferredCode { |
4472 public: | 4418 public: |
4473 DeferredNumberTagI(LCodeGen* codegen, | 4419 DeferredNumberTagI(LCodeGen* codegen, |
4474 LNumberTagI* instr) | 4420 LNumberTagI* instr, |
4475 : LDeferredCode(codegen), instr_(instr) { } | 4421 const X87Stack& x87_stack) |
| 4422 : LDeferredCode(codegen, x87_stack), instr_(instr) { } |
4476 virtual void Generate() V8_OVERRIDE { | 4423 virtual void Generate() V8_OVERRIDE { |
4477 codegen()->DoDeferredNumberTagIU(instr_, instr_->value(), instr_->temp(), | 4424 codegen()->DoDeferredNumberTagIU(instr_, instr_->value(), instr_->temp(), |
4478 NULL, SIGNED_INT32); | 4425 SIGNED_INT32); |
4479 } | 4426 } |
4480 virtual LInstruction* instr() V8_OVERRIDE { return instr_; } | 4427 virtual LInstruction* instr() V8_OVERRIDE { return instr_; } |
4481 private: | 4428 private: |
4482 LNumberTagI* instr_; | 4429 LNumberTagI* instr_; |
4483 }; | 4430 }; |
4484 | 4431 |
4485 LOperand* input = instr->value(); | 4432 LOperand* input = instr->value(); |
4486 ASSERT(input->IsRegister() && input->Equals(instr->result())); | 4433 ASSERT(input->IsRegister() && input->Equals(instr->result())); |
4487 Register reg = ToRegister(input); | 4434 Register reg = ToRegister(input); |
4488 | 4435 |
4489 DeferredNumberTagI* deferred = | 4436 DeferredNumberTagI* deferred = |
4490 new(zone()) DeferredNumberTagI(this, instr); | 4437 new(zone()) DeferredNumberTagI(this, instr, x87_stack_); |
4491 __ SmiTag(reg); | 4438 __ SmiTag(reg); |
4492 __ j(overflow, deferred->entry()); | 4439 __ j(overflow, deferred->entry()); |
4493 __ bind(deferred->exit()); | 4440 __ bind(deferred->exit()); |
4494 } | 4441 } |
4495 | 4442 |
4496 | 4443 |
4497 void LCodeGen::DoNumberTagU(LNumberTagU* instr) { | 4444 void LCodeGen::DoNumberTagU(LNumberTagU* instr) { |
4498 class DeferredNumberTagU V8_FINAL : public LDeferredCode { | 4445 class DeferredNumberTagU V8_FINAL : public LDeferredCode { |
4499 public: | 4446 public: |
4500 DeferredNumberTagU(LCodeGen* codegen, LNumberTagU* instr) | 4447 DeferredNumberTagU(LCodeGen* codegen, |
4501 : LDeferredCode(codegen), instr_(instr) { } | 4448 LNumberTagU* instr, |
| 4449 const X87Stack& x87_stack) |
| 4450 : LDeferredCode(codegen, x87_stack), instr_(instr) { } |
4502 virtual void Generate() V8_OVERRIDE { | 4451 virtual void Generate() V8_OVERRIDE { |
4503 codegen()->DoDeferredNumberTagIU(instr_, instr_->value(), instr_->temp1(), | 4452 codegen()->DoDeferredNumberTagIU(instr_, instr_->value(), instr_->temp(), |
4504 instr_->temp2(), UNSIGNED_INT32); | 4453 UNSIGNED_INT32); |
4505 } | 4454 } |
4506 virtual LInstruction* instr() V8_OVERRIDE { return instr_; } | 4455 virtual LInstruction* instr() V8_OVERRIDE { return instr_; } |
4507 private: | 4456 private: |
4508 LNumberTagU* instr_; | 4457 LNumberTagU* instr_; |
4509 }; | 4458 }; |
4510 | 4459 |
4511 LOperand* input = instr->value(); | 4460 LOperand* input = instr->value(); |
4512 ASSERT(input->IsRegister() && input->Equals(instr->result())); | 4461 ASSERT(input->IsRegister() && input->Equals(instr->result())); |
4513 Register reg = ToRegister(input); | 4462 Register reg = ToRegister(input); |
4514 | 4463 |
4515 DeferredNumberTagU* deferred = | 4464 DeferredNumberTagU* deferred = |
4516 new(zone()) DeferredNumberTagU(this, instr); | 4465 new(zone()) DeferredNumberTagU(this, instr, x87_stack_); |
4517 __ cmp(reg, Immediate(Smi::kMaxValue)); | 4466 __ cmp(reg, Immediate(Smi::kMaxValue)); |
4518 __ j(above, deferred->entry()); | 4467 __ j(above, deferred->entry()); |
4519 __ SmiTag(reg); | 4468 __ SmiTag(reg); |
4520 __ bind(deferred->exit()); | 4469 __ bind(deferred->exit()); |
4521 } | 4470 } |
4522 | 4471 |
4523 | 4472 |
4524 void LCodeGen::DoDeferredNumberTagIU(LInstruction* instr, | 4473 void LCodeGen::DoDeferredNumberTagIU(LInstruction* instr, |
4525 LOperand* value, | 4474 LOperand* value, |
4526 LOperand* temp1, | 4475 LOperand* temp, |
4527 LOperand* temp2, | |
4528 IntegerSignedness signedness) { | 4476 IntegerSignedness signedness) { |
4529 Label done, slow; | 4477 Label done, slow; |
4530 Register reg = ToRegister(value); | 4478 Register reg = ToRegister(value); |
4531 Register tmp = ToRegister(temp1); | 4479 Register tmp = ToRegister(temp); |
4532 XMMRegister xmm_scratch = double_scratch0(); | |
4533 | 4480 |
4534 if (signedness == SIGNED_INT32) { | 4481 if (signedness == SIGNED_INT32) { |
4535 // There was overflow, so bits 30 and 31 of the original integer | 4482 // There was overflow, so bits 30 and 31 of the original integer |
4536 // disagree. Try to allocate a heap number in new space and store | 4483 // disagree. Try to allocate a heap number in new space and store |
4537 // the value in there. If that fails, call the runtime system. | 4484 // the value in there. If that fails, call the runtime system. |
4538 __ SmiUntag(reg); | 4485 __ SmiUntag(reg); |
4539 __ xor_(reg, 0x80000000); | 4486 __ xor_(reg, 0x80000000); |
4540 __ Cvtsi2sd(xmm_scratch, Operand(reg)); | 4487 __ push(reg); |
| 4488 __ fild_s(Operand(esp, 0)); |
| 4489 __ pop(reg); |
4541 } else { | 4490 } else { |
4542 __ LoadUint32(xmm_scratch, reg, ToDoubleRegister(temp2)); | 4491 // There's no fild variant for unsigned values, so zero-extend to a 64-bit |
| 4492 // int manually. |
| 4493 __ push(Immediate(0)); |
| 4494 __ push(reg); |
| 4495 __ fild_d(Operand(esp, 0)); |
| 4496 __ pop(reg); |
| 4497 __ pop(reg); |
4543 } | 4498 } |
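Note on the unsigned branch just above: fild_d reads a signed 64-bit integer, and a uint32 zero-extended to 64 bits is always non-negative, so the conversion is exact. An equivalent sketch using an explicit stack adjustment instead of the two rebalancing pops:

    __ push(Immediate(0));                     // high 32 bits: zero-extend
    __ push(reg);                              // low 32 bits: the uint32 value
    __ fild_d(Operand(esp, 0));                // 64-bit int at esp -> st(0)
    __ add(esp, Immediate(2 * kPointerSize));  // drop the temporary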
4544 | 4499 |
4545 if (FLAG_inline_new) { | 4500 if (FLAG_inline_new) { |
4546 __ AllocateHeapNumber(reg, tmp, no_reg, &slow); | 4501 __ AllocateHeapNumber(reg, tmp, no_reg, &slow); |
4547 __ jmp(&done, Label::kNear); | 4502 __ jmp(&done, Label::kNear); |
4548 } | 4503 } |
4549 | 4504 |
4550 // Slow case: Call the runtime system to do the number allocation. | 4505 // Slow case: Call the runtime system to do the number allocation. |
4551 __ bind(&slow); | 4506 __ bind(&slow); |
4552 { | 4507 { |
4553 // TODO(3095996): Put a valid pointer value in the stack slot where the | 4508 // TODO(3095996): Put a valid pointer value in the stack slot where the |
4554 // result register is stored, as this register is in the pointer map, but | 4509 // result register is stored, as this register is in the pointer map, but |
4555 // contains an integer value. | 4510 // contains an integer value. |
4556 __ Move(reg, Immediate(0)); | 4511 __ Move(reg, Immediate(0)); |
4557 | 4512 |
4558 // Preserve the value of all registers. | 4513 // Preserve the value of all registers. |
4559 PushSafepointRegistersScope scope(this); | 4514 PushSafepointRegistersScope scope(this); |
4560 | 4515 |
4561 // NumberTagI and NumberTagD use the context from the frame, rather than | 4516 // NumberTagI and NumberTagD use the context from the frame, rather than |
4562 // the environment's HContext or HInlinedContext value. | 4517 // the environment's HContext or HInlinedContext value. |
4563 // They only call Runtime::kHiddenAllocateHeapNumber. | 4518 // They only call Runtime::kHiddenAllocateHeapNumber. |
4564 // The corresponding HChange instructions are added in a phase that does | 4519 // The corresponding HChange instructions are added in a phase that does |
4565 // not have easy access to the local context. | 4520 // not have easy access to the local context. |
4566 __ mov(esi, Operand(ebp, StandardFrameConstants::kContextOffset)); | 4521 __ mov(esi, Operand(ebp, StandardFrameConstants::kContextOffset)); |
4567 __ CallRuntimeSaveDoubles(Runtime::kHiddenAllocateHeapNumber); | 4522 __ CallRuntime(Runtime::kHiddenAllocateHeapNumber); |
4568 RecordSafepointWithRegisters( | 4523 RecordSafepointWithRegisters( |
4569 instr->pointer_map(), 0, Safepoint::kNoLazyDeopt); | 4524 instr->pointer_map(), 0, Safepoint::kNoLazyDeopt); |
4570 __ StoreToSafepointRegisterSlot(reg, eax); | 4525 __ StoreToSafepointRegisterSlot(reg, eax); |
4571 } | 4526 } |
4572 | 4527 |
4573 // Done. Put the value in xmm_scratch into the value of the allocated heap | |
4574 // number. | |
4575 __ bind(&done); | 4528 __ bind(&done); |
4576 __ movsd(FieldOperand(reg, HeapNumber::kValueOffset), xmm_scratch); | 4529 __ fstp_d(FieldOperand(reg, HeapNumber::kValueOffset)); |
4577 } | 4530 } |
4578 | 4531 |
4579 | 4532 |
4580 void LCodeGen::DoNumberTagD(LNumberTagD* instr) { | 4533 void LCodeGen::DoNumberTagD(LNumberTagD* instr) { |
4581 class DeferredNumberTagD V8_FINAL : public LDeferredCode { | 4534 class DeferredNumberTagD V8_FINAL : public LDeferredCode { |
4582 public: | 4535 public: |
4583 DeferredNumberTagD(LCodeGen* codegen, LNumberTagD* instr) | 4536 DeferredNumberTagD(LCodeGen* codegen, |
4584 : LDeferredCode(codegen), instr_(instr) { } | 4537 LNumberTagD* instr, |
| 4538 const X87Stack& x87_stack) |
| 4539 : LDeferredCode(codegen, x87_stack), instr_(instr) { } |
4585 virtual void Generate() V8_OVERRIDE { | 4540 virtual void Generate() V8_OVERRIDE { |
4586 codegen()->DoDeferredNumberTagD(instr_); | 4541 codegen()->DoDeferredNumberTagD(instr_); |
4587 } | 4542 } |
4588 virtual LInstruction* instr() V8_OVERRIDE { return instr_; } | 4543 virtual LInstruction* instr() V8_OVERRIDE { return instr_; } |
4589 private: | 4544 private: |
4590 LNumberTagD* instr_; | 4545 LNumberTagD* instr_; |
4591 }; | 4546 }; |
4592 | 4547 |
4593 Register reg = ToRegister(instr->result()); | 4548 Register reg = ToRegister(instr->result()); |
4594 | 4549 |
 | 4550 // Put the value on top of the x87 stack. |
| 4551 X87Register src = ToX87Register(instr->value()); |
| 4552 X87LoadForUsage(src); |
| 4553 |
4595 DeferredNumberTagD* deferred = | 4554 DeferredNumberTagD* deferred = |
4596 new(zone()) DeferredNumberTagD(this, instr); | 4555 new(zone()) DeferredNumberTagD(this, instr, x87_stack_); |
4597 if (FLAG_inline_new) { | 4556 if (FLAG_inline_new) { |
4598 Register tmp = ToRegister(instr->temp()); | 4557 Register tmp = ToRegister(instr->temp()); |
4599 __ AllocateHeapNumber(reg, tmp, no_reg, deferred->entry()); | 4558 __ AllocateHeapNumber(reg, tmp, no_reg, deferred->entry()); |
4600 } else { | 4559 } else { |
4601 __ jmp(deferred->entry()); | 4560 __ jmp(deferred->entry()); |
4602 } | 4561 } |
4603 __ bind(deferred->exit()); | 4562 __ bind(deferred->exit()); |
4604 XMMRegister input_reg = ToDoubleRegister(instr->value()); | 4563 __ fstp_d(FieldOperand(reg, HeapNumber::kValueOffset)); |
4605 __ movsd(FieldOperand(reg, HeapNumber::kValueOffset), input_reg); | |
4606 } | 4564 } |
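Note: the tagging sequence stays FPU-stack balanced across the deferred path, which both exits rejoin before the final store:

    // X87LoadForUsage(src)  -> depth +1: value in st(0), no longer tracked
    // AllocateHeapNumber or the deferred runtime call: depth unchanged
    // fstp_d(field)         -> depth -1: st(0) popped into the HeapNumber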
4607 | 4565 |
4608 | 4566 |
4609 void LCodeGen::DoDeferredNumberTagD(LNumberTagD* instr) { | 4567 void LCodeGen::DoDeferredNumberTagD(LNumberTagD* instr) { |
4610 // TODO(3095996): Get rid of this. For now, we need to make the | 4568 // TODO(3095996): Get rid of this. For now, we need to make the |
4611 // result register contain a valid pointer because it is already | 4569 // result register contain a valid pointer because it is already |
4612 // contained in the register pointer map. | 4570 // contained in the register pointer map. |
4613 Register reg = ToRegister(instr->result()); | 4571 Register reg = ToRegister(instr->result()); |
4614 __ Move(reg, Immediate(0)); | 4572 __ Move(reg, Immediate(0)); |
4615 | 4573 |
4616 PushSafepointRegistersScope scope(this); | 4574 PushSafepointRegistersScope scope(this); |
4617 // NumberTagI and NumberTagD use the context from the frame, rather than | 4575 // NumberTagI and NumberTagD use the context from the frame, rather than |
4618 // the environment's HContext or HInlinedContext value. | 4576 // the environment's HContext or HInlinedContext value. |
4619 // They only call Runtime::kHiddenAllocateHeapNumber. | 4577 // They only call Runtime::kHiddenAllocateHeapNumber. |
4620 // The corresponding HChange instructions are added in a phase that does | 4578 // The corresponding HChange instructions are added in a phase that does |
4621 // not have easy access to the local context. | 4579 // not have easy access to the local context. |
4622 __ mov(esi, Operand(ebp, StandardFrameConstants::kContextOffset)); | 4580 __ mov(esi, Operand(ebp, StandardFrameConstants::kContextOffset)); |
4623 __ CallRuntimeSaveDoubles(Runtime::kHiddenAllocateHeapNumber); | 4581 __ CallRuntime(Runtime::kHiddenAllocateHeapNumber); |
4624 RecordSafepointWithRegisters( | 4582 RecordSafepointWithRegisters( |
4625 instr->pointer_map(), 0, Safepoint::kNoLazyDeopt); | 4583 instr->pointer_map(), 0, Safepoint::kNoLazyDeopt); |
4626 __ StoreToSafepointRegisterSlot(reg, eax); | 4584 __ StoreToSafepointRegisterSlot(reg, eax); |
4627 } | 4585 } |
4628 | 4586 |
4629 | 4587 |
4630 void LCodeGen::DoSmiTag(LSmiTag* instr) { | 4588 void LCodeGen::DoSmiTag(LSmiTag* instr) { |
4631 HChange* hchange = instr->hydrogen(); | 4589 HChange* hchange = instr->hydrogen(); |
4632 Register input = ToRegister(instr->value()); | 4590 Register input = ToRegister(instr->value()); |
4633 if (hchange->CheckFlag(HValue::kCanOverflow) && | 4591 if (hchange->CheckFlag(HValue::kCanOverflow) && |
(...skipping 16 matching lines...)
4650 if (instr->needs_check()) { | 4608 if (instr->needs_check()) { |
4651 __ test(result, Immediate(kSmiTagMask)); | 4609 __ test(result, Immediate(kSmiTagMask)); |
4652 DeoptimizeIf(not_zero, instr->environment()); | 4610 DeoptimizeIf(not_zero, instr->environment()); |
4653 } else { | 4611 } else { |
4654 __ AssertSmi(result); | 4612 __ AssertSmi(result); |
4655 } | 4613 } |
4656 __ SmiUntag(result); | 4614 __ SmiUntag(result); |
4657 } | 4615 } |
4658 | 4616 |
4659 | 4617 |
4660 void LCodeGen::EmitNumberUntagD(Register input_reg, | 4618 void LCodeGen::EmitNumberUntagDNoSSE2(Register input_reg, |
4661 Register temp_reg, | 4619 Register temp_reg, |
4662 XMMRegister result_reg, | 4620 X87Register res_reg, |
4663 bool can_convert_undefined_to_nan, | 4621 bool can_convert_undefined_to_nan, |
4664 bool deoptimize_on_minus_zero, | 4622 bool deoptimize_on_minus_zero, |
4665 LEnvironment* env, | 4623 LEnvironment* env, |
4666 NumberUntagDMode mode) { | 4624 NumberUntagDMode mode) { |
4667 Label convert, load_smi, done; | 4625 Label load_smi, done; |
4668 | 4626 |
| 4627 X87PrepareToWrite(res_reg); |
4669 if (mode == NUMBER_CANDIDATE_IS_ANY_TAGGED) { | 4628 if (mode == NUMBER_CANDIDATE_IS_ANY_TAGGED) { |
4670 // Smi check. | 4629 // Smi check. |
4671 __ JumpIfSmi(input_reg, &load_smi, Label::kNear); | 4630 __ JumpIfSmi(input_reg, &load_smi, Label::kNear); |
4672 | 4631 |
4673 // Heap number map check. | 4632 // Heap number map check. |
4674 __ cmp(FieldOperand(input_reg, HeapObject::kMapOffset), | 4633 __ cmp(FieldOperand(input_reg, HeapObject::kMapOffset), |
4675 factory()->heap_number_map()); | 4634 factory()->heap_number_map()); |
4676 if (can_convert_undefined_to_nan) { | 4635 if (!can_convert_undefined_to_nan) { |
4677 __ j(not_equal, &convert, Label::kNear); | 4636 DeoptimizeIf(not_equal, env); |
4678 } else { | 4637 } else { |
| 4638 Label heap_number, convert; |
| 4639 __ j(equal, &heap_number, Label::kNear); |
| 4640 |
| 4641 // Convert undefined (or hole) to NaN. |
| 4642 __ cmp(input_reg, factory()->undefined_value()); |
4679 DeoptimizeIf(not_equal, env); | 4643 DeoptimizeIf(not_equal, env); |
| 4644 |
| 4645 __ bind(&convert); |
| 4646 ExternalReference nan = |
| 4647 ExternalReference::address_of_canonical_non_hole_nan(); |
| 4648 __ fld_d(Operand::StaticVariable(nan)); |
| 4649 __ jmp(&done, Label::kNear); |
| 4650 |
| 4651 __ bind(&heap_number); |
4680 } | 4652 } |
| 4653 // Heap number to x87 conversion. |
| 4654 __ fld_d(FieldOperand(input_reg, HeapNumber::kValueOffset)); |
| 4655 if (deoptimize_on_minus_zero) { |
| 4656 __ fldz(); |
| 4657 __ FCmp(); |
| 4658 __ fld_d(FieldOperand(input_reg, HeapNumber::kValueOffset)); |
| 4659 __ j(not_zero, &done, Label::kNear); |
4681 | 4660 |
4682 // Heap number to XMM conversion. | 4661 // Use general-purpose registers to check if we have -0.0. |
4683 __ movsd(result_reg, FieldOperand(input_reg, HeapNumber::kValueOffset)); | 4662 __ mov(temp_reg, FieldOperand(input_reg, HeapNumber::kExponentOffset)); |
| 4663 __ test(temp_reg, Immediate(HeapNumber::kSignMask)); |
| 4664 __ j(zero, &done, Label::kNear); |
4684 | 4665 |
4685 if (deoptimize_on_minus_zero) { | 4666 // Pop FPU stack before deoptimizing. |
4686 XMMRegister xmm_scratch = double_scratch0(); | 4667 __ fstp(0); |
4687 __ xorps(xmm_scratch, xmm_scratch); | |
4688 __ ucomisd(result_reg, xmm_scratch); | |
4689 __ j(not_zero, &done, Label::kNear); | |
4690 __ movmskpd(temp_reg, result_reg); | |
4691 __ test_b(temp_reg, 1); | |
4692 DeoptimizeIf(not_zero, env); | 4668 DeoptimizeIf(not_zero, env); |
4693 } | 4669 } |
4694 __ jmp(&done, Label::kNear); | 4670 __ jmp(&done, Label::kNear); |
4695 | |
4696 if (can_convert_undefined_to_nan) { | |
4697 __ bind(&convert); | |
4698 | |
4699 // Convert undefined (and hole) to NaN. | |
4700 __ cmp(input_reg, factory()->undefined_value()); | |
4701 DeoptimizeIf(not_equal, env); | |
4702 | |
4703 ExternalReference nan = | |
4704 ExternalReference::address_of_canonical_non_hole_nan(); | |
4705 __ movsd(result_reg, Operand::StaticVariable(nan)); | |
4706 __ jmp(&done, Label::kNear); | |
4707 } | |
4708 } else { | 4671 } else { |
4709 ASSERT(mode == NUMBER_CANDIDATE_IS_SMI); | 4672 ASSERT(mode == NUMBER_CANDIDATE_IS_SMI); |
4710 } | 4673 } |
4711 | 4674 |
4712 __ bind(&load_smi); | 4675 __ bind(&load_smi); |
4713 // Smi to XMM conversion. Clobbering a temp is faster than re-tagging the | 4676 // Smi to x87 conversion. Clobbering a temp is faster than re-tagging the |
4714 // input register since we avoid dependencies. | 4677 // input register since we avoid dependencies. |
4715 __ mov(temp_reg, input_reg); | 4678 __ mov(temp_reg, input_reg); |
4716 __ SmiUntag(temp_reg); // Untag smi before converting to float. | 4679 __ SmiUntag(temp_reg); // Untag smi before converting to float. |
4717 __ Cvtsi2sd(result_reg, Operand(temp_reg)); | 4680 __ push(temp_reg); |
| 4681 __ fild_s(Operand(esp, 0)); |
| 4682 __ add(esp, Immediate(kPointerSize)); |
4718 __ bind(&done); | 4683 __ bind(&done); |
| 4684 X87CommitWrite(res_reg); |
4719 } | 4685 } |
4720 | 4686 |
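Note on the minus-zero path above: without SSE2 there is no movmskpd, so the sign of a zero is recovered by loading the heap number's exponent word into a general-purpose register and testing HeapNumber::kSignMask. A minimal standalone C++ sketch of the same test (illustrative only; IsMinusZero is not a V8 function):

    #include <cstdint>
    #include <cstring>

    // True only for -0.0: it compares equal to 0.0 (the fldz/FCmp check), yet
    // the IEEE-754 sign bit (bit 31 of the high word, HeapNumber::kSignMask
    // in V8) is set.
    bool IsMinusZero(double value) {
      if (value != 0.0) return false;
      uint64_t bits;
      std::memcpy(&bits, &value, sizeof bits);  // well-defined type pun
      return (static_cast<uint32_t>(bits >> 32) & 0x80000000u) != 0;
    }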
4721 | 4687 |
4722 void LCodeGen::DoDeferredTaggedToI(LTaggedToI* instr, Label* done) { | 4688 void LCodeGen::DoDeferredTaggedToI(LTaggedToI* instr, Label* done) { |
4723 Register input_reg = ToRegister(instr->value()); | 4689 Register input_reg = ToRegister(instr->value()); |
4724 | 4690 |
4725 // The input was optimistically untagged; revert it. | 4691 // The input was optimistically untagged; revert it. |
4726 STATIC_ASSERT(kSmiTagSize == 1); | 4692 STATIC_ASSERT(kSmiTagSize == 1); |
4727 __ lea(input_reg, Operand(input_reg, times_2, kHeapObjectTag)); | 4693 __ lea(input_reg, Operand(input_reg, times_2, kHeapObjectTag)); |
4728 | 4694 |
(...skipping 21 matching lines...) |
4750 __ Move(input_reg, Immediate(1)); | 4716 __ Move(input_reg, Immediate(1)); |
4751 __ jmp(done); | 4717 __ jmp(done); |
4752 | 4718 |
4753 __ bind(&check_false); | 4719 __ bind(&check_false); |
4754 __ cmp(input_reg, factory()->false_value()); | 4720 __ cmp(input_reg, factory()->false_value()); |
4755 __ RecordComment("Deferred TaggedToI: cannot truncate"); | 4721 __ RecordComment("Deferred TaggedToI: cannot truncate"); |
4756 DeoptimizeIf(not_equal, instr->environment()); | 4722 DeoptimizeIf(not_equal, instr->environment()); |
4757 __ Move(input_reg, Immediate(0)); | 4723 __ Move(input_reg, Immediate(0)); |
4758 } else { | 4724 } else { |
4759 Label bailout; | 4725 Label bailout; |
4760 XMMRegister scratch = (instr->temp() != NULL) | 4726 __ TaggedToI(input_reg, input_reg, |
4761 ? ToDoubleRegister(instr->temp()) | |
4762 : no_xmm_reg; | |
4763 __ TaggedToI(input_reg, input_reg, scratch, | |
4764 instr->hydrogen()->GetMinusZeroMode(), &bailout); | 4727 instr->hydrogen()->GetMinusZeroMode(), &bailout); |
4765 __ jmp(done); | 4728 __ jmp(done); |
4766 __ bind(&bailout); | 4729 __ bind(&bailout); |
4767 DeoptimizeIf(no_condition, instr->environment()); | 4730 DeoptimizeIf(no_condition, instr->environment()); |
4768 } | 4731 } |
4769 } | 4732 } |
4770 | 4733 |
4771 | 4734 |
4772 void LCodeGen::DoTaggedToI(LTaggedToI* instr) { | 4735 void LCodeGen::DoTaggedToI(LTaggedToI* instr) { |
4773 class DeferredTaggedToI V8_FINAL : public LDeferredCode { | 4736 class DeferredTaggedToI V8_FINAL : public LDeferredCode { |
4774 public: | 4737 public: |
4775 DeferredTaggedToI(LCodeGen* codegen, LTaggedToI* instr) | 4738 DeferredTaggedToI(LCodeGen* codegen, |
4776 : LDeferredCode(codegen), instr_(instr) { } | 4739 LTaggedToI* instr, |
| 4740 const X87Stack& x87_stack) |
| 4741 : LDeferredCode(codegen, x87_stack), instr_(instr) { } |
4777 virtual void Generate() V8_OVERRIDE { | 4742 virtual void Generate() V8_OVERRIDE { |
4778 codegen()->DoDeferredTaggedToI(instr_, done()); | 4743 codegen()->DoDeferredTaggedToI(instr_, done()); |
4779 } | 4744 } |
4780 virtual LInstruction* instr() V8_OVERRIDE { return instr_; } | 4745 virtual LInstruction* instr() V8_OVERRIDE { return instr_; } |
4781 private: | 4746 private: |
4782 LTaggedToI* instr_; | 4747 LTaggedToI* instr_; |
4783 }; | 4748 }; |
4784 | 4749 |
4785 LOperand* input = instr->value(); | 4750 LOperand* input = instr->value(); |
4786 ASSERT(input->IsRegister()); | 4751 ASSERT(input->IsRegister()); |
4787 Register input_reg = ToRegister(input); | 4752 Register input_reg = ToRegister(input); |
4788 ASSERT(input_reg.is(ToRegister(instr->result()))); | 4753 ASSERT(input_reg.is(ToRegister(instr->result()))); |
4789 | 4754 |
4790 if (instr->hydrogen()->value()->representation().IsSmi()) { | 4755 if (instr->hydrogen()->value()->representation().IsSmi()) { |
4791 __ SmiUntag(input_reg); | 4756 __ SmiUntag(input_reg); |
4792 } else { | 4757 } else { |
4793 DeferredTaggedToI* deferred = | 4758 DeferredTaggedToI* deferred = |
4794 new(zone()) DeferredTaggedToI(this, instr); | 4759 new(zone()) DeferredTaggedToI(this, instr, x87_stack_); |
4795 // Optimistically untag the input. | 4760 // Optimistically untag the input. |
4796 // If the input is a HeapObject, SmiUntag will set the carry flag. | 4761 // If the input is a HeapObject, SmiUntag will set the carry flag. |
4797 STATIC_ASSERT(kSmiTagSize == 1 && kSmiTag == 0); | 4762 STATIC_ASSERT(kSmiTagSize == 1 && kSmiTag == 0); |
4798 __ SmiUntag(input_reg); | 4763 __ SmiUntag(input_reg); |
4799 // Branch to deferred code if the input was tagged. | 4764 // Branch to deferred code if the input was tagged. |
4800 // The deferred code will take care of restoring the tag. | 4765 // The deferred code will take care of restoring the tag. |
4801 __ j(carry, deferred->entry()); | 4766 __ j(carry, deferred->entry()); |
4802 __ bind(deferred->exit()); | 4767 __ bind(deferred->exit()); |
4803 } | 4768 } |
4804 } | 4769 } |
4805 | 4770 |
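For reference on the optimistic untagging used by DoTaggedToI and undone in DoDeferredTaggedToI: with kSmiTagSize == 1 and kSmiTag == 0, a smi is its value shifted left by one, while heap-object pointers carry kHeapObjectTag in the low bit, so SmiUntag's right shift moves that bit into the carry flag, and lea(reg, Operand(reg, times_2, kHeapObjectTag)) rebuilds the original pointer. A hedged C++ model of the round trip (simplified, not V8 code):

    #include <cstdint>

    constexpr int32_t kSmiTagSize = 1;     // the tag occupies the low bit
    constexpr int32_t kHeapObjectTag = 1;  // heap-object words are odd

    // Optimistic untag: the shifted-out low bit is what sar leaves in the
    // carry flag; it is 1 exactly when the word was a heap-object pointer.
    int32_t OptimisticUntag(int32_t word, bool* carry) {
      *carry = (word & 1) != 0;
      return word >> kSmiTagSize;  // sar preserves the smi payload's sign
    }

    // The revert: word * 2 + 1 restores the pointer, since every heap-object
    // word had its low bit set before the shift.
    int32_t RevertUntag(int32_t shifted) {
      return shifted * 2 + kHeapObjectTag;
    }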
4806 | 4771 |
4807 void LCodeGen::DoNumberUntagD(LNumberUntagD* instr) { | 4772 void LCodeGen::DoNumberUntagD(LNumberUntagD* instr) { |
4808 LOperand* input = instr->value(); | 4773 LOperand* input = instr->value(); |
4809 ASSERT(input->IsRegister()); | 4774 ASSERT(input->IsRegister()); |
4810 LOperand* temp = instr->temp(); | 4775 LOperand* temp = instr->temp(); |
4811 ASSERT(temp->IsRegister()); | 4776 ASSERT(temp->IsRegister()); |
4812 LOperand* result = instr->result(); | 4777 LOperand* result = instr->result(); |
4813 ASSERT(result->IsDoubleRegister()); | 4778 ASSERT(result->IsDoubleRegister()); |
4814 | 4779 |
4815 Register input_reg = ToRegister(input); | 4780 Register input_reg = ToRegister(input); |
4816 bool deoptimize_on_minus_zero = | 4781 bool deoptimize_on_minus_zero = |
4817 instr->hydrogen()->deoptimize_on_minus_zero(); | 4782 instr->hydrogen()->deoptimize_on_minus_zero(); |
4818 Register temp_reg = ToRegister(temp); | 4783 Register temp_reg = ToRegister(temp); |
4819 | 4784 |
4820 HValue* value = instr->hydrogen()->value(); | 4785 HValue* value = instr->hydrogen()->value(); |
4821 NumberUntagDMode mode = value->representation().IsSmi() | 4786 NumberUntagDMode mode = value->representation().IsSmi() |
4822 ? NUMBER_CANDIDATE_IS_SMI : NUMBER_CANDIDATE_IS_ANY_TAGGED; | 4787 ? NUMBER_CANDIDATE_IS_SMI : NUMBER_CANDIDATE_IS_ANY_TAGGED; |
4823 | 4788 |
4824 XMMRegister result_reg = ToDoubleRegister(result); | 4789 EmitNumberUntagDNoSSE2(input_reg, |
4825 EmitNumberUntagD(input_reg, | 4790 temp_reg, |
4826 temp_reg, | 4791 ToX87Register(result), |
4827 result_reg, | 4792 instr->hydrogen()->can_convert_undefined_to_nan(), |
4828 instr->hydrogen()->can_convert_undefined_to_nan(), | 4793 deoptimize_on_minus_zero, |
4829 deoptimize_on_minus_zero, | 4794 instr->environment(), |
4830 instr->environment(), | 4795 mode); |
4831 mode); | |
4832 } | 4796 } |
4833 | 4797 |
4834 | 4798 |
4835 void LCodeGen::DoDoubleToI(LDoubleToI* instr) { | 4799 void LCodeGen::DoDoubleToI(LDoubleToI* instr) { |
4836 LOperand* input = instr->value(); | 4800 LOperand* input = instr->value(); |
4837 ASSERT(input->IsDoubleRegister()); | 4801 ASSERT(input->IsDoubleRegister()); |
4838 LOperand* result = instr->result(); | 4802 LOperand* result = instr->result(); |
4839 ASSERT(result->IsRegister()); | 4803 ASSERT(result->IsRegister()); |
4840 Register result_reg = ToRegister(result); | 4804 Register result_reg = ToRegister(result); |
4841 | 4805 |
4842 if (instr->truncating()) { | 4806 if (instr->truncating()) { |
4843 XMMRegister input_reg = ToDoubleRegister(input); | 4807 X87Register input_reg = ToX87Register(input); |
4844 __ TruncateDoubleToI(result_reg, input_reg); | 4808 X87Fxch(input_reg); |
| 4809 __ TruncateX87TOSToI(result_reg); |
4845 } else { | 4810 } else { |
4846 Label bailout, done; | 4811 Label bailout, done; |
4847 XMMRegister input_reg = ToDoubleRegister(input); | 4812 X87Register input_reg = ToX87Register(input); |
4848 XMMRegister xmm_scratch = double_scratch0(); | 4813 X87Fxch(input_reg); |
4849 __ DoubleToI(result_reg, input_reg, xmm_scratch, | 4814 __ X87TOSToI(result_reg, instr->hydrogen()->GetMinusZeroMode(), |
4850 instr->hydrogen()->GetMinusZeroMode(), &bailout, Label::kNear); | 4815 &bailout, Label::kNear); |
4851 __ jmp(&done, Label::kNear); | 4816 __ jmp(&done, Label::kNear); |
4852 __ bind(&bailout); | 4817 __ bind(&bailout); |
4853 DeoptimizeIf(no_condition, instr->environment()); | 4818 DeoptimizeIf(no_condition, instr->environment()); |
4854 __ bind(&done); | 4819 __ bind(&done); |
4855 } | 4820 } |
4856 } | 4821 } |
4857 | 4822 |
4858 | 4823 |
4859 void LCodeGen::DoDoubleToSmi(LDoubleToSmi* instr) { | 4824 void LCodeGen::DoDoubleToSmi(LDoubleToSmi* instr) { |
4860 LOperand* input = instr->value(); | 4825 LOperand* input = instr->value(); |
4861 ASSERT(input->IsDoubleRegister()); | 4826 ASSERT(input->IsDoubleRegister()); |
4862 LOperand* result = instr->result(); | 4827 LOperand* result = instr->result(); |
4863 ASSERT(result->IsRegister()); | 4828 ASSERT(result->IsRegister()); |
4864 Register result_reg = ToRegister(result); | 4829 Register result_reg = ToRegister(result); |
4865 | 4830 |
4866 Label bailout, done; | 4831 Label bailout, done; |
4867 XMMRegister input_reg = ToDoubleRegister(input); | 4832 X87Register input_reg = ToX87Register(input); |
4868 XMMRegister xmm_scratch = double_scratch0(); | 4833 X87Fxch(input_reg); |
4869 __ DoubleToI(result_reg, input_reg, xmm_scratch, | 4834 __ X87TOSToI(result_reg, instr->hydrogen()->GetMinusZeroMode(), |
4870 instr->hydrogen()->GetMinusZeroMode(), &bailout, Label::kNear); | 4835 &bailout, Label::kNear); |
4871 __ jmp(&done, Label::kNear); | 4836 __ jmp(&done, Label::kNear); |
4872 __ bind(&bailout); | 4837 __ bind(&bailout); |
4873 DeoptimizeIf(no_condition, instr->environment()); | 4838 DeoptimizeIf(no_condition, instr->environment()); |
4874 __ bind(&done); | 4839 __ bind(&done); |
4875 | 4840 |
4876 __ SmiTag(result_reg); | 4841 __ SmiTag(result_reg); |
4877 DeoptimizeIf(overflow, instr->environment()); | 4842 DeoptimizeIf(overflow, instr->environment()); |
4878 } | 4843 } |
4879 | 4844 |
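DoDoubleToSmi finishes with SmiTag plus a deopt on overflow: tagging doubles the value, so only results that fit in 31 signed bits survive. A small sketch of the same check (assumes the GCC/Clang __builtin_add_overflow intrinsic):

    #include <cstdint>
    #include <optional>

    // SmiTag is value << 1, i.e. value + value; the CPU overflow flag after
    // that add is exactly what DeoptimizeIf(overflow, ...) tests above.
    std::optional<int32_t> TrySmiTag(int32_t value) {
      int32_t tagged;
      if (__builtin_add_overflow(value, value, &tagged)) return std::nullopt;
      return tagged;  // low bit is 0, matching kSmiTag
    }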
4880 | 4845 |
(...skipping 70 matching lines...) |
4951 } | 4916 } |
4952 DeoptimizeIf(not_equal, instr->environment()); | 4917 DeoptimizeIf(not_equal, instr->environment()); |
4953 } | 4918 } |
4954 | 4919 |
4955 | 4920 |
4956 void LCodeGen::DoDeferredInstanceMigration(LCheckMaps* instr, Register object) { | 4921 void LCodeGen::DoDeferredInstanceMigration(LCheckMaps* instr, Register object) { |
4957 { | 4922 { |
4958 PushSafepointRegistersScope scope(this); | 4923 PushSafepointRegistersScope scope(this); |
4959 __ push(object); | 4924 __ push(object); |
4960 __ xor_(esi, esi); | 4925 __ xor_(esi, esi); |
4961 __ CallRuntimeSaveDoubles(Runtime::kTryMigrateInstance); | 4926 __ CallRuntime(Runtime::kTryMigrateInstance); |
4962 RecordSafepointWithRegisters( | 4927 RecordSafepointWithRegisters( |
4963 instr->pointer_map(), 1, Safepoint::kNoLazyDeopt); | 4928 instr->pointer_map(), 1, Safepoint::kNoLazyDeopt); |
4964 | 4929 |
4965 __ test(eax, Immediate(kSmiTagMask)); | 4930 __ test(eax, Immediate(kSmiTagMask)); |
4966 } | 4931 } |
4967 DeoptimizeIf(zero, instr->environment()); | 4932 DeoptimizeIf(zero, instr->environment()); |
4968 } | 4933 } |
4969 | 4934 |
4970 | 4935 |
4971 void LCodeGen::DoCheckMaps(LCheckMaps* instr) { | 4936 void LCodeGen::DoCheckMaps(LCheckMaps* instr) { |
4972 class DeferredCheckMaps V8_FINAL : public LDeferredCode { | 4937 class DeferredCheckMaps V8_FINAL : public LDeferredCode { |
4973 public: | 4938 public: |
4974 DeferredCheckMaps(LCodeGen* codegen, LCheckMaps* instr, Register object) | 4939 DeferredCheckMaps(LCodeGen* codegen, |
4975 : LDeferredCode(codegen), instr_(instr), object_(object) { | 4940 LCheckMaps* instr, |
| 4941 Register object, |
| 4942 const X87Stack& x87_stack) |
| 4943 : LDeferredCode(codegen, x87_stack), instr_(instr), object_(object) { |
4976 SetExit(check_maps()); | 4944 SetExit(check_maps()); |
4977 } | 4945 } |
4978 virtual void Generate() V8_OVERRIDE { | 4946 virtual void Generate() V8_OVERRIDE { |
4979 codegen()->DoDeferredInstanceMigration(instr_, object_); | 4947 codegen()->DoDeferredInstanceMigration(instr_, object_); |
4980 } | 4948 } |
4981 Label* check_maps() { return &check_maps_; } | 4949 Label* check_maps() { return &check_maps_; } |
4982 virtual LInstruction* instr() V8_OVERRIDE { return instr_; } | 4950 virtual LInstruction* instr() V8_OVERRIDE { return instr_; } |
4983 private: | 4951 private: |
4984 LCheckMaps* instr_; | 4952 LCheckMaps* instr_; |
4985 Label check_maps_; | 4953 Label check_maps_; |
4986 Register object_; | 4954 Register object_; |
4987 }; | 4955 }; |
4988 | 4956 |
4989 if (instr->hydrogen()->IsStabilityCheck()) { | 4957 if (instr->hydrogen()->IsStabilityCheck()) { |
4990 const UniqueSet<Map>* maps = instr->hydrogen()->maps(); | 4958 const UniqueSet<Map>* maps = instr->hydrogen()->maps(); |
4991 for (int i = 0; i < maps->size(); ++i) { | 4959 for (int i = 0; i < maps->size(); ++i) { |
4992 AddStabilityDependency(maps->at(i).handle()); | 4960 AddStabilityDependency(maps->at(i).handle()); |
4993 } | 4961 } |
4994 return; | 4962 return; |
4995 } | 4963 } |
4996 | 4964 |
4997 LOperand* input = instr->value(); | 4965 LOperand* input = instr->value(); |
4998 ASSERT(input->IsRegister()); | 4966 ASSERT(input->IsRegister()); |
4999 Register reg = ToRegister(input); | 4967 Register reg = ToRegister(input); |
5000 | 4968 |
5001 DeferredCheckMaps* deferred = NULL; | 4969 DeferredCheckMaps* deferred = NULL; |
5002 if (instr->hydrogen()->HasMigrationTarget()) { | 4970 if (instr->hydrogen()->HasMigrationTarget()) { |
5003 deferred = new(zone()) DeferredCheckMaps(this, instr, reg); | 4971 deferred = new(zone()) DeferredCheckMaps(this, instr, reg, x87_stack_); |
5004 __ bind(deferred->check_maps()); | 4972 __ bind(deferred->check_maps()); |
5005 } | 4973 } |
5006 | 4974 |
5007 const UniqueSet<Map>* maps = instr->hydrogen()->maps(); | 4975 const UniqueSet<Map>* maps = instr->hydrogen()->maps(); |
5008 Label success; | 4976 Label success; |
5009 for (int i = 0; i < maps->size() - 1; i++) { | 4977 for (int i = 0; i < maps->size() - 1; i++) { |
5010 Handle<Map> map = maps->at(i).handle(); | 4978 Handle<Map> map = maps->at(i).handle(); |
5011 __ CompareMap(reg, map); | 4979 __ CompareMap(reg, map); |
5012 __ j(equal, &success, Label::kNear); | 4980 __ j(equal, &success, Label::kNear); |
5013 } | 4981 } |
5014 | 4982 |
5015 Handle<Map> map = maps->at(maps->size() - 1).handle(); | 4983 Handle<Map> map = maps->at(maps->size() - 1).handle(); |
5016 __ CompareMap(reg, map); | 4984 __ CompareMap(reg, map); |
5017 if (instr->hydrogen()->HasMigrationTarget()) { | 4985 if (instr->hydrogen()->HasMigrationTarget()) { |
5018 __ j(not_equal, deferred->entry()); | 4986 __ j(not_equal, deferred->entry()); |
5019 } else { | 4987 } else { |
5020 DeoptimizeIf(not_equal, instr->environment()); | 4988 DeoptimizeIf(not_equal, instr->environment()); |
5021 } | 4989 } |
5022 | 4990 |
5023 __ bind(&success); | 4991 __ bind(&success); |
5024 } | 4992 } |
5025 | 4993 |
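The sequence above compares the object's map against every candidate, jumping to success on the first hit; only a miss on the final map fails, and that failure goes through the deferred instance migration when a migration target exists. The control flow as a hedged C++ model (the enum and pointer types are illustrative, not V8 API):

    #include <vector>

    enum class CheckResult { kSuccess, kTryMigrate, kDeoptimize };

    // Mirrors DoCheckMaps: any matching map succeeds; the failure action
    // depends on whether the instruction has a migration target.
    CheckResult CheckMaps(const void* object_map,
                          const std::vector<const void*>& maps,
                          bool has_migration_target) {
      for (const void* map : maps) {
        if (map == object_map) return CheckResult::kSuccess;
      }
      return has_migration_target ? CheckResult::kTryMigrate
                                  : CheckResult::kDeoptimize;
    }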
5026 | 4994 |
5027 void LCodeGen::DoClampDToUint8(LClampDToUint8* instr) { | 4995 void LCodeGen::DoClampDToUint8(LClampDToUint8* instr) { |
5028 XMMRegister value_reg = ToDoubleRegister(instr->unclamped()); | 4996 UNREACHABLE(); |
5029 XMMRegister xmm_scratch = double_scratch0(); | |
5030 Register result_reg = ToRegister(instr->result()); | |
5031 __ ClampDoubleToUint8(value_reg, xmm_scratch, result_reg); | |
5032 } | 4997 } |
5033 | 4998 |
5034 | 4999 |
5035 void LCodeGen::DoClampIToUint8(LClampIToUint8* instr) { | 5000 void LCodeGen::DoClampIToUint8(LClampIToUint8* instr) { |
5036 ASSERT(instr->unclamped()->Equals(instr->result())); | 5001 ASSERT(instr->unclamped()->Equals(instr->result())); |
5037 Register value_reg = ToRegister(instr->result()); | 5002 Register value_reg = ToRegister(instr->result()); |
5038 __ ClampUint8(value_reg); | 5003 __ ClampUint8(value_reg); |
5039 } | 5004 } |
5040 | 5005 |
5041 | 5006 |
5042 void LCodeGen::DoClampTToUint8(LClampTToUint8* instr) { | 5007 void LCodeGen::DoClampTToUint8NoSSE2(LClampTToUint8NoSSE2* instr) { |
5043 ASSERT(instr->unclamped()->Equals(instr->result())); | |
5044 Register input_reg = ToRegister(instr->unclamped()); | 5008 Register input_reg = ToRegister(instr->unclamped()); |
5045 XMMRegister temp_xmm_reg = ToDoubleRegister(instr->temp_xmm()); | 5009 Register result_reg = ToRegister(instr->result()); |
5046 XMMRegister xmm_scratch = double_scratch0(); | 5010 Register scratch = ToRegister(instr->scratch()); |
5047 Label is_smi, done, heap_number; | 5011 Register scratch2 = ToRegister(instr->scratch2()); |
| 5012 Register scratch3 = ToRegister(instr->scratch3()); |
| 5013 Label is_smi, done, heap_number, valid_exponent, |
| 5014 largest_value, zero_result, maybe_nan_or_infinity; |
5048 | 5015 |
5049 __ JumpIfSmi(input_reg, &is_smi); | 5016 __ JumpIfSmi(input_reg, &is_smi); |
5050 | 5017 |
5051 // Check for heap number | 5018 // Check for heap number |
5052 __ cmp(FieldOperand(input_reg, HeapObject::kMapOffset), | 5019 __ cmp(FieldOperand(input_reg, HeapObject::kMapOffset), |
5053 factory()->heap_number_map()); | 5020 factory()->heap_number_map()); |
5054 __ j(equal, &heap_number, Label::kNear); | 5021 __ j(equal, &heap_number, Label::kNear); |
5055 | 5022 |
5056 // Check for undefined. Undefined is converted to zero for clamping | 5023 // Check for undefined. Undefined is converted to zero for clamping |
5057 // conversions. | 5024 // conversions. |
5058 __ cmp(input_reg, factory()->undefined_value()); | 5025 __ cmp(input_reg, factory()->undefined_value()); |
5059 DeoptimizeIf(not_equal, instr->environment()); | 5026 DeoptimizeIf(not_equal, instr->environment()); |
5060 __ mov(input_reg, 0); | 5027 __ jmp(&zero_result, Label::kNear); |
5061 __ jmp(&done, Label::kNear); | |
5062 | 5028 |
5063 // Heap number | 5029 // Heap number |
5064 __ bind(&heap_number); | 5030 __ bind(&heap_number); |
5065 __ movsd(xmm_scratch, FieldOperand(input_reg, HeapNumber::kValueOffset)); | 5031 |
5066 __ ClampDoubleToUint8(xmm_scratch, temp_xmm_reg, input_reg); | 5032 // Surprisingly, all of the hand-crafted bit-manipulations below are much |
| 5033 // faster than the x86 FPU built-in instruction, especially since "banker's |
| 5034 // rounding" would be additionally very expensive |
| 5035 |
| 5036 // Get exponent word. |
| 5037 __ mov(scratch, FieldOperand(input_reg, HeapNumber::kExponentOffset)); |
| 5038 __ mov(scratch3, FieldOperand(input_reg, HeapNumber::kMantissaOffset)); |
| 5039 |
| 5040 // Test for negative values --> clamp to zero. |
| 5041 __ test(scratch, scratch); |
| 5042 __ j(negative, &zero_result, Label::kNear); |
| 5043 |
| 5044 // Get exponent alone in scratch2. |
| 5045 __ mov(scratch2, scratch); |
| 5046 __ and_(scratch2, HeapNumber::kExponentMask); |
| 5047 __ shr(scratch2, HeapNumber::kExponentShift); |
| 5048 __ j(zero, &zero_result, Label::kNear); |
| 5049 __ sub(scratch2, Immediate(HeapNumber::kExponentBias - 1)); |
| 5050 __ j(negative, &zero_result, Label::kNear); |
| 5051 |
| 5052 const uint32_t non_int8_exponent = 7; |
| 5053 __ cmp(scratch2, Immediate(non_int8_exponent + 1)); |
| 5054 // If the exponent is too big, check for special values. |
| 5055 __ j(greater, &maybe_nan_or_infinity, Label::kNear); |
| 5056 |
| 5057 __ bind(&valid_exponent); |
| 5058 // Exponent word in scratch, exponent in scratch2. We know that 0 <= exponent |
| 5059 // < 7. The shift bias is the number of bits to shift the mantissa so that, |
| 5060 // with an exponent of 7, the top-most one ends up in bit 30, allowing |
| 5061 // detection of the rounding overflow from 255.5 to 256 (bit 31 goes from |
| 5062 // 0 to 1). |
| 5063 int shift_bias = (30 - HeapNumber::kExponentShift) - 7 - 1; |
| 5064 __ lea(result_reg, MemOperand(scratch2, shift_bias)); |
| 5065 // Here result_reg (ecx) is the shift, scratch is the exponent word. Get the |
| 5066 // top bits of the mantissa. |
| 5067 __ and_(scratch, HeapNumber::kMantissaMask); |
| 5068 // Put back the implicit 1 of the mantissa. |
| 5069 __ or_(scratch, 1 << HeapNumber::kExponentShift); |
| 5070 // Shift up to round. |
| 5071 __ shl_cl(scratch); |
| 5072 // Use "banker's rounding" per spec: if the fractional part of the number |
| 5073 // is exactly 0.5, add the bit in the "ones" place to the "halves" place, |
| 5074 // which has the effect of rounding to even. |
| 5075 __ mov(scratch2, scratch); |
| 5076 const uint32_t one_half_bit_shift = 30 - sizeof(uint8_t) * 8; |
| 5077 const uint32_t one_bit_shift = one_half_bit_shift + 1; |
| 5078 __ and_(scratch2, Immediate((1 << one_bit_shift) - 1)); |
| 5079 __ cmp(scratch2, Immediate(1 << one_half_bit_shift)); |
| 5080 Label no_round; |
| 5081 __ j(less, &no_round, Label::kNear); |
| 5082 Label round_up; |
| 5083 __ mov(scratch2, Immediate(1 << one_half_bit_shift)); |
| 5084 __ j(greater, &round_up, Label::kNear); |
| 5085 __ test(scratch3, scratch3); |
| 5086 __ j(not_zero, &round_up, Label::kNear); |
| 5087 __ mov(scratch2, scratch); |
| 5088 __ and_(scratch2, Immediate(1 << one_bit_shift)); |
| 5089 __ shr(scratch2, 1); |
| 5090 __ bind(&round_up); |
| 5091 __ add(scratch, scratch2); |
| 5092 __ j(overflow, &largest_value, Label::kNear); |
| 5093 __ bind(&no_round); |
| 5094 __ shr(scratch, 23); |
| 5095 __ mov(result_reg, scratch); |
| 5096 __ jmp(&done, Label::kNear); |
| 5097 |
| 5098 __ bind(&maybe_nan_or_infinity); |
| 5099 // Check for NaN/Infinity; all other values map to 255. |
| 5100 __ cmp(scratch2, Immediate(HeapNumber::kInfinityOrNanExponent + 1)); |
| 5101 __ j(not_equal, &largest_value, Label::kNear); |
| 5102 |
| 5103 // Check for NaN, which differs from Infinity in that at least one mantissa |
| 5104 // bit is set. |
| 5105 __ and_(scratch, HeapNumber::kMantissaMask); |
| 5106 __ or_(scratch, FieldOperand(input_reg, HeapNumber::kMantissaOffset)); |
| 5107 __ j(not_zero, &zero_result, Label::kNear); // Mantissa != 0 --> NaN --> 0. |
| 5108 // Infinity -> Fall through to map to 255. |
| 5109 |
| 5110 __ bind(&largest_value); |
| 5111 __ mov(result_reg, Immediate(255)); |
| 5112 __ jmp(&done, Label::kNear); |
| 5113 |
| 5114 __ bind(&zero_result); |
| 5115 __ xor_(result_reg, result_reg); |
5067 __ jmp(&done, Label::kNear); | 5116 __ jmp(&done, Label::kNear); |
5068 | 5117 |
5069 // smi | 5118 // smi |
5070 __ bind(&is_smi); | 5119 __ bind(&is_smi); |
5071 __ SmiUntag(input_reg); | 5120 if (!input_reg.is(result_reg)) { |
5072 __ ClampUint8(input_reg); | 5121 __ mov(result_reg, input_reg); |
| 5122 } |
| 5123 __ SmiUntag(result_reg); |
| 5124 __ ClampUint8(result_reg); |
5073 __ bind(&done); | 5125 __ bind(&done); |
5074 } | 5126 } |
5075 | 5127 |
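As a cross-check for the bit manipulation above: the routine clamps to [0, 255], sending NaN and negative values to 0, saturating infinities and large values at 255, and rounding exact halves to even. A portable C++ reference model (illustrative; the generated code deliberately avoids the FPU and its rounding-mode state):

    #include <cmath>
    #include <cstdint>

    // Reference semantics for the clamp: NaN -> 0, value <= 0 -> 0,
    // value >= 255 -> 255, otherwise round to nearest with ties to even.
    uint8_t ClampDoubleToUint8(double value) {
      if (std::isnan(value) || value <= 0.0) return 0;
      if (value >= 255.0) return 255;
      // nearbyint rounds to nearest-even in the default FP environment,
      // matching the hand-rolled "banker's rounding" above.
      return static_cast<uint8_t>(std::nearbyint(value));
    }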
5076 | 5128 |
5077 void LCodeGen::DoDoubleBits(LDoubleBits* instr) { | 5129 void LCodeGen::DoDoubleBits(LDoubleBits* instr) { |
5078 XMMRegister value_reg = ToDoubleRegister(instr->value()); | 5130 UNREACHABLE(); |
5079 Register result_reg = ToRegister(instr->result()); | |
5080 if (instr->hydrogen()->bits() == HDoubleBits::HIGH) { | |
5081 if (CpuFeatures::IsSupported(SSE4_1)) { | |
5082 CpuFeatureScope scope2(masm(), SSE4_1); | |
5083 __ pextrd(result_reg, value_reg, 1); | |
5084 } else { | |
5085 XMMRegister xmm_scratch = double_scratch0(); | |
5086 __ pshufd(xmm_scratch, value_reg, 1); | |
5087 __ movd(result_reg, xmm_scratch); | |
5088 } | |
5089 } else { | |
5090 __ movd(result_reg, value_reg); | |
5091 } | |
5092 } | 5131 } |
5093 | 5132 |
5094 | 5133 |
5095 void LCodeGen::DoConstructDouble(LConstructDouble* instr) { | 5134 void LCodeGen::DoConstructDouble(LConstructDouble* instr) { |
5096 Register hi_reg = ToRegister(instr->hi()); | 5135 UNREACHABLE(); |
5097 Register lo_reg = ToRegister(instr->lo()); | |
5098 XMMRegister result_reg = ToDoubleRegister(instr->result()); | |
5099 | |
5100 if (CpuFeatures::IsSupported(SSE4_1)) { | |
5101 CpuFeatureScope scope2(masm(), SSE4_1); | |
5102 __ movd(result_reg, lo_reg); | |
5103 __ pinsrd(result_reg, hi_reg, 1); | |
5104 } else { | |
5105 XMMRegister xmm_scratch = double_scratch0(); | |
5106 __ movd(result_reg, hi_reg); | |
5107 __ psllq(result_reg, 32); | |
5108 __ movd(xmm_scratch, lo_reg); | |
5109 __ orps(result_reg, xmm_scratch); | |
5110 } | |
5111 } | 5136 } |
5112 | 5137 |
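DoDoubleBits and DoConstructDouble become UNREACHABLE on x87 because the deleted bodies on the left rely on SSE2/SSE4.1 (movd, pextrd/pinsrd, psllq/orps). What the removed code computed, as a plain C++ sketch:

    #include <cstdint>
    #include <cstring>

    // The pinsrd / psllq+orps direction: glue the hi and lo 32-bit words
    // into the 64-bit IEEE-754 representation of a double.
    double ConstructDouble(uint32_t hi, uint32_t lo) {
      uint64_t bits = (static_cast<uint64_t>(hi) << 32) | lo;
      double result;
      std::memcpy(&result, &bits, sizeof result);
      return result;
    }

    // The pextrd / movd direction: extract either 32-bit half.
    uint32_t DoubleBits(double value, bool high) {
      uint64_t bits;
      std::memcpy(&bits, &value, sizeof bits);
      return static_cast<uint32_t>(high ? (bits >> 32) : bits);
    }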
5113 | 5138 |
5114 void LCodeGen::DoAllocate(LAllocate* instr) { | 5139 void LCodeGen::DoAllocate(LAllocate* instr) { |
5115 class DeferredAllocate V8_FINAL : public LDeferredCode { | 5140 class DeferredAllocate V8_FINAL : public LDeferredCode { |
5116 public: | 5141 public: |
5117 DeferredAllocate(LCodeGen* codegen, LAllocate* instr) | 5142 DeferredAllocate(LCodeGen* codegen, |
5118 : LDeferredCode(codegen), instr_(instr) { } | 5143 LAllocate* instr, |
| 5144 const X87Stack& x87_stack) |
| 5145 : LDeferredCode(codegen, x87_stack), instr_(instr) { } |
5119 virtual void Generate() V8_OVERRIDE { | 5146 virtual void Generate() V8_OVERRIDE { |
5120 codegen()->DoDeferredAllocate(instr_); | 5147 codegen()->DoDeferredAllocate(instr_); |
5121 } | 5148 } |
5122 virtual LInstruction* instr() V8_OVERRIDE { return instr_; } | 5149 virtual LInstruction* instr() V8_OVERRIDE { return instr_; } |
5123 private: | 5150 private: |
5124 LAllocate* instr_; | 5151 LAllocate* instr_; |
5125 }; | 5152 }; |
5126 | 5153 |
5127 DeferredAllocate* deferred = new(zone()) DeferredAllocate(this, instr); | 5154 DeferredAllocate* deferred = |
| 5155 new(zone()) DeferredAllocate(this, instr, x87_stack_); |
5128 | 5156 |
5129 Register result = ToRegister(instr->result()); | 5157 Register result = ToRegister(instr->result()); |
5130 Register temp = ToRegister(instr->temp()); | 5158 Register temp = ToRegister(instr->temp()); |
5131 | 5159 |
5132 // Allocate memory for the object. | 5160 // Allocate memory for the object. |
5133 AllocationFlags flags = TAG_OBJECT; | 5161 AllocationFlags flags = TAG_OBJECT; |
5134 if (instr->hydrogen()->MustAllocateDoubleAligned()) { | 5162 if (instr->hydrogen()->MustAllocateDoubleAligned()) { |
5135 flags = static_cast<AllocationFlags>(flags | DOUBLE_ALIGNMENT); | 5163 flags = static_cast<AllocationFlags>(flags | DOUBLE_ALIGNMENT); |
5136 } | 5164 } |
5137 if (instr->hydrogen()->IsOldPointerSpaceAllocation()) { | 5165 if (instr->hydrogen()->IsOldPointerSpaceAllocation()) { |
(...skipping 332 matching lines...) |
5470 | 5498 |
5471 | 5499 |
5472 void LCodeGen::DoDummyUse(LDummyUse* instr) { | 5500 void LCodeGen::DoDummyUse(LDummyUse* instr) { |
5473 // Nothing to see here, move on! | 5501 // Nothing to see here, move on! |
5474 } | 5502 } |
5475 | 5503 |
5476 | 5504 |
5477 void LCodeGen::DoDeferredStackCheck(LStackCheck* instr) { | 5505 void LCodeGen::DoDeferredStackCheck(LStackCheck* instr) { |
5478 PushSafepointRegistersScope scope(this); | 5506 PushSafepointRegistersScope scope(this); |
5479 __ mov(esi, Operand(ebp, StandardFrameConstants::kContextOffset)); | 5507 __ mov(esi, Operand(ebp, StandardFrameConstants::kContextOffset)); |
5480 __ CallRuntimeSaveDoubles(Runtime::kHiddenStackGuard); | 5508 __ CallRuntime(Runtime::kHiddenStackGuard); |
5481 RecordSafepointWithLazyDeopt( | 5509 RecordSafepointWithLazyDeopt( |
5482 instr, RECORD_SAFEPOINT_WITH_REGISTERS_AND_NO_ARGUMENTS); | 5510 instr, RECORD_SAFEPOINT_WITH_REGISTERS_AND_NO_ARGUMENTS); |
5483 ASSERT(instr->HasEnvironment()); | 5511 ASSERT(instr->HasEnvironment()); |
5484 LEnvironment* env = instr->environment(); | 5512 LEnvironment* env = instr->environment(); |
5485 safepoints_.RecordLazyDeoptimizationIndex(env->deoptimization_index()); | 5513 safepoints_.RecordLazyDeoptimizationIndex(env->deoptimization_index()); |
5486 } | 5514 } |
5487 | 5515 |
5488 | 5516 |
5489 void LCodeGen::DoStackCheck(LStackCheck* instr) { | 5517 void LCodeGen::DoStackCheck(LStackCheck* instr) { |
5490 class DeferredStackCheck V8_FINAL : public LDeferredCode { | 5518 class DeferredStackCheck V8_FINAL : public LDeferredCode { |
5491 public: | 5519 public: |
5492 DeferredStackCheck(LCodeGen* codegen, LStackCheck* instr) | 5520 DeferredStackCheck(LCodeGen* codegen, |
5493 : LDeferredCode(codegen), instr_(instr) { } | 5521 LStackCheck* instr, |
| 5522 const X87Stack& x87_stack) |
| 5523 : LDeferredCode(codegen, x87_stack), instr_(instr) { } |
5494 virtual void Generate() V8_OVERRIDE { | 5524 virtual void Generate() V8_OVERRIDE { |
5495 codegen()->DoDeferredStackCheck(instr_); | 5525 codegen()->DoDeferredStackCheck(instr_); |
5496 } | 5526 } |
5497 virtual LInstruction* instr() V8_OVERRIDE { return instr_; } | 5527 virtual LInstruction* instr() V8_OVERRIDE { return instr_; } |
5498 private: | 5528 private: |
5499 LStackCheck* instr_; | 5529 LStackCheck* instr_; |
5500 }; | 5530 }; |
5501 | 5531 |
5502 ASSERT(instr->HasEnvironment()); | 5532 ASSERT(instr->HasEnvironment()); |
5503 LEnvironment* env = instr->environment(); | 5533 LEnvironment* env = instr->environment(); |
(...skipping 10 matching lines...) |
5514 ASSERT(instr->context()->IsRegister()); | 5544 ASSERT(instr->context()->IsRegister()); |
5515 ASSERT(ToRegister(instr->context()).is(esi)); | 5545 ASSERT(ToRegister(instr->context()).is(esi)); |
5516 CallCode(isolate()->builtins()->StackCheck(), | 5546 CallCode(isolate()->builtins()->StackCheck(), |
5517 RelocInfo::CODE_TARGET, | 5547 RelocInfo::CODE_TARGET, |
5518 instr); | 5548 instr); |
5519 __ bind(&done); | 5549 __ bind(&done); |
5520 } else { | 5550 } else { |
5521 ASSERT(instr->hydrogen()->is_backwards_branch()); | 5551 ASSERT(instr->hydrogen()->is_backwards_branch()); |
5522 // Perform stack overflow check if this goto needs it before jumping. | 5552 // Perform stack overflow check if this goto needs it before jumping. |
5523 DeferredStackCheck* deferred_stack_check = | 5553 DeferredStackCheck* deferred_stack_check = |
5524 new(zone()) DeferredStackCheck(this, instr); | 5554 new(zone()) DeferredStackCheck(this, instr, x87_stack_); |
5525 ExternalReference stack_limit = | 5555 ExternalReference stack_limit = |
5526 ExternalReference::address_of_stack_limit(isolate()); | 5556 ExternalReference::address_of_stack_limit(isolate()); |
5527 __ cmp(esp, Operand::StaticVariable(stack_limit)); | 5557 __ cmp(esp, Operand::StaticVariable(stack_limit)); |
5528 __ j(below, deferred_stack_check->entry()); | 5558 __ j(below, deferred_stack_check->entry()); |
5529 EnsureSpaceForLazyDeopt(Deoptimizer::patch_size()); | 5559 EnsureSpaceForLazyDeopt(Deoptimizer::patch_size()); |
5530 __ bind(instr->done_label()); | 5560 __ bind(instr->done_label()); |
5531 deferred_stack_check->SetExit(instr->done_label()); | 5561 deferred_stack_check->SetExit(instr->done_label()); |
5532 RegisterEnvironmentForDeoptimization(env, Safepoint::kLazyDeopt); | 5562 RegisterEnvironmentForDeoptimization(env, Safepoint::kLazyDeopt); |
5533 // Don't record a deoptimization index for the safepoint here. | 5563 // Don't record a deoptimization index for the safepoint here. |
5534 // This will be done explicitly when emitting call and the safepoint in | 5564 // This will be done explicitly when emitting call and the safepoint in |
(...skipping 80 matching lines...) |
5615 } | 5645 } |
5616 | 5646 |
5617 | 5647 |
5618 void LCodeGen::DoDeferredLoadMutableDouble(LLoadFieldByIndex* instr, | 5648 void LCodeGen::DoDeferredLoadMutableDouble(LLoadFieldByIndex* instr, |
5619 Register object, | 5649 Register object, |
5620 Register index) { | 5650 Register index) { |
5621 PushSafepointRegistersScope scope(this); | 5651 PushSafepointRegistersScope scope(this); |
5622 __ push(object); | 5652 __ push(object); |
5623 __ push(index); | 5653 __ push(index); |
5624 __ xor_(esi, esi); | 5654 __ xor_(esi, esi); |
5625 __ CallRuntimeSaveDoubles(Runtime::kLoadMutableDouble); | 5655 __ CallRuntime(Runtime::kLoadMutableDouble); |
5626 RecordSafepointWithRegisters( | 5656 RecordSafepointWithRegisters( |
5627 instr->pointer_map(), 2, Safepoint::kNoLazyDeopt); | 5657 instr->pointer_map(), 2, Safepoint::kNoLazyDeopt); |
5628 __ StoreToSafepointRegisterSlot(object, eax); | 5658 __ StoreToSafepointRegisterSlot(object, eax); |
5629 } | 5659 } |
5630 | 5660 |
5631 | 5661 |
5632 void LCodeGen::DoLoadFieldByIndex(LLoadFieldByIndex* instr) { | 5662 void LCodeGen::DoLoadFieldByIndex(LLoadFieldByIndex* instr) { |
5633 class DeferredLoadMutableDouble V8_FINAL : public LDeferredCode { | 5663 class DeferredLoadMutableDouble V8_FINAL : public LDeferredCode { |
5634 public: | 5664 public: |
5635 DeferredLoadMutableDouble(LCodeGen* codegen, | 5665 DeferredLoadMutableDouble(LCodeGen* codegen, |
5636 LLoadFieldByIndex* instr, | 5666 LLoadFieldByIndex* instr, |
5637 Register object, | 5667 Register object, |
5638 Register index) | 5668 Register index, |
5639 : LDeferredCode(codegen), | 5669 const X87Stack& x87_stack) |
| 5670 : LDeferredCode(codegen, x87_stack), |
5640 instr_(instr), | 5671 instr_(instr), |
5641 object_(object), | 5672 object_(object), |
5642 index_(index) { | 5673 index_(index) { |
5643 } | 5674 } |
5644 virtual void Generate() V8_OVERRIDE { | 5675 virtual void Generate() V8_OVERRIDE { |
5645 codegen()->DoDeferredLoadMutableDouble(instr_, object_, index_); | 5676 codegen()->DoDeferredLoadMutableDouble(instr_, object_, index_); |
5646 } | 5677 } |
5647 virtual LInstruction* instr() V8_OVERRIDE { return instr_; } | 5678 virtual LInstruction* instr() V8_OVERRIDE { return instr_; } |
5648 private: | 5679 private: |
5649 LLoadFieldByIndex* instr_; | 5680 LLoadFieldByIndex* instr_; |
5650 Register object_; | 5681 Register object_; |
5651 Register index_; | 5682 Register index_; |
5652 }; | 5683 }; |
5653 | 5684 |
5654 Register object = ToRegister(instr->object()); | 5685 Register object = ToRegister(instr->object()); |
5655 Register index = ToRegister(instr->index()); | 5686 Register index = ToRegister(instr->index()); |
5656 | 5687 |
5657 DeferredLoadMutableDouble* deferred; | 5688 DeferredLoadMutableDouble* deferred; |
5658 deferred = new(zone()) DeferredLoadMutableDouble( | 5689 deferred = new(zone()) DeferredLoadMutableDouble( |
5659 this, instr, object, index); | 5690 this, instr, object, index, x87_stack_); |
5660 | 5691 |
5661 Label out_of_object, done; | 5692 Label out_of_object, done; |
5662 __ test(index, Immediate(Smi::FromInt(1))); | 5693 __ test(index, Immediate(Smi::FromInt(1))); |
5663 __ j(not_zero, deferred->entry()); | 5694 __ j(not_zero, deferred->entry()); |
5664 | 5695 |
5665 __ sar(index, 1); | 5696 __ sar(index, 1); |
5666 | 5697 |
5667 __ cmp(index, Immediate(0)); | 5698 __ cmp(index, Immediate(0)); |
5668 __ j(less, &out_of_object, Label::kNear); | 5699 __ j(less, &out_of_object, Label::kNear); |
5669 __ mov(object, FieldOperand(object, | 5700 __ mov(object, FieldOperand(object, |
(...skipping 12 matching lines...) |
5682 FixedArray::kHeaderSize - kPointerSize)); | 5713 FixedArray::kHeaderSize - kPointerSize)); |
5683 __ bind(deferred->exit()); | 5714 __ bind(deferred->exit()); |
5684 __ bind(&done); | 5715 __ bind(&done); |
5685 } | 5716 } |
5686 | 5717 |
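The index smi in DoLoadFieldByIndex is doubly encoded: payload bit 0 (the test against Smi::FromInt(1)) flags a mutable-double field that must be loaded via the deferred Runtime::kLoadMutableDouble call, and the sign of the remaining bits picks in-object versus out-of-object (properties array) storage. A hedged C++ model of the decode (struct and helper names are illustrative):

    #include <cstdint>

    struct FieldIndex {
      bool is_mutable_double;  // payload bit 0, tested via Smi::FromInt(1)
      bool is_in_object;       // non-negative slot -> in-object field
      int32_t slot;            // field slot once the flag bit is stripped
    };

    FieldIndex DecodeFieldIndex(int32_t tagged) {
      int32_t payload = tagged >> 1;  // SmiUntag, the sar(index, 1) above
      FieldIndex decoded;
      decoded.is_mutable_double = (payload & 1) != 0;
      decoded.slot = payload >> 1;    // arithmetic shift keeps the sign
      decoded.is_in_object = decoded.slot >= 0;
      return decoded;
    }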
5687 | 5718 |
5688 #undef __ | 5719 #undef __ |
5689 | 5720 |
5690 } } // namespace v8::internal | 5721 } } // namespace v8::internal |
5691 | 5722 |
5692 #endif // V8_TARGET_ARCH_IA32 | 5723 #endif // V8_TARGET_ARCH_X87 |