OLD | NEW |
1 // Copyright 2012 the V8 project authors. All rights reserved. | 1 // Copyright 2012 the V8 project authors. All rights reserved. |
2 // Use of this source code is governed by a BSD-style license that can be | 2 // Use of this source code is governed by a BSD-style license that can be |
3 // found in the LICENSE file. | 3 // found in the LICENSE file. |
4 | 4 |
5 #include "v8.h" | 5 #include "v8.h" |
6 | 6 |
7 #if V8_TARGET_ARCH_IA32 | 7 #if V8_TARGET_ARCH_X87 |
8 | 8 |
9 #include "ia32/lithium-codegen-ia32.h" | 9 #include "x87/lithium-codegen-x87.h" |
10 #include "ic.h" | 10 #include "ic.h" |
11 #include "code-stubs.h" | 11 #include "code-stubs.h" |
12 #include "deoptimizer.h" | 12 #include "deoptimizer.h" |
13 #include "stub-cache.h" | 13 #include "stub-cache.h" |
14 #include "codegen.h" | 14 #include "codegen.h" |
15 #include "hydrogen-osr.h" | 15 #include "hydrogen-osr.h" |
16 | 16 |
17 namespace v8 { | 17 namespace v8 { |
18 namespace internal { | 18 namespace internal { |
19 | 19 |
| 20 |
20 // When invoking builtins, we need to record the safepoint in the middle of | 21 // When invoking builtins, we need to record the safepoint in the middle of |
21 // the invoke instruction sequence generated by the macro assembler. | 22 // the invoke instruction sequence generated by the macro assembler. |
22 class SafepointGenerator V8_FINAL : public CallWrapper { | 23 class SafepointGenerator V8_FINAL : public CallWrapper { |
23 public: | 24 public: |
24 SafepointGenerator(LCodeGen* codegen, | 25 SafepointGenerator(LCodeGen* codegen, |
25 LPointerMap* pointers, | 26 LPointerMap* pointers, |
26 Safepoint::DeoptMode mode) | 27 Safepoint::DeoptMode mode) |
27 : codegen_(codegen), | 28 : codegen_(codegen), |
28 pointers_(pointers), | 29 pointers_(pointers), |
29 deopt_mode_(mode) {} | 30 deopt_mode_(mode) {} |
(...skipping 54 matching lines...)
84 #ifdef _MSC_VER | 85 #ifdef _MSC_VER |
85 void LCodeGen::MakeSureStackPagesMapped(int offset) { | 86 void LCodeGen::MakeSureStackPagesMapped(int offset) { |
86 const int kPageSize = 4 * KB; | 87 const int kPageSize = 4 * KB; |
87 for (offset -= kPageSize; offset > 0; offset -= kPageSize) { | 88 for (offset -= kPageSize; offset > 0; offset -= kPageSize) { |
88 __ mov(Operand(esp, offset), eax); | 89 __ mov(Operand(esp, offset), eax); |
89 } | 90 } |
90 } | 91 } |
91 #endif | 92 #endif |
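
A note on MakeSureStackPagesMapped above: MSVC-targeted Windows builds commit
stack memory one guard page at a time, so a frame larger than a page must be
probed at page intervals before it is addressed out of order. A minimal
standalone sketch of the same probing pattern (illustrative only, not V8 code):

    // Touch one word in every 4 KB page of a prospective frame so the OS
    // commits the pages in order; mirrors the mov-per-page loop above.
    void TouchStackPages(volatile char* frame, int frame_size) {
      const int kPageSize = 4 * 1024;
      for (int offset = frame_size - kPageSize; offset > 0;
           offset -= kPageSize) {
        frame[offset] = 0;  // one write per page
      }
    }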
92 | 93 |
93 | 94 |
94 void LCodeGen::SaveCallerDoubles() { | |
95 ASSERT(info()->saves_caller_doubles()); | |
96 ASSERT(NeedsEagerFrame()); | |
97 Comment(";;; Save clobbered callee double registers"); | |
98 int count = 0; | |
99 BitVector* doubles = chunk()->allocated_double_registers(); | |
100 BitVector::Iterator save_iterator(doubles); | |
101 while (!save_iterator.Done()) { | |
102 __ movsd(MemOperand(esp, count * kDoubleSize), | |
103 XMMRegister::FromAllocationIndex(save_iterator.Current())); | |
104 save_iterator.Advance(); | |
105 count++; | |
106 } | |
107 } | |
108 | |
109 | |
110 void LCodeGen::RestoreCallerDoubles() { | |
111 ASSERT(info()->saves_caller_doubles()); | |
112 ASSERT(NeedsEagerFrame()); | |
113 Comment(";;; Restore clobbered callee double registers"); | |
114 BitVector* doubles = chunk()->allocated_double_registers(); | |
115 BitVector::Iterator save_iterator(doubles); | |
116 int count = 0; | |
117 while (!save_iterator.Done()) { | |
118 __ movsd(XMMRegister::FromAllocationIndex(save_iterator.Current()), | |
119 MemOperand(esp, count * kDoubleSize)); | |
120 save_iterator.Advance(); | |
121 count++; | |
122 } | |
123 } | |
124 | |
125 | |
126 bool LCodeGen::GeneratePrologue() { | 95 bool LCodeGen::GeneratePrologue() { |
127 ASSERT(is_generating()); | 96 ASSERT(is_generating()); |
128 | 97 |
129 if (info()->IsOptimizing()) { | 98 if (info()->IsOptimizing()) { |
130 ProfileEntryHookStub::MaybeCallEntryHook(masm_); | 99 ProfileEntryHookStub::MaybeCallEntryHook(masm_); |
131 | 100 |
132 #ifdef DEBUG | 101 #ifdef DEBUG |
133 if (strlen(FLAG_stop_at) > 0 && | 102 if (strlen(FLAG_stop_at) > 0 && |
134 info_->function()->name()->IsUtf8EqualTo(CStrVector(FLAG_stop_at))) { | 103 info_->function()->name()->IsUtf8EqualTo(CStrVector(FLAG_stop_at))) { |
135 __ int3(); | 104 __ int3(); |
(...skipping 103 matching lines...)
239 Comment(";;; Store dynamic frame alignment tag for spilled doubles"); | 208 Comment(";;; Store dynamic frame alignment tag for spilled doubles"); |
240 // Store dynamic frame alignment state in the first local. | 209 // Store dynamic frame alignment state in the first local. |
241 int offset = JavaScriptFrameConstants::kDynamicAlignmentStateOffset; | 210 int offset = JavaScriptFrameConstants::kDynamicAlignmentStateOffset; |
242 if (dynamic_frame_alignment_) { | 211 if (dynamic_frame_alignment_) { |
243 __ mov(Operand(ebp, offset), edx); | 212 __ mov(Operand(ebp, offset), edx); |
244 } else { | 213 } else { |
245 __ mov(Operand(ebp, offset), Immediate(kNoAlignmentPadding)); | 214 __ mov(Operand(ebp, offset), Immediate(kNoAlignmentPadding)); |
246 } | 215 } |
247 } | 216 } |
248 } | 217 } |
249 | |
250 if (info()->saves_caller_doubles()) SaveCallerDoubles(); | |
251 } | 218 } |
252 | 219 |
253 // Possibly allocate a local context. | 220 // Possibly allocate a local context. |
254 int heap_slots = info_->num_heap_slots() - Context::MIN_CONTEXT_SLOTS; | 221 int heap_slots = info_->num_heap_slots() - Context::MIN_CONTEXT_SLOTS; |
255 if (heap_slots > 0) { | 222 if (heap_slots > 0) { |
256 Comment(";;; Allocate local context"); | 223 Comment(";;; Allocate local context"); |
257 // Argument to NewContext is the function, which is still in edi. | 224 // Argument to NewContext is the function, which is still in edi. |
258 if (heap_slots <= FastNewContextStub::kMaximumSlots) { | 225 if (heap_slots <= FastNewContextStub::kMaximumSlots) { |
259 FastNewContextStub stub(isolate(), heap_slots); | 226 FastNewContextStub stub(isolate(), heap_slots); |
260 __ CallStub(&stub); | 227 __ CallStub(&stub); |
(...skipping 16 matching lines...)
277 (num_parameters - 1 - i) * kPointerSize; | 244 (num_parameters - 1 - i) * kPointerSize; |
278 // Load parameter from stack. | 245 // Load parameter from stack. |
279 __ mov(eax, Operand(ebp, parameter_offset)); | 246 __ mov(eax, Operand(ebp, parameter_offset)); |
280 // Store it in the context. | 247 // Store it in the context. |
281 int context_offset = Context::SlotOffset(var->index()); | 248 int context_offset = Context::SlotOffset(var->index()); |
282 __ mov(Operand(esi, context_offset), eax); | 249 __ mov(Operand(esi, context_offset), eax); |
283 // Update the write barrier. This clobbers eax and ebx. | 250 // Update the write barrier. This clobbers eax and ebx. |
284 __ RecordWriteContextSlot(esi, | 251 __ RecordWriteContextSlot(esi, |
285 context_offset, | 252 context_offset, |
286 eax, | 253 eax, |
287 ebx, | 254 ebx); |
288 kDontSaveFPRegs); | |
289 } | 255 } |
290 } | 256 } |
291 Comment(";;; End allocate local context"); | 257 Comment(";;; End allocate local context"); |
292 } | 258 } |
293 | 259 |
294 // Trace the call. | 260 // Trace the call. |
295 if (FLAG_trace && info()->IsOptimizing()) { | 261 if (FLAG_trace && info()->IsOptimizing()) { |
296 // We have not executed any compiled code yet, so esi still holds the | 262 // We have not executed any compiled code yet, so esi still holds the |
297 // incoming context. | 263 // incoming context. |
298 __ CallRuntime(Runtime::kTraceEnter, 0); | 264 __ CallRuntime(Runtime::kTraceEnter, 0); |
(...skipping 53 matching lines...)
352 } | 318 } |
353 | 319 |
354 | 320 |
355 void LCodeGen::GenerateBodyInstructionPre(LInstruction* instr) { | 321 void LCodeGen::GenerateBodyInstructionPre(LInstruction* instr) { |
356 if (instr->IsCall()) { | 322 if (instr->IsCall()) { |
357 EnsureSpaceForLazyDeopt(Deoptimizer::patch_size()); | 323 EnsureSpaceForLazyDeopt(Deoptimizer::patch_size()); |
358 } | 324 } |
359 if (!instr->IsLazyBailout() && !instr->IsGap()) { | 325 if (!instr->IsLazyBailout() && !instr->IsGap()) { |
360 safepoints_.BumpLastLazySafepointIndex(); | 326 safepoints_.BumpLastLazySafepointIndex(); |
361 } | 327 } |
| 328 FlushX87StackIfNecessary(instr); |
| 329 } |
| 330 |
| 331 |
| 332 void LCodeGen::GenerateBodyInstructionPost(LInstruction* instr) { |
| 333 if (instr->IsGoto()) { |
| 334 x87_stack_.LeavingBlock(current_block_, LGoto::cast(instr)); |
| 335 } else if (FLAG_debug_code && FLAG_enable_slow_asserts && |
| 336 !instr->IsGap() && !instr->IsReturn()) { |
| 337 if (instr->ClobbersDoubleRegisters(isolate())) { |
| 338 if (instr->HasDoubleRegisterResult()) { |
| 339 ASSERT_EQ(1, x87_stack_.depth()); |
| 340 } else { |
| 341 ASSERT_EQ(0, x87_stack_.depth()); |
| 342 } |
| 343 } |
| 344 __ VerifyX87StackDepth(x87_stack_.depth()); |
| 345 } |
362 } | 346 } |
363 | 347 |
364 | 348 |
365 void LCodeGen::GenerateBodyInstructionPost(LInstruction* instr) { } | |
366 | |
367 | |
368 bool LCodeGen::GenerateJumpTable() { | 349 bool LCodeGen::GenerateJumpTable() { |
369 Label needs_frame; | 350 Label needs_frame; |
370 if (jump_table_.length() > 0) { | 351 if (jump_table_.length() > 0) { |
371 Comment(";;; -------------------- Jump table --------------------"); | 352 Comment(";;; -------------------- Jump table --------------------"); |
372 } | 353 } |
373 for (int i = 0; i < jump_table_.length(); i++) { | 354 for (int i = 0; i < jump_table_.length(); i++) { |
374 __ bind(&jump_table_[i].label); | 355 __ bind(&jump_table_[i].label); |
375 Address entry = jump_table_[i].address; | 356 Address entry = jump_table_[i].address; |
376 Deoptimizer::BailoutType type = jump_table_[i].bailout_type; | 357 Deoptimizer::BailoutType type = jump_table_[i].bailout_type; |
377 int id = Deoptimizer::GetDeoptimizationId(isolate(), entry, type); | 358 int id = Deoptimizer::GetDeoptimizationId(isolate(), entry, type); |
(...skipping 23 matching lines...)
401 __ call(&push_approx_pc); | 382 __ call(&push_approx_pc); |
402 __ bind(&push_approx_pc); | 383 __ bind(&push_approx_pc); |
403 // Push the continuation which was stashed where the ebp should | 384 // Push the continuation which was stashed where the ebp should |
404 // be. Replace it with the saved ebp. | 385 // be. Replace it with the saved ebp. |
405 __ push(MemOperand(esp, 3 * kPointerSize)); | 386 __ push(MemOperand(esp, 3 * kPointerSize)); |
406 __ mov(MemOperand(esp, 4 * kPointerSize), ebp); | 387 __ mov(MemOperand(esp, 4 * kPointerSize), ebp); |
407 __ lea(ebp, MemOperand(esp, 4 * kPointerSize)); | 388 __ lea(ebp, MemOperand(esp, 4 * kPointerSize)); |
408 __ ret(0); // Call the continuation without clobbering registers. | 389 __ ret(0); // Call the continuation without clobbering registers. |
409 } | 390 } |
410 } else { | 391 } else { |
411 if (info()->saves_caller_doubles()) RestoreCallerDoubles(); | |
412 __ call(entry, RelocInfo::RUNTIME_ENTRY); | 392 __ call(entry, RelocInfo::RUNTIME_ENTRY); |
413 } | 393 } |
414 } | 394 } |
415 return !is_aborted(); | 395 return !is_aborted(); |
416 } | 396 } |
417 | 397 |
418 | 398 |
419 bool LCodeGen::GenerateDeferredCode() { | 399 bool LCodeGen::GenerateDeferredCode() { |
420 ASSERT(is_generating()); | 400 ASSERT(is_generating()); |
421 if (deferred_.length() > 0) { | 401 if (deferred_.length() > 0) { |
422 for (int i = 0; !is_aborted() && i < deferred_.length(); i++) { | 402 for (int i = 0; !is_aborted() && i < deferred_.length(); i++) { |
423 LDeferredCode* code = deferred_[i]; | 403 LDeferredCode* code = deferred_[i]; |
| 404 X87Stack copy(code->x87_stack()); |
| 405 x87_stack_ = copy; |
424 | 406 |
425 HValue* value = | 407 HValue* value = |
426 instructions_->at(code->instruction_index())->hydrogen_value(); | 408 instructions_->at(code->instruction_index())->hydrogen_value(); |
427 RecordAndWritePosition( | 409 RecordAndWritePosition( |
428 chunk()->graph()->SourcePositionToScriptPosition(value->position())); | 410 chunk()->graph()->SourcePositionToScriptPosition(value->position())); |
429 | 411 |
430 Comment(";;; <@%d,#%d> " | 412 Comment(";;; <@%d,#%d> " |
431 "-------------------- Deferred %s --------------------", | 413 "-------------------- Deferred %s --------------------", |
432 code->instruction_index(), | 414 code->instruction_index(), |
433 code->instr()->hydrogen_value()->id(), | 415 code->instr()->hydrogen_value()->id(), |
(...skipping 45 matching lines...)
479 safepoints_.Emit(masm(), GetStackSlotCount()); | 461 safepoints_.Emit(masm(), GetStackSlotCount()); |
480 return !is_aborted(); | 462 return !is_aborted(); |
481 } | 463 } |
482 | 464 |
483 | 465 |
484 Register LCodeGen::ToRegister(int index) const { | 466 Register LCodeGen::ToRegister(int index) const { |
485 return Register::FromAllocationIndex(index); | 467 return Register::FromAllocationIndex(index); |
486 } | 468 } |
487 | 469 |
488 | 470 |
489 XMMRegister LCodeGen::ToDoubleRegister(int index) const { | 471 X87Register LCodeGen::ToX87Register(int index) const { |
490 return XMMRegister::FromAllocationIndex(index); | 472 return X87Register::FromAllocationIndex(index); |
491 } | 473 } |
492 | 474 |
493 | 475 |
| 476 void LCodeGen::X87LoadForUsage(X87Register reg) { |
| 477 ASSERT(x87_stack_.Contains(reg)); |
| 478 x87_stack_.Fxch(reg); |
| 479 x87_stack_.pop(); |
| 480 } |
| 481 |
| 482 |
| 483 void LCodeGen::X87LoadForUsage(X87Register reg1, X87Register reg2) { |
| 484 ASSERT(x87_stack_.Contains(reg1)); |
| 485 ASSERT(x87_stack_.Contains(reg2)); |
| 486 x87_stack_.Fxch(reg1, 1); |
| 487 x87_stack_.Fxch(reg2); |
| 488 x87_stack_.pop(); |
| 489 x87_stack_.pop(); |
| 490 } |
| 491 |
| 492 |
| 493 void LCodeGen::X87Stack::Fxch(X87Register reg, int other_slot) { |
| 494 ASSERT(is_mutable_); |
| 495 ASSERT(Contains(reg) && stack_depth_ > other_slot); |
| 496 int i = ArrayIndex(reg); |
| 497 int st = st2idx(i); |
| 498 if (st != other_slot) { |
| 499 int other_i = st2idx(other_slot); |
| 500 X87Register other = stack_[other_i]; |
| 501 stack_[other_i] = reg; |
| 502 stack_[i] = other; |
| 503 if (st == 0) { |
| 504 __ fxch(other_slot); |
| 505 } else if (other_slot == 0) { |
| 506 __ fxch(st); |
| 507 } else { |
| 508 __ fxch(st); |
| 509 __ fxch(other_slot); |
| 510 __ fxch(st); |
| 511 } |
| 512 } |
| 513 } |
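
Since fxch can only exchange st(0) with another slot, Fxch above swaps two
arbitrary slots with the classic three-exchange trick; st2idx below maps a
virtual-stack array index to its physical st() position (array index
stack_depth_ - 1, the most recently pushed element, is st(0)). A standalone
model of the three-exchange swap (a sketch, not V8 code):

    #include <algorithm>
    #include <array>

    // Model of the physical FPU stack; index 0 stands for st(0).
    static void fxch(std::array<double, 8>& st, int i) {
      std::swap(st[0], st[i]);
    }

    // Swapping st(a) and st(b) when neither is st(0) takes three exchanges;
    // the net effect leaves st(0) where it was.
    static void SwapSlots(std::array<double, 8>& st, int a, int b) {
      if (a == 0) { fxch(st, b); return; }
      if (b == 0) { fxch(st, a); return; }
      fxch(st, a);  // st(0) <-> st(a)
      fxch(st, b);  // st(0) <-> st(b): old st(a) lands in st(b)
      fxch(st, a);  // st(0) <-> st(a): old st(b) lands in st(a), top restored
    }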
| 514 |
| 515 |
| 516 int LCodeGen::X87Stack::st2idx(int pos) { |
| 517 return stack_depth_ - pos - 1; |
| 518 } |
| 519 |
| 520 |
| 521 int LCodeGen::X87Stack::ArrayIndex(X87Register reg) { |
| 522 for (int i = 0; i < stack_depth_; i++) { |
| 523 if (stack_[i].is(reg)) return i; |
| 524 } |
| 525 UNREACHABLE(); |
| 526 return -1; |
| 527 } |
| 528 |
| 529 |
| 530 bool LCodeGen::X87Stack::Contains(X87Register reg) { |
| 531 for (int i = 0; i < stack_depth_; i++) { |
| 532 if (stack_[i].is(reg)) return true; |
| 533 } |
| 534 return false; |
| 535 } |
| 536 |
| 537 |
| 538 void LCodeGen::X87Stack::Free(X87Register reg) { |
| 539 ASSERT(is_mutable_); |
| 540 ASSERT(Contains(reg)); |
| 541 int i = ArrayIndex(reg); |
| 542 int st = st2idx(i); |
| 543 if (st > 0) { |
| 544 // Keep track of how fstp(st) changes the order of elements. |
| 545 int tos_i = st2idx(0); |
| 546 stack_[i] = stack_[tos_i]; |
| 547 } |
| 548 pop(); |
| 549 __ fstp(st); |
| 550 } |
| 551 |
| 552 |
| 553 void LCodeGen::X87Mov(X87Register dst, Operand src, X87OperandType opts) { |
| 554 if (x87_stack_.Contains(dst)) { |
| 555 x87_stack_.Fxch(dst); |
| 556 __ fstp(0); |
| 557 } else { |
| 558 x87_stack_.push(dst); |
| 559 } |
| 560 X87Fld(src, opts); |
| 561 } |
| 562 |
| 563 |
| 564 void LCodeGen::X87Fld(Operand src, X87OperandType opts) { |
| 565 ASSERT(!src.is_reg_only()); |
| 566 switch (opts) { |
| 567 case kX87DoubleOperand: |
| 568 __ fld_d(src); |
| 569 break; |
| 570 case kX87FloatOperand: |
| 571 __ fld_s(src); |
| 572 break; |
| 573 case kX87IntOperand: |
| 574 __ fild_s(src); |
| 575 break; |
| 576 default: |
| 577 UNREACHABLE(); |
| 578 } |
| 579 } |
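
One consequence of X87Fld worth noting: fld_s, fld_d and fild_s all widen
their memory operand to the 80-bit x87 format on load, which is why the keyed
FLOAT32 load later in this file no longer needs the old movss/cvtss2sd pair.
A standalone illustration (a sketch; long double stands in for the x87
extended format):

    #include <cstdint>
    #include <cstdio>

    int main() {
      float f = 1.5f;     // a FLOAT32 element in memory
      int32_t n = 42;     // an INT32 element in memory
      long double a = f;  // models fld_s: widened on load
      long double b = n;  // models fild_s: converted on load
      std::printf("%Lf %Lf\n", a, b);
      return 0;
    }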
| 580 |
| 581 |
| 582 void LCodeGen::X87Mov(Operand dst, X87Register src, X87OperandType opts) { |
| 583 ASSERT(!dst.is_reg_only()); |
| 584 x87_stack_.Fxch(src); |
| 585 switch (opts) { |
| 586 case kX87DoubleOperand: |
| 587 __ fst_d(dst); |
| 588 break; |
| 589 case kX87IntOperand: |
| 590 __ fist_s(dst); |
| 591 break; |
| 592 default: |
| 593 UNREACHABLE(); |
| 594 } |
| 595 } |
| 596 |
| 597 |
| 598 void LCodeGen::X87Stack::PrepareToWrite(X87Register reg) { |
| 599 ASSERT(is_mutable_); |
| 600 if (Contains(reg)) { |
| 601 Free(reg); |
| 602 } |
| 603 // Mark this register as the next register to write to. |
| 604 stack_[stack_depth_] = reg; |
| 605 } |
| 606 |
| 607 |
| 608 void LCodeGen::X87Stack::CommitWrite(X87Register reg) { |
| 609 ASSERT(is_mutable_); |
| 610 // Assert the reg is prepared to write, but not on the virtual stack yet. |
| 611 ASSERT(!Contains(reg) && stack_[stack_depth_].is(reg) && |
| 612 stack_depth_ < X87Register::kMaxNumAllocatableRegisters); |
| 613 stack_depth_++; |
| 614 } |
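
PrepareToWrite and CommitWrite form a two-phase handshake: the result
register is staged first, the instruction then physically pushes exactly one
value, and only CommitWrite makes the slot visible on the virtual stack. A
standalone model of the bookkeeping (a sketch, not V8 code; it assumes any
stale copy of reg was already freed, which the real helper does itself):

    #include <cassert>

    struct VirtualX87Stack {
      int regs[8];
      int depth = 0;
      bool Contains(int reg) const {
        for (int i = 0; i < depth; i++) {
          if (regs[i] == reg) return true;
        }
        return false;
      }
      void PrepareToWrite(int reg) {
        assert(!Contains(reg));
        regs[depth] = reg;  // staged; depth is unchanged on purpose
      }
      void CommitWrite(int reg) {
        // The physical push must have happened between the two calls.
        assert(!Contains(reg) && regs[depth] == reg && depth < 8);
        depth++;  // only now does reg exist on the virtual stack
      }
    };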
| 615 |
| 616 |
| 617 void LCodeGen::X87PrepareBinaryOp( |
| 618 X87Register left, X87Register right, X87Register result) { |
| 619 // You need to use DefineSameAsFirst for x87 instructions. |
| 620 ASSERT(result.is(left)); |
| 621 x87_stack_.Fxch(right, 1); |
| 622 x87_stack_.Fxch(left); |
| 623 } |
| 624 |
| 625 |
| 626 void LCodeGen::X87Stack::FlushIfNecessary(LInstruction* instr, LCodeGen* cgen) { |
| 627 if (stack_depth_ > 0 && instr->ClobbersDoubleRegisters(isolate())) { |
| 628 bool double_inputs = instr->HasDoubleRegisterInput(); |
| 629 |
| 630 // Flush the stack from tos down, since Free() will mess with tos. |
| 631 for (int i = stack_depth_-1; i >= 0; i--) { |
| 632 X87Register reg = stack_[i]; |
| 633 // Skip registers which contain the inputs for the next instruction |
| 634 // when flushing the stack. |
| 635 if (double_inputs && instr->IsDoubleInput(reg, cgen)) { |
| 636 continue; |
| 637 } |
| 638 Free(reg); |
| 639 if (i < stack_depth_-1) i++; |
| 640 } |
| 641 } |
| 642 if (instr->IsReturn()) { |
| 643 while (stack_depth_ > 0) { |
| 644 __ fstp(0); |
| 645 stack_depth_--; |
| 646 } |
| 647 if (FLAG_debug_code && FLAG_enable_slow_asserts) __ VerifyX87StackDepth(0); |
| 648 } |
| 649 } |
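
The flush loop above walks the virtual stack from the top down; when Free()
releases a non-top slot, the current top element moves into that slot, so the
i++ step revisits the slot the element just moved into. A standalone trace of
that bookkeeping (a sketch, not V8 code; is_input stands in for
IsDoubleInput):

    #include <cstdio>
    #include <vector>

    int main() {
      std::vector<int> stack = {1, 2, 3, 4};  // back() models st(0)
      auto is_input = [](int reg) { return reg == 2; };  // pretend 2 is kept
      auto free_at = [&](int i) {
        if (i < (int)stack.size() - 1) stack[i] = stack.back();  // top -> i
        stack.pop_back();                                        // fstp
      };
      for (int i = (int)stack.size() - 1; i >= 0; i--) {
        if (is_input(stack[i])) continue;  // keep the instruction's inputs
        free_at(i);
        if (i < (int)stack.size() - 1) i++;  // re-check the slot the top fell into
      }
      std::printf("%d slot(s) kept\n", (int)stack.size());  // prints 1
      return 0;
    }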
| 650 |
| 651 |
| 652 void LCodeGen::X87Stack::LeavingBlock(int current_block_id, LGoto* goto_instr) { |
| 653 ASSERT(stack_depth_ <= 1); |
| 654 // If this is ever used for new stubs producing two pairs of doubles |
| 655 // joined into two phis, this assert will hit. That situation is not |
| 656 // handled, since the two stacks might have st0 and st1 swapped. |
| 657 if (current_block_id + 1 != goto_instr->block_id()) { |
| 658 // If we have a value on the x87 stack on leaving a block, it must be a |
| 659 // phi input. If the next block we compile is not the join block, we have |
| 660 // to discard the stack state. |
| 661 stack_depth_ = 0; |
| 662 } |
| 663 } |
| 664 |
| 665 |
| 666 void LCodeGen::EmitFlushX87ForDeopt() { |
| 667 // The deoptimizer does not support x87 registers. But as long as we |
| 668 // deopt from a stub it's not a problem, since we will re-materialize the |
| 669 // original stub inputs, which can't be double registers. |
| 670 ASSERT(info()->IsStub()); |
| 671 if (FLAG_debug_code && FLAG_enable_slow_asserts) { |
| 672 __ pushfd(); |
| 673 __ VerifyX87StackDepth(x87_stack_.depth()); |
| 674 __ popfd(); |
| 675 } |
| 676 for (int i = 0; i < x87_stack_.depth(); i++) __ fstp(0); |
| 677 } |
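
Because the deoptimizer re-materializes stub inputs from tagged values, the
flush before a deopt can simply drop every live x87 slot; DeoptimizeIf below
jumps around this flush when the deopt is conditional, so the fall-through
path keeps its stack. A minimal model of the drop loop (a sketch, not V8
code):

    struct FpuStackModel {
      int depth = 0;
      void Fstp0() { depth--; }  // models __ fstp(0): discard st(0)
      void FlushForDeopt() {
        while (depth > 0) Fstp0();  // nothing survives into the deoptimizer
      }
    };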
| 678 |
| 679 |
494 Register LCodeGen::ToRegister(LOperand* op) const { | 680 Register LCodeGen::ToRegister(LOperand* op) const { |
495 ASSERT(op->IsRegister()); | 681 ASSERT(op->IsRegister()); |
496 return ToRegister(op->index()); | 682 return ToRegister(op->index()); |
497 } | 683 } |
498 | 684 |
499 | 685 |
500 XMMRegister LCodeGen::ToDoubleRegister(LOperand* op) const { | 686 X87Register LCodeGen::ToX87Register(LOperand* op) const { |
501 ASSERT(op->IsDoubleRegister()); | 687 ASSERT(op->IsDoubleRegister()); |
502 return ToDoubleRegister(op->index()); | 688 return ToX87Register(op->index()); |
503 } | 689 } |
504 | 690 |
505 | 691 |
506 int32_t LCodeGen::ToInteger32(LConstantOperand* op) const { | 692 int32_t LCodeGen::ToInteger32(LConstantOperand* op) const { |
507 return ToRepresentation(op, Representation::Integer32()); | 693 return ToRepresentation(op, Representation::Integer32()); |
508 } | 694 } |
509 | 695 |
510 | 696 |
511 int32_t LCodeGen::ToRepresentation(LConstantOperand* op, | 697 int32_t LCodeGen::ToRepresentation(LConstantOperand* op, |
512 const Representation& r) const { | 698 const Representation& r) const { |
(...skipping 37 matching lines...)
550 | 736 |
551 | 737 |
552 static int ArgumentsOffsetWithoutFrame(int index) { | 738 static int ArgumentsOffsetWithoutFrame(int index) { |
553 ASSERT(index < 0); | 739 ASSERT(index < 0); |
554 return -(index + 1) * kPointerSize + kPCOnStackSize; | 740 return -(index + 1) * kPointerSize + kPCOnStackSize; |
555 } | 741 } |
556 | 742 |
557 | 743 |
558 Operand LCodeGen::ToOperand(LOperand* op) const { | 744 Operand LCodeGen::ToOperand(LOperand* op) const { |
559 if (op->IsRegister()) return Operand(ToRegister(op)); | 745 if (op->IsRegister()) return Operand(ToRegister(op)); |
560 if (op->IsDoubleRegister()) return Operand(ToDoubleRegister(op)); | 746 ASSERT(!op->IsDoubleRegister()); |
561 ASSERT(op->IsStackSlot() || op->IsDoubleStackSlot()); | 747 ASSERT(op->IsStackSlot() || op->IsDoubleStackSlot()); |
562 if (NeedsEagerFrame()) { | 748 if (NeedsEagerFrame()) { |
563 return Operand(ebp, StackSlotOffset(op->index())); | 749 return Operand(ebp, StackSlotOffset(op->index())); |
564 } else { | 750 } else { |
565 // Retrieve parameter without eager stack-frame relative to the | 751 // Retrieve parameter without eager stack-frame relative to the |
566 // stack-pointer. | 752 // stack-pointer. |
567 return Operand(esp, ArgumentsOffsetWithoutFrame(op->index())); | 753 return Operand(esp, ArgumentsOffsetWithoutFrame(op->index())); |
568 } | 754 } |
569 } | 755 } |
570 | 756 |
(...skipping 116 matching lines...)
687 translation->StoreDoubleStackSlot(op->index()); | 873 translation->StoreDoubleStackSlot(op->index()); |
688 } else if (op->IsRegister()) { | 874 } else if (op->IsRegister()) { |
689 Register reg = ToRegister(op); | 875 Register reg = ToRegister(op); |
690 if (is_tagged) { | 876 if (is_tagged) { |
691 translation->StoreRegister(reg); | 877 translation->StoreRegister(reg); |
692 } else if (is_uint32) { | 878 } else if (is_uint32) { |
693 translation->StoreUint32Register(reg); | 879 translation->StoreUint32Register(reg); |
694 } else { | 880 } else { |
695 translation->StoreInt32Register(reg); | 881 translation->StoreInt32Register(reg); |
696 } | 882 } |
697 } else if (op->IsDoubleRegister()) { | |
698 XMMRegister reg = ToDoubleRegister(op); | |
699 translation->StoreDoubleRegister(reg); | |
700 } else if (op->IsConstantOperand()) { | 883 } else if (op->IsConstantOperand()) { |
701 HConstant* constant = chunk()->LookupConstant(LConstantOperand::cast(op)); | 884 HConstant* constant = chunk()->LookupConstant(LConstantOperand::cast(op)); |
702 int src_index = DefineDeoptimizationLiteral(constant->handle(isolate())); | 885 int src_index = DefineDeoptimizationLiteral(constant->handle(isolate())); |
703 translation->StoreLiteral(src_index); | 886 translation->StoreLiteral(src_index); |
704 } else { | 887 } else { |
705 UNREACHABLE(); | 888 UNREACHABLE(); |
706 } | 889 } |
707 } | 890 } |
708 | 891 |
709 | 892 |
(...skipping 16 matching lines...)
726 | 909 |
727 void LCodeGen::CallCode(Handle<Code> code, | 910 void LCodeGen::CallCode(Handle<Code> code, |
728 RelocInfo::Mode mode, | 911 RelocInfo::Mode mode, |
729 LInstruction* instr) { | 912 LInstruction* instr) { |
730 CallCodeGeneric(code, mode, instr, RECORD_SIMPLE_SAFEPOINT); | 913 CallCodeGeneric(code, mode, instr, RECORD_SIMPLE_SAFEPOINT); |
731 } | 914 } |
732 | 915 |
733 | 916 |
734 void LCodeGen::CallRuntime(const Runtime::Function* fun, | 917 void LCodeGen::CallRuntime(const Runtime::Function* fun, |
735 int argc, | 918 int argc, |
736 LInstruction* instr, | 919 LInstruction* instr) { |
737 SaveFPRegsMode save_doubles) { | |
738 ASSERT(instr != NULL); | 920 ASSERT(instr != NULL); |
739 ASSERT(instr->HasPointerMap()); | 921 ASSERT(instr->HasPointerMap()); |
740 | 922 |
741 __ CallRuntime(fun, argc, save_doubles); | 923 __ CallRuntime(fun, argc); |
742 | 924 |
743 RecordSafepointWithLazyDeopt(instr, RECORD_SIMPLE_SAFEPOINT); | 925 RecordSafepointWithLazyDeopt(instr, RECORD_SIMPLE_SAFEPOINT); |
744 | 926 |
745 ASSERT(info()->is_calling()); | 927 ASSERT(info()->is_calling()); |
746 } | 928 } |
747 | 929 |
748 | 930 |
749 void LCodeGen::LoadContextFromDeferred(LOperand* context) { | 931 void LCodeGen::LoadContextFromDeferred(LOperand* context) { |
750 if (context->IsRegister()) { | 932 if (context->IsRegister()) { |
751 if (!ToRegister(context).is(esi)) { | 933 if (!ToRegister(context).is(esi)) { |
752 __ mov(esi, ToRegister(context)); | 934 __ mov(esi, ToRegister(context)); |
753 } | 935 } |
754 } else if (context->IsStackSlot()) { | 936 } else if (context->IsStackSlot()) { |
755 __ mov(esi, ToOperand(context)); | 937 __ mov(esi, ToOperand(context)); |
756 } else if (context->IsConstantOperand()) { | 938 } else if (context->IsConstantOperand()) { |
757 HConstant* constant = | 939 HConstant* constant = |
758 chunk_->LookupConstant(LConstantOperand::cast(context)); | 940 chunk_->LookupConstant(LConstantOperand::cast(context)); |
759 __ LoadObject(esi, Handle<Object>::cast(constant->handle(isolate()))); | 941 __ LoadObject(esi, Handle<Object>::cast(constant->handle(isolate()))); |
760 } else { | 942 } else { |
761 UNREACHABLE(); | 943 UNREACHABLE(); |
762 } | 944 } |
763 } | 945 } |
764 | 946 |
765 void LCodeGen::CallRuntimeFromDeferred(Runtime::FunctionId id, | 947 void LCodeGen::CallRuntimeFromDeferred(Runtime::FunctionId id, |
766 int argc, | 948 int argc, |
767 LInstruction* instr, | 949 LInstruction* instr, |
768 LOperand* context) { | 950 LOperand* context) { |
769 LoadContextFromDeferred(context); | 951 LoadContextFromDeferred(context); |
770 | 952 |
771 __ CallRuntimeSaveDoubles(id); | 953 __ CallRuntime(id); |
772 RecordSafepointWithRegisters( | 954 RecordSafepointWithRegisters( |
773 instr->pointer_map(), argc, Safepoint::kNoLazyDeopt); | 955 instr->pointer_map(), argc, Safepoint::kNoLazyDeopt); |
774 | 956 |
775 ASSERT(info()->is_calling()); | 957 ASSERT(info()->is_calling()); |
776 } | 958 } |
777 | 959 |
778 | 960 |
779 void LCodeGen::RegisterEnvironmentForDeoptimization( | 961 void LCodeGen::RegisterEnvironmentForDeoptimization( |
780 LEnvironment* environment, Safepoint::DeoptMode mode) { | 962 LEnvironment* environment, Safepoint::DeoptMode mode) { |
781 environment->set_has_been_used(); | 963 environment->set_has_been_used(); |
(...skipping 59 matching lines...)
841 __ pop(eax); | 1023 __ pop(eax); |
842 __ popfd(); | 1024 __ popfd(); |
843 ASSERT(frame_is_built_); | 1025 ASSERT(frame_is_built_); |
844 __ call(entry, RelocInfo::RUNTIME_ENTRY); | 1026 __ call(entry, RelocInfo::RUNTIME_ENTRY); |
845 __ bind(&no_deopt); | 1027 __ bind(&no_deopt); |
846 __ mov(Operand::StaticVariable(count), eax); | 1028 __ mov(Operand::StaticVariable(count), eax); |
847 __ pop(eax); | 1029 __ pop(eax); |
848 __ popfd(); | 1030 __ popfd(); |
849 } | 1031 } |
850 | 1032 |
| 1033 // Before instructions that can deopt, we normally flush the x87 stack. But |
| 1034 // we can have inputs or outputs of the current instruction on the stack, |
| 1035 // thus we need to flush them here from the physical stack to leave it in a |
| 1036 // consistent state. |
| 1037 if (x87_stack_.depth() > 0) { |
| 1038 Label done; |
| 1039 if (cc != no_condition) __ j(NegateCondition(cc), &done, Label::kNear); |
| 1040 EmitFlushX87ForDeopt(); |
| 1041 __ bind(&done); |
| 1042 } |
| 1043 |
851 if (info()->ShouldTrapOnDeopt()) { | 1044 if (info()->ShouldTrapOnDeopt()) { |
852 Label done; | 1045 Label done; |
853 if (cc != no_condition) __ j(NegateCondition(cc), &done, Label::kNear); | 1046 if (cc != no_condition) __ j(NegateCondition(cc), &done, Label::kNear); |
854 __ int3(); | 1047 __ int3(); |
855 __ bind(&done); | 1048 __ bind(&done); |
856 } | 1049 } |
857 | 1050 |
858 ASSERT(info()->IsStub() || frame_is_built_); | 1051 ASSERT(info()->IsStub() || frame_is_built_); |
859 if (cc == no_condition && frame_is_built_) { | 1052 if (cc == no_condition && frame_is_built_) { |
860 __ call(entry, RelocInfo::RUNTIME_ENTRY); | 1053 __ call(entry, RelocInfo::RUNTIME_ENTRY); |
(...skipping 840 matching lines...)
1701 } | 1894 } |
1702 | 1895 |
1703 | 1896 |
1704 void LCodeGen::DoConstantD(LConstantD* instr) { | 1897 void LCodeGen::DoConstantD(LConstantD* instr) { |
1705 double v = instr->value(); | 1898 double v = instr->value(); |
1706 uint64_t int_val = BitCast<uint64_t, double>(v); | 1899 uint64_t int_val = BitCast<uint64_t, double>(v); |
1707 int32_t lower = static_cast<int32_t>(int_val); | 1900 int32_t lower = static_cast<int32_t>(int_val); |
1708 int32_t upper = static_cast<int32_t>(int_val >> (kBitsPerInt)); | 1901 int32_t upper = static_cast<int32_t>(int_val >> (kBitsPerInt)); |
1709 ASSERT(instr->result()->IsDoubleRegister()); | 1902 ASSERT(instr->result()->IsDoubleRegister()); |
1710 | 1903 |
1711 XMMRegister res = ToDoubleRegister(instr->result()); | 1904 __ push(Immediate(upper)); |
1712 if (int_val == 0) { | 1905 __ push(Immediate(lower)); |
1713 __ xorps(res, res); | 1906 X87Register reg = ToX87Register(instr->result()); |
1714 } else { | 1907 X87Mov(reg, Operand(esp, 0)); |
1715 Register temp = ToRegister(instr->temp()); | 1908 __ add(Operand(esp), Immediate(kDoubleSize)); |
1716 if (CpuFeatures::IsSupported(SSE4_1)) { | |
1717 CpuFeatureScope scope2(masm(), SSE4_1); | |
1718 if (lower != 0) { | |
1719 __ Move(temp, Immediate(lower)); | |
1720 __ movd(res, Operand(temp)); | |
1721 __ Move(temp, Immediate(upper)); | |
1722 __ pinsrd(res, Operand(temp), 1); | |
1723 } else { | |
1724 __ xorps(res, res); | |
1725 __ Move(temp, Immediate(upper)); | |
1726 __ pinsrd(res, Operand(temp), 1); | |
1727 } | |
1728 } else { | |
1729 __ Move(temp, Immediate(upper)); | |
1730 __ movd(res, Operand(temp)); | |
1731 __ psllq(res, 32); | |
1732 if (lower != 0) { | |
1733 XMMRegister xmm_scratch = double_scratch0(); | |
1734 __ Move(temp, Immediate(lower)); | |
1735 __ movd(xmm_scratch, Operand(temp)); | |
1736 __ orps(res, xmm_scratch); | |
1737 } | |
1738 } | |
1739 } | |
1740 } | 1909 } |
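
The x87 has no double-immediate load, so DoConstantD materializes the
constant through memory: the 64-bit IEEE-754 image is split into two 32-bit
halves, pushed upper half first (the stack grows down and x86 is
little-endian, so the pair forms a contiguous double at esp), loaded with
fld_d via X87Mov, and the stack slot is then reclaimed. A standalone check of
the split (a sketch, not V8 code):

    #include <cstdint>
    #include <cstdio>
    #include <cstring>

    int main() {
      double v = 1.5;
      uint64_t bits;
      std::memcpy(&bits, &v, sizeof bits);  // BitCast<uint64_t, double>
      int32_t lower = static_cast<int32_t>(bits);
      int32_t upper = static_cast<int32_t>(bits >> 32);  // kBitsPerInt == 32
      // push upper; push lower  =>  [esp] holds the little-endian image,
      // ready for fld_d.
      std::printf("upper=0x%08x lower=0x%08x\n", upper, lower);
      return 0;
    }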
1741 | 1910 |
1742 | 1911 |
1743 void LCodeGen::DoConstantE(LConstantE* instr) { | 1912 void LCodeGen::DoConstantE(LConstantE* instr) { |
1744 __ lea(ToRegister(instr->result()), Operand::StaticVariable(instr->value())); | 1913 __ lea(ToRegister(instr->result()), Operand::StaticVariable(instr->value())); |
1745 } | 1914 } |
1746 | 1915 |
1747 | 1916 |
1748 void LCodeGen::DoConstantT(LConstantT* instr) { | 1917 void LCodeGen::DoConstantT(LConstantT* instr) { |
1749 Register reg = ToRegister(instr->result()); | 1918 Register reg = ToRegister(instr->result()); |
(...skipping 177 matching lines...)
1927 __ mov(left_op, immediate); | 2096 __ mov(left_op, immediate); |
1928 } else { | 2097 } else { |
1929 Register left_reg = ToRegister(left); | 2098 Register left_reg = ToRegister(left); |
1930 Operand right_op = ToOperand(right); | 2099 Operand right_op = ToOperand(right); |
1931 __ cmp(left_reg, right_op); | 2100 __ cmp(left_reg, right_op); |
1932 __ j(condition, &return_left, Label::kNear); | 2101 __ j(condition, &return_left, Label::kNear); |
1933 __ mov(left_reg, right_op); | 2102 __ mov(left_reg, right_op); |
1934 } | 2103 } |
1935 __ bind(&return_left); | 2104 __ bind(&return_left); |
1936 } else { | 2105 } else { |
1937 ASSERT(instr->hydrogen()->representation().IsDouble()); | 2106 // TODO(weiliang) use X87 for double representation. |
1938 Label check_nan_left, check_zero, return_left, return_right; | 2107 UNIMPLEMENTED(); |
1939 Condition condition = (operation == HMathMinMax::kMathMin) ? below : above; | |
1940 XMMRegister left_reg = ToDoubleRegister(left); | |
1941 XMMRegister right_reg = ToDoubleRegister(right); | |
1942 __ ucomisd(left_reg, right_reg); | |
1943 __ j(parity_even, &check_nan_left, Label::kNear); // At least one NaN. | |
1944 __ j(equal, &check_zero, Label::kNear); // left == right. | |
1945 __ j(condition, &return_left, Label::kNear); | |
1946 __ jmp(&return_right, Label::kNear); | |
1947 | |
1948 __ bind(&check_zero); | |
1949 XMMRegister xmm_scratch = double_scratch0(); | |
1950 __ xorps(xmm_scratch, xmm_scratch); | |
1951 __ ucomisd(left_reg, xmm_scratch); | |
1952 __ j(not_equal, &return_left, Label::kNear); // left == right != 0. | |
1953 // At this point, both left and right are either 0 or -0. | |
1954 if (operation == HMathMinMax::kMathMin) { | |
1955 __ orpd(left_reg, right_reg); | |
1956 } else { | |
1957 // Since we operate on +0 and/or -0, addsd and andsd have the same effect. | |
1958 __ addsd(left_reg, right_reg); | |
1959 } | |
1960 __ jmp(&return_left, Label::kNear); | |
1961 | |
1962 __ bind(&check_nan_left); | |
1963 __ ucomisd(left_reg, left_reg); // NaN check. | |
1964 __ j(parity_even, &return_left, Label::kNear); // left == NaN. | |
1965 __ bind(&return_right); | |
1966 __ movaps(left_reg, right_reg); | |
1967 | |
1968 __ bind(&return_left); | |
1969 } | 2108 } |
1970 } | 2109 } |
1971 | 2110 |
1972 | 2111 |
1973 void LCodeGen::DoArithmeticD(LArithmeticD* instr) { | 2112 void LCodeGen::DoArithmeticD(LArithmeticD* instr) { |
1974 XMMRegister left = ToDoubleRegister(instr->left()); | 2113 X87Register left = ToX87Register(instr->left()); |
1975 XMMRegister right = ToDoubleRegister(instr->right()); | 2114 X87Register right = ToX87Register(instr->right()); |
1976 XMMRegister result = ToDoubleRegister(instr->result()); | 2115 X87Register result = ToX87Register(instr->result()); |
| 2116 if (instr->op() != Token::MOD) { |
| 2117 X87PrepareBinaryOp(left, right, result); |
| 2118 } |
1977 switch (instr->op()) { | 2119 switch (instr->op()) { |
1978 case Token::ADD: | 2120 case Token::ADD: |
1979 __ addsd(left, right); | 2121 __ fadd_i(1); |
1980 break; | 2122 break; |
1981 case Token::SUB: | 2123 case Token::SUB: |
1982 __ subsd(left, right); | 2124 __ fsub_i(1); |
1983 break; | 2125 break; |
1984 case Token::MUL: | 2126 case Token::MUL: |
1985 __ mulsd(left, right); | 2127 __ fmul_i(1); |
1986 break; | 2128 break; |
1987 case Token::DIV: | 2129 case Token::DIV: |
1988 __ divsd(left, right); | 2130 __ fdiv_i(1); |
1989 // Don't delete this mov. It may improve performance on some CPUs, | |
1990 // when there is a mulsd depending on the result | |
1991 __ movaps(left, left); | |
1992 break; | 2131 break; |
1993 case Token::MOD: { | 2132 case Token::MOD: { |
1994 // Pass two doubles as arguments on the stack. | 2133 // Pass two doubles as arguments on the stack. |
1995 __ PrepareCallCFunction(4, eax); | 2134 __ PrepareCallCFunction(4, eax); |
1996 __ movsd(Operand(esp, 0 * kDoubleSize), left); | 2135 X87Mov(Operand(esp, 1 * kDoubleSize), right); |
1997 __ movsd(Operand(esp, 1 * kDoubleSize), right); | 2136 X87Mov(Operand(esp, 0), left); |
| 2137 X87Free(right); |
| 2138 ASSERT(left.is(result)); |
| 2139 X87PrepareToWrite(result); |
1998 __ CallCFunction( | 2140 __ CallCFunction( |
1999 ExternalReference::mod_two_doubles_operation(isolate()), | 2141 ExternalReference::mod_two_doubles_operation(isolate()), |
2000 4); | 2142 4); |
2001 | 2143 |
2002 // Return value is in st(0) on ia32. | 2144 // Return value is in st(0) on ia32. |
2003 // Store it into the result register. | 2145 X87CommitWrite(result); |
2004 __ sub(Operand(esp), Immediate(kDoubleSize)); | |
2005 __ fstp_d(Operand(esp, 0)); | |
2006 __ movsd(result, Operand(esp, 0)); | |
2007 __ add(Operand(esp), Immediate(kDoubleSize)); | |
2008 break; | 2146 break; |
2009 } | 2147 } |
2010 default: | 2148 default: |
2011 UNREACHABLE(); | 2149 UNREACHABLE(); |
2012 break; | 2150 break; |
2013 } | 2151 } |
2014 } | 2152 } |
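
For Token::MOD no x87 arithmetic is emitted inline: both operands are spilled
to the stack and a C helper does the work, with the result returned in st(0)
per the ia32 calling convention, which X87CommitWrite then records. The
C-level contract the sequence relies on, assuming mod_two_doubles_operation
behaves like fmod (a sketch, not the actual V8 helper):

    #include <cmath>

    // Assumed shape of the runtime helper: two doubles in, remainder out.
    // On ia32, the double return value travels back in st(0).
    extern "C" double mod_two_doubles(double dividend, double divisor) {
      return std::fmod(dividend, divisor);
    }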
2015 | 2153 |
2016 | 2154 |
2017 void LCodeGen::DoArithmeticT(LArithmeticT* instr) { | 2155 void LCodeGen::DoArithmeticT(LArithmeticT* instr) { |
(...skipping 38 matching lines...)
2056 } | 2194 } |
2057 | 2195 |
2058 | 2196 |
2059 void LCodeGen::DoBranch(LBranch* instr) { | 2197 void LCodeGen::DoBranch(LBranch* instr) { |
2060 Representation r = instr->hydrogen()->value()->representation(); | 2198 Representation r = instr->hydrogen()->value()->representation(); |
2061 if (r.IsSmiOrInteger32()) { | 2199 if (r.IsSmiOrInteger32()) { |
2062 Register reg = ToRegister(instr->value()); | 2200 Register reg = ToRegister(instr->value()); |
2063 __ test(reg, Operand(reg)); | 2201 __ test(reg, Operand(reg)); |
2064 EmitBranch(instr, not_zero); | 2202 EmitBranch(instr, not_zero); |
2065 } else if (r.IsDouble()) { | 2203 } else if (r.IsDouble()) { |
2066 ASSERT(!info()->IsStub()); | 2204 UNREACHABLE(); |
2067 XMMRegister reg = ToDoubleRegister(instr->value()); | |
2068 XMMRegister xmm_scratch = double_scratch0(); | |
2069 __ xorps(xmm_scratch, xmm_scratch); | |
2070 __ ucomisd(reg, xmm_scratch); | |
2071 EmitBranch(instr, not_equal); | |
2072 } else { | 2205 } else { |
2073 ASSERT(r.IsTagged()); | 2206 ASSERT(r.IsTagged()); |
2074 Register reg = ToRegister(instr->value()); | 2207 Register reg = ToRegister(instr->value()); |
2075 HType type = instr->hydrogen()->value()->type(); | 2208 HType type = instr->hydrogen()->value()->type(); |
2076 if (type.IsBoolean()) { | 2209 if (type.IsBoolean()) { |
2077 ASSERT(!info()->IsStub()); | 2210 ASSERT(!info()->IsStub()); |
2078 __ cmp(reg, factory()->true_value()); | 2211 __ cmp(reg, factory()->true_value()); |
2079 EmitBranch(instr, equal); | 2212 EmitBranch(instr, equal); |
2080 } else if (type.IsSmi()) { | 2213 } else if (type.IsSmi()) { |
2081 ASSERT(!info()->IsStub()); | 2214 ASSERT(!info()->IsStub()); |
2082 __ test(reg, Operand(reg)); | 2215 __ test(reg, Operand(reg)); |
2083 EmitBranch(instr, not_equal); | 2216 EmitBranch(instr, not_equal); |
2084 } else if (type.IsJSArray()) { | 2217 } else if (type.IsJSArray()) { |
2085 ASSERT(!info()->IsStub()); | 2218 ASSERT(!info()->IsStub()); |
2086 EmitBranch(instr, no_condition); | 2219 EmitBranch(instr, no_condition); |
2087 } else if (type.IsHeapNumber()) { | 2220 } else if (type.IsHeapNumber()) { |
2088 ASSERT(!info()->IsStub()); | 2221 UNREACHABLE(); |
2089 XMMRegister xmm_scratch = double_scratch0(); | |
2090 __ xorps(xmm_scratch, xmm_scratch); | |
2091 __ ucomisd(xmm_scratch, FieldOperand(reg, HeapNumber::kValueOffset)); | |
2092 EmitBranch(instr, not_equal); | |
2093 } else if (type.IsString()) { | 2222 } else if (type.IsString()) { |
2094 ASSERT(!info()->IsStub()); | 2223 ASSERT(!info()->IsStub()); |
2095 __ cmp(FieldOperand(reg, String::kLengthOffset), Immediate(0)); | 2224 __ cmp(FieldOperand(reg, String::kLengthOffset), Immediate(0)); |
2096 EmitBranch(instr, not_equal); | 2225 EmitBranch(instr, not_equal); |
2097 } else { | 2226 } else { |
2098 ToBooleanStub::Types expected = instr->hydrogen()->expected_input_types(); | 2227 ToBooleanStub::Types expected = instr->hydrogen()->expected_input_types(); |
2099 if (expected.IsEmpty()) expected = ToBooleanStub::Types::Generic(); | 2228 if (expected.IsEmpty()) expected = ToBooleanStub::Types::Generic(); |
2100 | 2229 |
2101 if (expected.Contains(ToBooleanStub::UNDEFINED)) { | 2230 if (expected.Contains(ToBooleanStub::UNDEFINED)) { |
2102 // undefined -> false. | 2231 // undefined -> false. |
(...skipping 61 matching lines...)
2164 __ CmpInstanceType(map, SYMBOL_TYPE); | 2293 __ CmpInstanceType(map, SYMBOL_TYPE); |
2165 __ j(equal, instr->TrueLabel(chunk_)); | 2294 __ j(equal, instr->TrueLabel(chunk_)); |
2166 } | 2295 } |
2167 | 2296 |
2168 if (expected.Contains(ToBooleanStub::HEAP_NUMBER)) { | 2297 if (expected.Contains(ToBooleanStub::HEAP_NUMBER)) { |
2169 // heap number -> false iff +0, -0, or NaN. | 2298 // heap number -> false iff +0, -0, or NaN. |
2170 Label not_heap_number; | 2299 Label not_heap_number; |
2171 __ cmp(FieldOperand(reg, HeapObject::kMapOffset), | 2300 __ cmp(FieldOperand(reg, HeapObject::kMapOffset), |
2172 factory()->heap_number_map()); | 2301 factory()->heap_number_map()); |
2173 __ j(not_equal, ¬_heap_number, Label::kNear); | 2302 __ j(not_equal, ¬_heap_number, Label::kNear); |
2174 XMMRegister xmm_scratch = double_scratch0(); | 2303 __ fldz(); |
2175 __ xorps(xmm_scratch, xmm_scratch); | 2304 __ fld_d(FieldOperand(reg, HeapNumber::kValueOffset)); |
2176 __ ucomisd(xmm_scratch, FieldOperand(reg, HeapNumber::kValueOffset)); | 2305 __ FCmp(); |
2177 __ j(zero, instr->FalseLabel(chunk_)); | 2306 __ j(zero, instr->FalseLabel(chunk_)); |
2178 __ jmp(instr->TrueLabel(chunk_)); | 2307 __ jmp(instr->TrueLabel(chunk_)); |
2179 __ bind(¬_heap_number); | 2308 __ bind(¬_heap_number); |
2180 } | 2309 } |
2181 | 2310 |
2182 if (!expected.IsGeneric()) { | 2311 if (!expected.IsGeneric()) { |
2183 // We've seen something for the first time -> deopt. | 2312 // We've seen something for the first time -> deopt. |
2184 // This can only happen if we are not generic already. | 2313 // This can only happen if we are not generic already. |
2185 DeoptimizeIf(no_condition, instr->environment()); | 2314 DeoptimizeIf(no_condition, instr->environment()); |
2186 } | 2315 } |
2187 } | 2316 } |
2188 } | 2317 } |
2189 } | 2318 } |
2190 | 2319 |
2191 | 2320 |
2192 void LCodeGen::EmitGoto(int block) { | 2321 void LCodeGen::EmitGoto(int block) { |
2193 if (!IsNextEmittedBlock(block)) { | 2322 if (!IsNextEmittedBlock(block)) { |
2194 __ jmp(chunk_->GetAssemblyLabel(LookupDestination(block))); | 2323 __ jmp(chunk_->GetAssemblyLabel(LookupDestination(block))); |
2195 } | 2324 } |
2196 } | 2325 } |
2197 | 2326 |
2198 | 2327 |
| 2328 void LCodeGen::DoClobberDoubles(LClobberDoubles* instr) { |
| 2329 } |
| 2330 |
| 2331 |
2199 void LCodeGen::DoGoto(LGoto* instr) { | 2332 void LCodeGen::DoGoto(LGoto* instr) { |
2200 EmitGoto(instr->block_id()); | 2333 EmitGoto(instr->block_id()); |
2201 } | 2334 } |
2202 | 2335 |
2203 | 2336 |
2204 Condition LCodeGen::TokenToCondition(Token::Value op, bool is_unsigned) { | 2337 Condition LCodeGen::TokenToCondition(Token::Value op, bool is_unsigned) { |
2205 Condition cond = no_condition; | 2338 Condition cond = no_condition; |
2206 switch (op) { | 2339 switch (op) { |
2207 case Token::EQ: | 2340 case Token::EQ: |
2208 case Token::EQ_STRICT: | 2341 case Token::EQ_STRICT: |
(...skipping 33 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... |
2242 | 2375 |
2243 if (left->IsConstantOperand() && right->IsConstantOperand()) { | 2376 if (left->IsConstantOperand() && right->IsConstantOperand()) { |
2244 // We can statically evaluate the comparison. | 2377 // We can statically evaluate the comparison. |
2245 double left_val = ToDouble(LConstantOperand::cast(left)); | 2378 double left_val = ToDouble(LConstantOperand::cast(left)); |
2246 double right_val = ToDouble(LConstantOperand::cast(right)); | 2379 double right_val = ToDouble(LConstantOperand::cast(right)); |
2247 int next_block = EvalComparison(instr->op(), left_val, right_val) ? | 2380 int next_block = EvalComparison(instr->op(), left_val, right_val) ? |
2248 instr->TrueDestination(chunk_) : instr->FalseDestination(chunk_); | 2381 instr->TrueDestination(chunk_) : instr->FalseDestination(chunk_); |
2249 EmitGoto(next_block); | 2382 EmitGoto(next_block); |
2250 } else { | 2383 } else { |
2251 if (instr->is_double()) { | 2384 if (instr->is_double()) { |
2252 __ ucomisd(ToDoubleRegister(left), ToDoubleRegister(right)); | 2385 X87LoadForUsage(ToX87Register(right), ToX87Register(left)); |
| 2386 __ FCmp(); |
2253 // Don't base result on EFLAGS when a NaN is involved. Instead | 2387 // Don't base result on EFLAGS when a NaN is involved. Instead |
2254 // jump to the false block. | 2388 // jump to the false block. |
2255 __ j(parity_even, instr->FalseLabel(chunk_)); | 2389 __ j(parity_even, instr->FalseLabel(chunk_)); |
2256 } else { | 2390 } else { |
2257 if (right->IsConstantOperand()) { | 2391 if (right->IsConstantOperand()) { |
2258 __ cmp(ToOperand(left), | 2392 __ cmp(ToOperand(left), |
2259 ToImmediate(right, instr->hydrogen()->representation())); | 2393 ToImmediate(right, instr->hydrogen()->representation())); |
2260 } else if (left->IsConstantOperand()) { | 2394 } else if (left->IsConstantOperand()) { |
2261 __ cmp(ToOperand(right), | 2395 __ cmp(ToOperand(right), |
2262 ToImmediate(left, instr->hydrogen()->representation())); | 2396 ToImmediate(left, instr->hydrogen()->representation())); |
(...skipping 23 matching lines...)
2286 | 2420 |
2287 | 2421 |
2288 void LCodeGen::DoCmpHoleAndBranch(LCmpHoleAndBranch* instr) { | 2422 void LCodeGen::DoCmpHoleAndBranch(LCmpHoleAndBranch* instr) { |
2289 if (instr->hydrogen()->representation().IsTagged()) { | 2423 if (instr->hydrogen()->representation().IsTagged()) { |
2290 Register input_reg = ToRegister(instr->object()); | 2424 Register input_reg = ToRegister(instr->object()); |
2291 __ cmp(input_reg, factory()->the_hole_value()); | 2425 __ cmp(input_reg, factory()->the_hole_value()); |
2292 EmitBranch(instr, equal); | 2426 EmitBranch(instr, equal); |
2293 return; | 2427 return; |
2294 } | 2428 } |
2295 | 2429 |
2296 XMMRegister input_reg = ToDoubleRegister(instr->object()); | 2430 // Put the value to the top of stack |
2297 __ ucomisd(input_reg, input_reg); | 2431 X87Register src = ToX87Register(instr->object()); |
2298 EmitFalseBranch(instr, parity_odd); | 2432 X87LoadForUsage(src); |
| 2433 __ fld(0); |
| 2434 __ fld(0); |
| 2435 __ FCmp(); |
| 2436 Label ok; |
| 2437 __ j(parity_even, &ok, Label::kNear); |
| 2438 __ fstp(0); |
| 2439 EmitFalseBranch(instr, no_condition); |
| 2440 __ bind(&ok); |
| 2441 |
2299 | 2442 |
2300 __ sub(esp, Immediate(kDoubleSize)); | 2443 __ sub(esp, Immediate(kDoubleSize)); |
2301 __ movsd(MemOperand(esp, 0), input_reg); | 2444 __ fstp_d(MemOperand(esp, 0)); |
2302 | 2445 |
2303 __ add(esp, Immediate(kDoubleSize)); | 2446 __ add(esp, Immediate(kDoubleSize)); |
2304 int offset = sizeof(kHoleNanUpper32); | 2447 int offset = sizeof(kHoleNanUpper32); |
2305 __ cmp(MemOperand(esp, -offset), Immediate(kHoleNanUpper32)); | 2448 __ cmp(MemOperand(esp, -offset), Immediate(kHoleNanUpper32)); |
2306 EmitBranch(instr, equal); | 2449 EmitBranch(instr, equal); |
2307 } | 2450 } |
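
DoCmpHoleAndBranch first uses FCmp's parity flag to send every non-NaN value
to the false branch, then spills the double and compares only its upper 32
bits against kHoleNanUpper32, since the hole is a NaN with a distinctive high
word. A standalone version of that final test (a sketch; the constant below
is illustrative, the real value lives in V8's globals):

    #include <cstdint>
    #include <cstring>

    static const uint32_t kIllustrativeHoleNanUpper32 = 0xFFF7FFFF;  // assumed

    bool LooksLikeHoleNan(double d) {
      uint64_t bits;
      std::memcpy(&bits, &d, sizeof bits);  // models the fstp_d spill
      return static_cast<uint32_t>(bits >> 32) == kIllustrativeHoleNanUpper32;
    }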
2308 | 2451 |
2309 | 2452 |
2310 void LCodeGen::DoCompareMinusZeroAndBranch(LCompareMinusZeroAndBranch* instr) { | 2453 void LCodeGen::DoCompareMinusZeroAndBranch(LCompareMinusZeroAndBranch* instr) { |
2311 Representation rep = instr->hydrogen()->value()->representation(); | 2454 Representation rep = instr->hydrogen()->value()->representation(); |
2312 ASSERT(!rep.IsInteger32()); | 2455 ASSERT(!rep.IsInteger32()); |
2313 Register scratch = ToRegister(instr->temp()); | |
2314 | 2456 |
2315 if (rep.IsDouble()) { | 2457 if (rep.IsDouble()) { |
2316 XMMRegister value = ToDoubleRegister(instr->value()); | 2458 UNREACHABLE(); |
2317 XMMRegister xmm_scratch = double_scratch0(); | |
2318 __ xorps(xmm_scratch, xmm_scratch); | |
2319 __ ucomisd(xmm_scratch, value); | |
2320 EmitFalseBranch(instr, not_equal); | |
2321 __ movmskpd(scratch, value); | |
2322 __ test(scratch, Immediate(1)); | |
2323 EmitBranch(instr, not_zero); | |
2324 } else { | 2459 } else { |
2325 Register value = ToRegister(instr->value()); | 2460 Register value = ToRegister(instr->value()); |
2326 Handle<Map> map = masm()->isolate()->factory()->heap_number_map(); | 2461 Handle<Map> map = masm()->isolate()->factory()->heap_number_map(); |
2327 __ CheckMap(value, map, instr->FalseLabel(chunk()), DO_SMI_CHECK); | 2462 __ CheckMap(value, map, instr->FalseLabel(chunk()), DO_SMI_CHECK); |
2328 __ cmp(FieldOperand(value, HeapNumber::kExponentOffset), | 2463 __ cmp(FieldOperand(value, HeapNumber::kExponentOffset), |
2329 Immediate(0x1)); | 2464 Immediate(0x1)); |
2330 EmitFalseBranch(instr, no_overflow); | 2465 EmitFalseBranch(instr, no_overflow); |
2331 __ cmp(FieldOperand(value, HeapNumber::kMantissaOffset), | 2466 __ cmp(FieldOperand(value, HeapNumber::kMantissaOffset), |
2332 Immediate(0x00000000)); | 2467 Immediate(0x00000000)); |
2333 EmitBranch(instr, equal); | 2468 EmitBranch(instr, equal); |
(...skipping 274 matching lines...)
2608 __ bind(&true_value); | 2743 __ bind(&true_value); |
2609 __ mov(ToRegister(instr->result()), factory()->true_value()); | 2744 __ mov(ToRegister(instr->result()), factory()->true_value()); |
2610 __ bind(&done); | 2745 __ bind(&done); |
2611 } | 2746 } |
2612 | 2747 |
2613 | 2748 |
2614 void LCodeGen::DoInstanceOfKnownGlobal(LInstanceOfKnownGlobal* instr) { | 2749 void LCodeGen::DoInstanceOfKnownGlobal(LInstanceOfKnownGlobal* instr) { |
2615 class DeferredInstanceOfKnownGlobal V8_FINAL : public LDeferredCode { | 2750 class DeferredInstanceOfKnownGlobal V8_FINAL : public LDeferredCode { |
2616 public: | 2751 public: |
2617 DeferredInstanceOfKnownGlobal(LCodeGen* codegen, | 2752 DeferredInstanceOfKnownGlobal(LCodeGen* codegen, |
2618 LInstanceOfKnownGlobal* instr) | 2753 LInstanceOfKnownGlobal* instr, |
2619 : LDeferredCode(codegen), instr_(instr) { } | 2754 const X87Stack& x87_stack) |
| 2755 : LDeferredCode(codegen, x87_stack), instr_(instr) { } |
2620 virtual void Generate() V8_OVERRIDE { | 2756 virtual void Generate() V8_OVERRIDE { |
2621 codegen()->DoDeferredInstanceOfKnownGlobal(instr_, &map_check_); | 2757 codegen()->DoDeferredInstanceOfKnownGlobal(instr_, &map_check_); |
2622 } | 2758 } |
2623 virtual LInstruction* instr() V8_OVERRIDE { return instr_; } | 2759 virtual LInstruction* instr() V8_OVERRIDE { return instr_; } |
2624 Label* map_check() { return &map_check_; } | 2760 Label* map_check() { return &map_check_; } |
2625 private: | 2761 private: |
2626 LInstanceOfKnownGlobal* instr_; | 2762 LInstanceOfKnownGlobal* instr_; |
2627 Label map_check_; | 2763 Label map_check_; |
2628 }; | 2764 }; |
2629 | 2765 |
2630 DeferredInstanceOfKnownGlobal* deferred; | 2766 DeferredInstanceOfKnownGlobal* deferred; |
2631 deferred = new(zone()) DeferredInstanceOfKnownGlobal(this, instr); | 2767 deferred = new(zone()) DeferredInstanceOfKnownGlobal(this, instr, x87_stack_); |
2632 | 2768 |
2633 Label done, false_result; | 2769 Label done, false_result; |
2634 Register object = ToRegister(instr->value()); | 2770 Register object = ToRegister(instr->value()); |
2635 Register temp = ToRegister(instr->temp()); | 2771 Register temp = ToRegister(instr->temp()); |
2636 | 2772 |
2637 // A Smi is not an instance of anything. | 2773 // A Smi is not an instance of anything. |
2638 __ JumpIfSmi(object, &false_result, Label::kNear); | 2774 __ JumpIfSmi(object, &false_result, Label::kNear); |
2639 | 2775 |
2640 // This is the inlined call site instanceof cache. The two occurrences of the | 2776 // This is the inlined call site instanceof cache. The two occurrences of the |
2641 // hole value will be patched to the last map/result pair generated by the | 2777 // hole value will be patched to the last map/result pair generated by the |
(...skipping 128 matching lines...)
2770 void LCodeGen::DoReturn(LReturn* instr) { | 2906 void LCodeGen::DoReturn(LReturn* instr) { |
2771 if (FLAG_trace && info()->IsOptimizing()) { | 2907 if (FLAG_trace && info()->IsOptimizing()) { |
2772 // Preserve the return value on the stack and rely on the runtime call | 2908 // Preserve the return value on the stack and rely on the runtime call |
2773 // to return the value in the same register. We're leaving the code | 2909 // to return the value in the same register. We're leaving the code |
2774 // managed by the register allocator and tearing down the frame, it's | 2910 // managed by the register allocator and tearing down the frame, it's |
2775 // safe to write to the context register. | 2911 // safe to write to the context register. |
2776 __ push(eax); | 2912 __ push(eax); |
2777 __ mov(esi, Operand(ebp, StandardFrameConstants::kContextOffset)); | 2913 __ mov(esi, Operand(ebp, StandardFrameConstants::kContextOffset)); |
2778 __ CallRuntime(Runtime::kTraceExit, 1); | 2914 __ CallRuntime(Runtime::kTraceExit, 1); |
2779 } | 2915 } |
2780 if (info()->saves_caller_doubles()) RestoreCallerDoubles(); | |
2781 if (dynamic_frame_alignment_) { | 2916 if (dynamic_frame_alignment_) { |
2782 // Fetch the state of the dynamic frame alignment. | 2917 // Fetch the state of the dynamic frame alignment. |
2783 __ mov(edx, Operand(ebp, | 2918 __ mov(edx, Operand(ebp, |
2784 JavaScriptFrameConstants::kDynamicAlignmentStateOffset)); | 2919 JavaScriptFrameConstants::kDynamicAlignmentStateOffset)); |
2785 } | 2920 } |
2786 int no_frame_start = -1; | 2921 int no_frame_start = -1; |
2787 if (NeedsEagerFrame()) { | 2922 if (NeedsEagerFrame()) { |
2788 __ mov(esp, ebp); | 2923 __ mov(esp, ebp); |
2789 __ pop(ebp); | 2924 __ pop(ebp); |
2790 no_frame_start = masm_->pc_offset(); | 2925 no_frame_start = masm_->pc_offset(); |
(...skipping 94 matching lines...)
2885 if (instr->hydrogen()->NeedsWriteBarrier()) { | 3020 if (instr->hydrogen()->NeedsWriteBarrier()) { |
2886 SmiCheck check_needed = | 3021 SmiCheck check_needed = |
2887 instr->hydrogen()->value()->IsHeapObject() | 3022 instr->hydrogen()->value()->IsHeapObject() |
2888 ? OMIT_SMI_CHECK : INLINE_SMI_CHECK; | 3023 ? OMIT_SMI_CHECK : INLINE_SMI_CHECK; |
2889 Register temp = ToRegister(instr->temp()); | 3024 Register temp = ToRegister(instr->temp()); |
2890 int offset = Context::SlotOffset(instr->slot_index()); | 3025 int offset = Context::SlotOffset(instr->slot_index()); |
2891 __ RecordWriteContextSlot(context, | 3026 __ RecordWriteContextSlot(context, |
2892 offset, | 3027 offset, |
2893 value, | 3028 value, |
2894 temp, | 3029 temp, |
2895 kSaveFPRegs, | |
2896 EMIT_REMEMBERED_SET, | 3030 EMIT_REMEMBERED_SET, |
2897 check_needed); | 3031 check_needed); |
2898 } | 3032 } |
2899 | 3033 |
2900 __ bind(&skip_assignment); | 3034 __ bind(&skip_assignment); |
2901 } | 3035 } |
2902 | 3036 |
2903 | 3037 |
2904 void LCodeGen::DoLoadNamedField(LLoadNamedField* instr) { | 3038 void LCodeGen::DoLoadNamedField(LLoadNamedField* instr) { |
2905 HObjectAccess access = instr->hydrogen()->access(); | 3039 HObjectAccess access = instr->hydrogen()->access(); |
2906 int offset = access.offset(); | 3040 int offset = access.offset(); |
2907 | 3041 |
2908 if (access.IsExternalMemory()) { | 3042 if (access.IsExternalMemory()) { |
2909 Register result = ToRegister(instr->result()); | 3043 Register result = ToRegister(instr->result()); |
2910 MemOperand operand = instr->object()->IsConstantOperand() | 3044 MemOperand operand = instr->object()->IsConstantOperand() |
2911 ? MemOperand::StaticVariable(ToExternalReference( | 3045 ? MemOperand::StaticVariable(ToExternalReference( |
2912 LConstantOperand::cast(instr->object()))) | 3046 LConstantOperand::cast(instr->object()))) |
2913 : MemOperand(ToRegister(instr->object()), offset); | 3047 : MemOperand(ToRegister(instr->object()), offset); |
2914 __ Load(result, operand, access.representation()); | 3048 __ Load(result, operand, access.representation()); |
2915 return; | 3049 return; |
2916 } | 3050 } |
2917 | 3051 |
2918 Register object = ToRegister(instr->object()); | 3052 Register object = ToRegister(instr->object()); |
2919 if (instr->hydrogen()->representation().IsDouble()) { | 3053 if (instr->hydrogen()->representation().IsDouble()) { |
2920 XMMRegister result = ToDoubleRegister(instr->result()); | 3054 X87Mov(ToX87Register(instr->result()), FieldOperand(object, offset)); |
2921 __ movsd(result, FieldOperand(object, offset)); | |
2922 return; | 3055 return; |
2923 } | 3056 } |
2924 | 3057 |
2925 Register result = ToRegister(instr->result()); | 3058 Register result = ToRegister(instr->result()); |
2926 if (!access.IsInobject()) { | 3059 if (!access.IsInobject()) { |
2927 __ mov(result, FieldOperand(object, JSObject::kPropertiesOffset)); | 3060 __ mov(result, FieldOperand(object, JSObject::kPropertiesOffset)); |
2928 object = result; | 3061 object = result; |
2929 } | 3062 } |
2930 __ Load(result, FieldOperand(object, offset), access.representation()); | 3063 __ Load(result, FieldOperand(object, offset), access.representation()); |
2931 } | 3064 } |
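The two load shapes above are worth spelling out: an in-object field lives at a fixed offset inside the JSObject itself, while an out-of-object field takes one extra indirection through the properties backing store (JSObject::kPropertiesOffset). A rough standalone model, with hypothetical names (ObjectModel, LoadField) that are not V8 types:

    // Hypothetical model of the in-object vs. backing-store load above.
    struct ObjectModel {
      void* inobject[4];    // fields allocated inside the object itself
      void** properties;    // out-of-object backing store
    };

    void* LoadField(const ObjectModel& o, bool inobject, int index) {
      // access.IsInobject(): direct read; otherwise hop through the
      // properties array first, exactly as the mov above does.
      return inobject ? o.inobject[index] : o.properties[index];
    }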
(...skipping 105 matching lines...)
3037 __ SmiUntag(ToRegister(key)); | 3170 __ SmiUntag(ToRegister(key)); |
3038 } | 3171 } |
3039 Operand operand(BuildFastArrayOperand( | 3172 Operand operand(BuildFastArrayOperand( |
3040 instr->elements(), | 3173 instr->elements(), |
3041 key, | 3174 key, |
3042 instr->hydrogen()->key()->representation(), | 3175 instr->hydrogen()->key()->representation(), |
3043 elements_kind, | 3176 elements_kind, |
3044 instr->base_offset())); | 3177 instr->base_offset())); |
3045 if (elements_kind == EXTERNAL_FLOAT32_ELEMENTS || | 3178 if (elements_kind == EXTERNAL_FLOAT32_ELEMENTS || |
3046 elements_kind == FLOAT32_ELEMENTS) { | 3179 elements_kind == FLOAT32_ELEMENTS) { |
3047 XMMRegister result(ToDoubleRegister(instr->result())); | 3180 X87Mov(ToX87Register(instr->result()), operand, kX87FloatOperand); |
3048 __ movss(result, operand); | |
3049 __ cvtss2sd(result, result); | |
3050 } else if (elements_kind == EXTERNAL_FLOAT64_ELEMENTS || | 3181 } else if (elements_kind == EXTERNAL_FLOAT64_ELEMENTS || |
3051 elements_kind == FLOAT64_ELEMENTS) { | 3182 elements_kind == FLOAT64_ELEMENTS) { |
3052 __ movsd(ToDoubleRegister(instr->result()), operand); | 3183 X87Mov(ToX87Register(instr->result()), operand); |
3053 } else { | 3184 } else { |
3054 Register result(ToRegister(instr->result())); | 3185 Register result(ToRegister(instr->result())); |
3055 switch (elements_kind) { | 3186 switch (elements_kind) { |
3056 case EXTERNAL_INT8_ELEMENTS: | 3187 case EXTERNAL_INT8_ELEMENTS: |
3057 case INT8_ELEMENTS: | 3188 case INT8_ELEMENTS: |
3058 __ movsx_b(result, operand); | 3189 __ movsx_b(result, operand); |
3059 break; | 3190 break; |
3060 case EXTERNAL_UINT8_CLAMPED_ELEMENTS: | 3191 case EXTERNAL_UINT8_CLAMPED_ELEMENTS: |
3061 case EXTERNAL_UINT8_ELEMENTS: | 3192 case EXTERNAL_UINT8_ELEMENTS: |
3062 case UINT8_ELEMENTS: | 3193 case UINT8_ELEMENTS: |
(...skipping 49 matching lines...)
3112 __ cmp(hole_check_operand, Immediate(kHoleNanUpper32)); | 3243 __ cmp(hole_check_operand, Immediate(kHoleNanUpper32)); |
3113 DeoptimizeIf(equal, instr->environment()); | 3244 DeoptimizeIf(equal, instr->environment()); |
3114 } | 3245 } |
3115 | 3246 |
3116 Operand double_load_operand = BuildFastArrayOperand( | 3247 Operand double_load_operand = BuildFastArrayOperand( |
3117 instr->elements(), | 3248 instr->elements(), |
3118 instr->key(), | 3249 instr->key(), |
3119 instr->hydrogen()->key()->representation(), | 3250 instr->hydrogen()->key()->representation(), |
3120 FAST_DOUBLE_ELEMENTS, | 3251 FAST_DOUBLE_ELEMENTS, |
3121 instr->base_offset()); | 3252 instr->base_offset()); |
3122 XMMRegister result = ToDoubleRegister(instr->result()); | 3253 X87Mov(ToX87Register(instr->result()), double_load_operand); |
3123 __ movsd(result, double_load_operand); | |
3124 } | 3254 } |
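The hole check above never loads the element as a double: the hole is encoded as a NaN with a fixed upper word, so comparing the element's upper 32 bits against kHoleNanUpper32 suffices. A minimal sketch under that assumption (IsTheHole is a hypothetical name):

    // Sketch (assumes <cstdint>): hole detection from the raw element words.
    #include <cstdint>

    bool IsTheHole(const uint32_t words[2], uint32_t hole_nan_upper32) {
      // words[1] is the upper half of the 64-bit element; the lower half
      // never needs to be read.
      return words[1] == hole_nan_upper32;
    }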
3125 | 3255 |
3126 | 3256 |
3127 void LCodeGen::DoLoadKeyedFixedArray(LLoadKeyed* instr) { | 3257 void LCodeGen::DoLoadKeyedFixedArray(LLoadKeyed* instr) { |
3128 Register result = ToRegister(instr->result()); | 3258 Register result = ToRegister(instr->result()); |
3129 | 3259 |
3130 // Load the result. | 3260 // Load the result. |
3131 __ mov(result, | 3261 __ mov(result, |
3132 BuildFastArrayOperand(instr->elements(), | 3262 BuildFastArrayOperand(instr->elements(), |
3133 instr->key(), | 3263 instr->key(), |
(...skipping 396 matching lines...)
3530 DeoptimizeIf(negative, instr->environment()); | 3660 DeoptimizeIf(negative, instr->environment()); |
3531 __ bind(&is_positive); | 3661 __ bind(&is_positive); |
3532 } | 3662 } |
3533 | 3663 |
3534 | 3664 |
3535 void LCodeGen::DoMathAbs(LMathAbs* instr) { | 3665 void LCodeGen::DoMathAbs(LMathAbs* instr) { |
3536 // Class for deferred case. | 3666 // Class for deferred case. |
3537 class DeferredMathAbsTaggedHeapNumber V8_FINAL : public LDeferredCode { | 3667 class DeferredMathAbsTaggedHeapNumber V8_FINAL : public LDeferredCode { |
3538 public: | 3668 public: |
3539 DeferredMathAbsTaggedHeapNumber(LCodeGen* codegen, | 3669 DeferredMathAbsTaggedHeapNumber(LCodeGen* codegen, |
3540 LMathAbs* instr) | 3670 LMathAbs* instr, |
3541 : LDeferredCode(codegen), instr_(instr) { } | 3671 const X87Stack& x87_stack) |
| 3672 : LDeferredCode(codegen, x87_stack), instr_(instr) { } |
3542 virtual void Generate() V8_OVERRIDE { | 3673 virtual void Generate() V8_OVERRIDE { |
3543 codegen()->DoDeferredMathAbsTaggedHeapNumber(instr_); | 3674 codegen()->DoDeferredMathAbsTaggedHeapNumber(instr_); |
3544 } | 3675 } |
3545 virtual LInstruction* instr() V8_OVERRIDE { return instr_; } | 3676 virtual LInstruction* instr() V8_OVERRIDE { return instr_; } |
3546 private: | 3677 private: |
3547 LMathAbs* instr_; | 3678 LMathAbs* instr_; |
3548 }; | 3679 }; |
3549 | 3680 |
3550 ASSERT(instr->value()->Equals(instr->result())); | 3681 ASSERT(instr->value()->Equals(instr->result())); |
3551 Representation r = instr->hydrogen()->value()->representation(); | 3682 Representation r = instr->hydrogen()->value()->representation(); |
3552 | 3683 |
3553 if (r.IsDouble()) { | 3684 if (r.IsDouble()) { |
3554 XMMRegister scratch = double_scratch0(); | 3685 UNIMPLEMENTED(); |
3555 XMMRegister input_reg = ToDoubleRegister(instr->value()); | |
3556 __ xorps(scratch, scratch); | |
3557 __ subsd(scratch, input_reg); | |
3558 __ andps(input_reg, scratch); | |
3559 } else if (r.IsSmiOrInteger32()) { | 3686 } else if (r.IsSmiOrInteger32()) { |
3560 EmitIntegerMathAbs(instr); | 3687 EmitIntegerMathAbs(instr); |
3561 } else { // Tagged case. | 3688 } else { // Tagged case. |
3562 DeferredMathAbsTaggedHeapNumber* deferred = | 3689 DeferredMathAbsTaggedHeapNumber* deferred = |
3563 new(zone()) DeferredMathAbsTaggedHeapNumber(this, instr); | 3690 new(zone()) DeferredMathAbsTaggedHeapNumber(this, instr, x87_stack_); |
3564 Register input_reg = ToRegister(instr->value()); | 3691 Register input_reg = ToRegister(instr->value()); |
3565 // Smi check. | 3692 // Smi check. |
3566 __ JumpIfNotSmi(input_reg, deferred->entry()); | 3693 __ JumpIfNotSmi(input_reg, deferred->entry()); |
3567 EmitIntegerMathAbs(instr); | 3694 EmitIntegerMathAbs(instr); |
3568 __ bind(deferred->exit()); | 3695 __ bind(deferred->exit()); |
3569 } | 3696 } |
3570 } | 3697 } |
3571 | 3698 |
3572 | 3699 |
3573 void LCodeGen::DoMathFloor(LMathFloor* instr) { | 3700 void LCodeGen::DoMathFloor(LMathFloor* instr) { |
3574 XMMRegister xmm_scratch = double_scratch0(); | 3701 UNIMPLEMENTED(); |
3575 Register output_reg = ToRegister(instr->result()); | |
3576 XMMRegister input_reg = ToDoubleRegister(instr->value()); | |
3577 | |
3578 if (CpuFeatures::IsSupported(SSE4_1)) { | |
3579 CpuFeatureScope scope(masm(), SSE4_1); | |
3580 if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) { | |
3581 // Deoptimize on negative zero. | |
3582 Label non_zero; | |
3583 __ xorps(xmm_scratch, xmm_scratch); // Zero the register. | |
3584 __ ucomisd(input_reg, xmm_scratch); | |
3585 __ j(not_equal, &non_zero, Label::kNear); | |
3586 __ movmskpd(output_reg, input_reg); | |
3587 __ test(output_reg, Immediate(1)); | |
3588 DeoptimizeIf(not_zero, instr->environment()); | |
3589 __ bind(&non_zero); | |
3590 } | |
3591 __ roundsd(xmm_scratch, input_reg, Assembler::kRoundDown); | |
3592 __ cvttsd2si(output_reg, Operand(xmm_scratch)); | |
3593 // Overflow is signalled with minint. | |
3594 __ cmp(output_reg, 0x1); | |
3595 DeoptimizeIf(overflow, instr->environment()); | |
3596 } else { | |
3597 Label negative_sign, done; | |
3598 // Deoptimize on unordered. | |
3599 __ xorps(xmm_scratch, xmm_scratch); // Zero the register. | |
3600 __ ucomisd(input_reg, xmm_scratch); | |
3601 DeoptimizeIf(parity_even, instr->environment()); | |
3602 __ j(below, &negative_sign, Label::kNear); | |
3603 | |
3604 if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) { | |
3605 // Check for negative zero. | |
3606 Label positive_sign; | |
3607 __ j(above, &positive_sign, Label::kNear); | |
3608 __ movmskpd(output_reg, input_reg); | |
3609 __ test(output_reg, Immediate(1)); | |
3610 DeoptimizeIf(not_zero, instr->environment()); | |
3611 __ Move(output_reg, Immediate(0)); | |
3612 __ jmp(&done, Label::kNear); | |
3613 __ bind(&positive_sign); | |
3614 } | |
3615 | |
3616 // Use truncating instruction (OK because input is positive). | |
3617 __ cvttsd2si(output_reg, Operand(input_reg)); | |
3618 // Overflow is signalled with minint. | |
3619 __ cmp(output_reg, 0x1); | |
3620 DeoptimizeIf(overflow, instr->environment()); | |
3621 __ jmp(&done, Label::kNear); | |
3622 | |
3623 // Non-zero negative reaches here. | |
3624 __ bind(&negative_sign); | |
3625 // Truncate, then compare and compensate. | |
3626 __ cvttsd2si(output_reg, Operand(input_reg)); | |
3627 __ Cvtsi2sd(xmm_scratch, output_reg); | |
3628 __ ucomisd(input_reg, xmm_scratch); | |
3629 __ j(equal, &done, Label::kNear); | |
3630 __ sub(output_reg, Immediate(1)); | |
3631 DeoptimizeIf(overflow, instr->environment()); | |
3632 | |
3633 __ bind(&done); | |
3634 } | |
3635 } | 3702 } |
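For the record, the non-SSE4.1 fallback deleted on the left computes floor with nothing but a truncating conversion: cvttsd2si rounds toward zero, so for negative, non-integral inputs the result is one too high and is decremented after a compare. A hedged sketch of that arithmetic, deopt paths (NaN, -0, overflow) omitted:

    // Sketch (assumes <cstdint>): floor via truncation, as in the old code.
    #include <cstdint>

    int32_t FloorToInt32(double x) {
      int32_t r = static_cast<int32_t>(x);            // truncates toward zero
      if (x < 0.0 && static_cast<double>(r) != x) {
        r -= 1;                                       // e.g. -1.2: -1 -> -2
      }
      return r;
    }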
3636 | 3703 |
3637 | 3704 |
3638 void LCodeGen::DoMathRound(LMathRound* instr) { | 3705 void LCodeGen::DoMathRound(LMathRound* instr) { |
3639 Register output_reg = ToRegister(instr->result()); | 3706 UNIMPLEMENTED(); |
3640 XMMRegister input_reg = ToDoubleRegister(instr->value()); | |
3641 XMMRegister xmm_scratch = double_scratch0(); | |
3642 XMMRegister input_temp = ToDoubleRegister(instr->temp()); | |
3643 ExternalReference one_half = ExternalReference::address_of_one_half(); | |
3644 ExternalReference minus_one_half = | |
3645 ExternalReference::address_of_minus_one_half(); | |
3646 | |
3647 Label done, round_to_zero, below_one_half, do_not_compensate; | |
3648 Label::Distance dist = DeoptEveryNTimes() ? Label::kFar : Label::kNear; | |
3649 | |
3650 __ movsd(xmm_scratch, Operand::StaticVariable(one_half)); | |
3651 __ ucomisd(xmm_scratch, input_reg); | |
3652 __ j(above, &below_one_half, Label::kNear); | |
3653 | |
3654 // CVTTSD2SI rounds towards zero, since 0.5 <= x, we use floor(0.5 + x). | |
3655 __ addsd(xmm_scratch, input_reg); | |
3656 __ cvttsd2si(output_reg, Operand(xmm_scratch)); | |
3657 // Overflow is signalled with minint. | |
3658 __ cmp(output_reg, 0x1); | |
3659 __ RecordComment("D2I conversion overflow"); | |
3660 DeoptimizeIf(overflow, instr->environment()); | |
3661 __ jmp(&done, dist); | |
3662 | |
3663 __ bind(&below_one_half); | |
3664 __ movsd(xmm_scratch, Operand::StaticVariable(minus_one_half)); | |
3665 __ ucomisd(xmm_scratch, input_reg); | |
3666 __ j(below_equal, &round_to_zero, Label::kNear); | |
3667 | |
3668 // CVTTSD2SI rounds towards zero, we use ceil(x - (-0.5)) and then | |
3669 // compare and compensate. | |
3670 __ movaps(input_temp, input_reg); // Do not alter input_reg. | |
3671 __ subsd(input_temp, xmm_scratch); | |
3672 __ cvttsd2si(output_reg, Operand(input_temp)); | |
3673 // Catch minint due to overflow, and to prevent overflow when compensating. | |
3674 __ cmp(output_reg, 0x1); | |
3675 __ RecordComment("D2I conversion overflow"); | |
3676 DeoptimizeIf(overflow, instr->environment()); | |
3677 | |
3678 __ Cvtsi2sd(xmm_scratch, output_reg); | |
3679 __ ucomisd(xmm_scratch, input_temp); | |
3680 __ j(equal, &done, dist); | |
3681 __ sub(output_reg, Immediate(1)); | |
3682 // No overflow because we already ruled out minint. | |
3683 __ jmp(&done, dist); | |
3684 | |
3685 __ bind(&round_to_zero); | |
3686 // We return 0 for the input range [+0, 0.5[, or [-0.5, 0.5[ if | |
3687 // we can ignore the difference between a result of -0 and +0. | |
3688 if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) { | |
3689 // If the sign is positive, we return +0. | |
3690 __ movmskpd(output_reg, input_reg); | |
3691 __ test(output_reg, Immediate(1)); | |
3692 __ RecordComment("Minus zero"); | |
3693 DeoptimizeIf(not_zero, instr->environment()); | |
3694 } | |
3695 __ Move(output_reg, Immediate(0)); | |
3696 __ bind(&done); | |
3697 } | 3707 } |
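The deleted rounding code also leans only on truncating conversions. JS Math.round(x) is floor(x + 0.5) with ties toward +infinity: for x >= 0.5, trunc(x + 0.5) already equals floor; for x in (-0.5, 0.5) the answer is 0 (with a deopt when -0 matters); for x <= -0.5, trunc(x + 0.5) equals ceil(x + 0.5) and is decremented unless x + 0.5 was integral. A hedged sketch with the deopt paths left out:

    // Sketch (assumes <cstdint>): JS Math.round via truncation only.
    #include <cstdint>

    int32_t JsRound(double x) {
      if (x >= 0.5) return static_cast<int32_t>(x + 0.5);  // trunc == floor
      if (x > -0.5) return 0;                              // (-0.5, 0.5) -> 0
      double t = x + 0.5;
      int32_t r = static_cast<int32_t>(t);                 // trunc == ceil
      if (static_cast<double>(r) != t) r -= 1;             // back down to floor
      return r;
    }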
3698 | 3708 |
3699 | 3709 |
3700 void LCodeGen::DoMathSqrt(LMathSqrt* instr) { | 3710 void LCodeGen::DoMathSqrt(LMathSqrt* instr) { |
3701 XMMRegister input_reg = ToDoubleRegister(instr->value()); | 3711 UNIMPLEMENTED(); |
3702 ASSERT(ToDoubleRegister(instr->result()).is(input_reg)); | |
3703 __ sqrtsd(input_reg, input_reg); | |
3704 } | 3712 } |
3705 | 3713 |
3706 | 3714 |
3707 void LCodeGen::DoMathPowHalf(LMathPowHalf* instr) { | 3715 void LCodeGen::DoMathPowHalf(LMathPowHalf* instr) { |
3708 XMMRegister xmm_scratch = double_scratch0(); | 3716 UNIMPLEMENTED(); |
3709 XMMRegister input_reg = ToDoubleRegister(instr->value()); | |
3710 Register scratch = ToRegister(instr->temp()); | |
3711 ASSERT(ToDoubleRegister(instr->result()).is(input_reg)); | |
3712 | |
3713 // Note that according to ECMA-262 15.8.2.13: | |
3714 // Math.pow(-Infinity, 0.5) == Infinity | |
3715 // Math.sqrt(-Infinity) == NaN | |
3716 Label done, sqrt; | |
3717 // Check base for -Infinity. According to IEEE-754, single-precision | |
3718 // -Infinity has the highest 9 bits set and the lowest 23 bits cleared. | |
3719 __ mov(scratch, 0xFF800000); | |
3720 __ movd(xmm_scratch, scratch); | |
3721 __ cvtss2sd(xmm_scratch, xmm_scratch); | |
3722 __ ucomisd(input_reg, xmm_scratch); | |
3723 // Comparing -Infinity with NaN results in "unordered", which sets the | |
3724 // zero flag as if both were equal. However, it also sets the carry flag. | |
3725 __ j(not_equal, &sqrt, Label::kNear); | |
3726 __ j(carry, &sqrt, Label::kNear); | |
3727 // If input is -Infinity, return Infinity. | |
3728 __ xorps(input_reg, input_reg); | |
3729 __ subsd(input_reg, xmm_scratch); | |
3730 __ jmp(&done, Label::kNear); | |
3731 | |
3732 // Square root. | |
3733 __ bind(&sqrt); | |
3734 __ xorps(xmm_scratch, xmm_scratch); | |
3735 __ addsd(input_reg, xmm_scratch); // Convert -0 to +0. | |
3736 __ sqrtsd(input_reg, input_reg); | |
3737 __ bind(&done); | |
3738 } | 3717 } |
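The removed x**0.5 code encodes an ECMA-262 (15.8.2.13) quirk quoted in its comments: Math.pow(-Infinity, 0.5) is +Infinity even though sqrt(-Infinity) is NaN, and -0 must be normalized to +0 before the square root (adding +0.0 does exactly that). A hedged equivalent:

    // Sketch (assumes <cmath>, <limits>) of the pow(x, 0.5) special-casing.
    #include <cmath>
    #include <limits>

    double PowHalf(double x) {
      const double kInf = std::numeric_limits<double>::infinity();
      if (x == -kInf) return kInf;  // ECMA: pow(-Infinity, 0.5) == Infinity
      return std::sqrt(x + 0.0);    // x + 0.0 turns -0.0 into +0.0
    }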
3739 | 3718 |
3740 | 3719 |
3741 void LCodeGen::DoPower(LPower* instr) { | 3720 void LCodeGen::DoPower(LPower* instr) { |
3742 Representation exponent_type = instr->hydrogen()->right()->representation(); | 3721 UNIMPLEMENTED(); |
3743 // Having marked this as a call, we can use any registers. | |
3744 // Just make sure that the input/output registers are the expected ones. | |
3745 ASSERT(!instr->right()->IsDoubleRegister() || | |
3746 ToDoubleRegister(instr->right()).is(xmm1)); | |
3747 ASSERT(!instr->right()->IsRegister() || | |
3748 ToRegister(instr->right()).is(eax)); | |
3749 ASSERT(ToDoubleRegister(instr->left()).is(xmm2)); | |
3750 ASSERT(ToDoubleRegister(instr->result()).is(xmm3)); | |
3751 | |
3752 if (exponent_type.IsSmi()) { | |
3753 MathPowStub stub(isolate(), MathPowStub::TAGGED); | |
3754 __ CallStub(&stub); | |
3755 } else if (exponent_type.IsTagged()) { | |
3756 Label no_deopt; | |
3757 __ JumpIfSmi(eax, &no_deopt); | |
3758 __ CmpObjectType(eax, HEAP_NUMBER_TYPE, ecx); | |
3759 DeoptimizeIf(not_equal, instr->environment()); | |
3760 __ bind(&no_deopt); | |
3761 MathPowStub stub(isolate(), MathPowStub::TAGGED); | |
3762 __ CallStub(&stub); | |
3763 } else if (exponent_type.IsInteger32()) { | |
3764 MathPowStub stub(isolate(), MathPowStub::INTEGER); | |
3765 __ CallStub(&stub); | |
3766 } else { | |
3767 ASSERT(exponent_type.IsDouble()); | |
3768 MathPowStub stub(isolate(), MathPowStub::DOUBLE); | |
3769 __ CallStub(&stub); | |
3770 } | |
3771 } | 3722 } |
3772 | 3723 |
3773 | 3724 |
3774 void LCodeGen::DoMathLog(LMathLog* instr) { | 3725 void LCodeGen::DoMathLog(LMathLog* instr) { |
3775 ASSERT(instr->value()->Equals(instr->result())); | 3726 UNIMPLEMENTED(); |
3776 XMMRegister input_reg = ToDoubleRegister(instr->value()); | |
3777 XMMRegister xmm_scratch = double_scratch0(); | |
3778 Label positive, done, zero; | |
3779 __ xorps(xmm_scratch, xmm_scratch); | |
3780 __ ucomisd(input_reg, xmm_scratch); | |
3781 __ j(above, &positive, Label::kNear); | |
3782 __ j(not_carry, &zero, Label::kNear); | |
3783 ExternalReference nan = | |
3784 ExternalReference::address_of_canonical_non_hole_nan(); | |
3785 __ movsd(input_reg, Operand::StaticVariable(nan)); | |
3786 __ jmp(&done, Label::kNear); | |
3787 __ bind(&zero); | |
3788 ExternalReference ninf = | |
3789 ExternalReference::address_of_negative_infinity(); | |
3790 __ movsd(input_reg, Operand::StaticVariable(ninf)); | |
3791 __ jmp(&done, Label::kNear); | |
3792 __ bind(&positive); | |
3793 __ fldln2(); | |
3794 __ sub(Operand(esp), Immediate(kDoubleSize)); | |
3795 __ movsd(Operand(esp, 0), input_reg); | |
3796 __ fld_d(Operand(esp, 0)); | |
3797 __ fyl2x(); | |
3798 __ fstp_d(Operand(esp, 0)); | |
3799 __ movsd(input_reg, Operand(esp, 0)); | |
3800 __ add(Operand(esp), Immediate(kDoubleSize)); | |
3801 __ bind(&done); | |
3802 } | 3727 } |
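The deleted log body dispatched on the sign of the input: negative (or NaN) inputs load the canonical NaN, zero loads -Infinity, and positive inputs go through the x87 pair fldln2/fyl2x, which computes y * log2(x) with y = ln 2, i.e. ln(x). A hedged C++ rendering of the dispatch:

    // Sketch (assumes <cmath>, <limits>): the cases around fldln2/fyl2x.
    #include <cmath>
    #include <limits>

    double JsLog(double x) {
      if (std::isnan(x) || x < 0.0)
        return std::numeric_limits<double>::quiet_NaN();
      if (x == 0.0)
        return -std::numeric_limits<double>::infinity();
      return std::log(x);  // == ln(2) * log2(x), the fyl2x identity
    }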
3803 | 3728 |
3804 | 3729 |
3805 void LCodeGen::DoMathClz32(LMathClz32* instr) { | 3730 void LCodeGen::DoMathClz32(LMathClz32* instr) { |
3806 Register input = ToRegister(instr->value()); | 3731 UNIMPLEMENTED(); |
3807 Register result = ToRegister(instr->result()); | |
3808 Label not_zero_input; | |
3809 __ bsr(result, input); | |
3810 | |
3811 __ j(not_zero, &not_zero_input); |
3812 __ Move(result, Immediate(63)); // 63^31 == 32 | |
3813 | |
3814 __ bind(&not_zero_input); |
3815 __ xor_(result, Immediate(31)); // for x in [0..31], 31^x == 31-x. | |
3816 } | 3732 } |
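The removed clz32 rests on two identities: bsr returns the index of the highest set bit (and leaves the destination unchanged for zero input, hence the explicit 63, since 63 ^ 31 == 32), and for idx in [0..31], idx ^ 31 == 31 - idx because 31 is all ones in the low five bits. A plain sketch:

    // Sketch (assumes <cstdint>) of the bsr/xor scheme in the old code.
    #include <cstdint>

    uint32_t Clz32(uint32_t n) {
      if (n == 0) return 32;                   // stands in for the 63 ^ 31 case
      uint32_t idx = 31;
      while ((n & (1u << idx)) == 0) idx--;    // bsr: highest set bit index
      return idx ^ 31;                         // == 31 - idx for idx in [0..31]
    }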
3817 | 3733 |
3818 | 3734 |
3819 void LCodeGen::DoMathExp(LMathExp* instr) { | 3735 void LCodeGen::DoMathExp(LMathExp* instr) { |
3820 XMMRegister input = ToDoubleRegister(instr->value()); | 3736 UNIMPLEMENTED(); |
3821 XMMRegister result = ToDoubleRegister(instr->result()); | |
3822 XMMRegister temp0 = double_scratch0(); | |
3823 Register temp1 = ToRegister(instr->temp1()); | |
3824 Register temp2 = ToRegister(instr->temp2()); | |
3825 | |
3826 MathExpGenerator::EmitMathExp(masm(), input, result, temp0, temp1, temp2); | |
3827 } | 3737 } |
3828 | 3738 |
3829 | 3739 |
3830 void LCodeGen::DoInvokeFunction(LInvokeFunction* instr) { | 3740 void LCodeGen::DoInvokeFunction(LInvokeFunction* instr) { |
3831 ASSERT(ToRegister(instr->context()).is(esi)); | 3741 ASSERT(ToRegister(instr->context()).is(esi)); |
3832 ASSERT(ToRegister(instr->function()).is(edi)); | 3742 ASSERT(ToRegister(instr->function()).is(edi)); |
3833 ASSERT(instr->HasPointerMap()); | 3743 ASSERT(instr->HasPointerMap()); |
3834 | 3744 |
3835 Handle<JSFunction> known_function = instr->hydrogen()->known_function(); | 3745 Handle<JSFunction> known_function = instr->hydrogen()->known_function(); |
3836 if (known_function.is_null()) { | 3746 if (known_function.is_null()) { |
(...skipping 76 matching lines...)
3913 __ bind(&done); | 3823 __ bind(&done); |
3914 } else { | 3824 } else { |
3915 ArrayNArgumentsConstructorStub stub(isolate(), kind, override_mode); | 3825 ArrayNArgumentsConstructorStub stub(isolate(), kind, override_mode); |
3916 CallCode(stub.GetCode(), RelocInfo::CONSTRUCT_CALL, instr); | 3826 CallCode(stub.GetCode(), RelocInfo::CONSTRUCT_CALL, instr); |
3917 } | 3827 } |
3918 } | 3828 } |
3919 | 3829 |
3920 | 3830 |
3921 void LCodeGen::DoCallRuntime(LCallRuntime* instr) { | 3831 void LCodeGen::DoCallRuntime(LCallRuntime* instr) { |
3922 ASSERT(ToRegister(instr->context()).is(esi)); | 3832 ASSERT(ToRegister(instr->context()).is(esi)); |
3923 CallRuntime(instr->function(), instr->arity(), instr, instr->save_doubles()); | 3833 CallRuntime(instr->function(), instr->arity(), instr); |
3924 } | 3834 } |
3925 | 3835 |
3926 | 3836 |
3927 void LCodeGen::DoStoreCodeEntry(LStoreCodeEntry* instr) { | 3837 void LCodeGen::DoStoreCodeEntry(LStoreCodeEntry* instr) { |
3928 Register function = ToRegister(instr->function()); | 3838 Register function = ToRegister(instr->function()); |
3929 Register code_object = ToRegister(instr->code_object()); | 3839 Register code_object = ToRegister(instr->code_object()); |
3930 __ lea(code_object, FieldOperand(code_object, Code::kHeaderSize)); | 3840 __ lea(code_object, FieldOperand(code_object, Code::kHeaderSize)); |
3931 __ mov(FieldOperand(function, JSFunction::kCodeEntryOffset), code_object); | 3841 __ mov(FieldOperand(function, JSFunction::kCodeEntryOffset), code_object); |
3932 } | 3842 } |
3933 | 3843 |
(...skipping 54 matching lines...)
3988 DeoptimizeIf(zero, instr->environment()); | 3898 DeoptimizeIf(zero, instr->environment()); |
3989 | 3899 |
3990 // We know now that value is not a smi, so we can omit the check below. | 3900 // We know now that value is not a smi, so we can omit the check below. |
3991 check_needed = OMIT_SMI_CHECK; | 3901 check_needed = OMIT_SMI_CHECK; |
3992 } | 3902 } |
3993 } | 3903 } |
3994 } else if (representation.IsDouble()) { | 3904 } else if (representation.IsDouble()) { |
3995 ASSERT(access.IsInobject()); | 3905 ASSERT(access.IsInobject()); |
3996 ASSERT(!instr->hydrogen()->has_transition()); | 3906 ASSERT(!instr->hydrogen()->has_transition()); |
3997 ASSERT(!instr->hydrogen()->NeedsWriteBarrier()); | 3907 ASSERT(!instr->hydrogen()->NeedsWriteBarrier()); |
3998 XMMRegister value = ToDoubleRegister(instr->value()); | 3908 X87Register value = ToX87Register(instr->value()); |
3999 __ movsd(FieldOperand(object, offset), value); | 3909 X87Mov(FieldOperand(object, offset), value); |
4000 return; | 3910 return; |
4001 } | 3911 } |
4002 | 3912 |
4003 if (instr->hydrogen()->has_transition()) { | 3913 if (instr->hydrogen()->has_transition()) { |
4004 Handle<Map> transition = instr->hydrogen()->transition_map(); | 3914 Handle<Map> transition = instr->hydrogen()->transition_map(); |
4005 AddDeprecationDependency(transition); | 3915 AddDeprecationDependency(transition); |
4006 if (!instr->hydrogen()->NeedsWriteBarrierForMap()) { | 3916 if (!instr->hydrogen()->NeedsWriteBarrierForMap()) { |
4007 __ mov(FieldOperand(object, HeapObject::kMapOffset), transition); | 3917 __ mov(FieldOperand(object, HeapObject::kMapOffset), transition); |
4008 } else { | 3918 } else { |
4009 Register temp = ToRegister(instr->temp()); | 3919 Register temp = ToRegister(instr->temp()); |
4010 Register temp_map = ToRegister(instr->temp_map()); | 3920 Register temp_map = ToRegister(instr->temp_map()); |
4011 __ mov(temp_map, transition); | 3921 __ mov(temp_map, transition); |
4012 __ mov(FieldOperand(object, HeapObject::kMapOffset), temp_map); | 3922 __ mov(FieldOperand(object, HeapObject::kMapOffset), temp_map); |
4013 // Update the write barrier for the map field. | 3923 // Update the write barrier for the map field. |
4014 __ RecordWriteField(object, | 3924 __ RecordWriteField(object, |
4015 HeapObject::kMapOffset, | 3925 HeapObject::kMapOffset, |
4016 temp_map, | 3926 temp_map, |
4017 temp, | 3927 temp, |
4018 kSaveFPRegs, | |
4019 OMIT_REMEMBERED_SET, | 3928 OMIT_REMEMBERED_SET, |
4020 OMIT_SMI_CHECK); | 3929 OMIT_SMI_CHECK); |
4021 } | 3930 } |
4022 } | 3931 } |
4023 | 3932 |
4024 // Do the store. | 3933 // Do the store. |
4025 Register write_register = object; | 3934 Register write_register = object; |
4026 if (!access.IsInobject()) { | 3935 if (!access.IsInobject()) { |
4027 write_register = ToRegister(instr->temp()); | 3936 write_register = ToRegister(instr->temp()); |
4028 __ mov(write_register, FieldOperand(object, JSObject::kPropertiesOffset)); | 3937 __ mov(write_register, FieldOperand(object, JSObject::kPropertiesOffset)); |
(...skipping 20 matching lines...)
4049 } | 3958 } |
4050 | 3959 |
4051 if (instr->hydrogen()->NeedsWriteBarrier()) { | 3960 if (instr->hydrogen()->NeedsWriteBarrier()) { |
4052 Register value = ToRegister(instr->value()); | 3961 Register value = ToRegister(instr->value()); |
4053 Register temp = access.IsInobject() ? ToRegister(instr->temp()) : object; | 3962 Register temp = access.IsInobject() ? ToRegister(instr->temp()) : object; |
4054 // Update the write barrier for the object for in-object properties. | 3963 // Update the write barrier for the object for in-object properties. |
4055 __ RecordWriteField(write_register, | 3964 __ RecordWriteField(write_register, |
4056 offset, | 3965 offset, |
4057 value, | 3966 value, |
4058 temp, | 3967 temp, |
4059 kSaveFPRegs, | |
4060 EMIT_REMEMBERED_SET, | 3968 EMIT_REMEMBERED_SET, |
4061 check_needed); | 3969 check_needed); |
4062 } | 3970 } |
4063 } | 3971 } |
4064 | 3972 |
4065 | 3973 |
4066 void LCodeGen::DoStoreNamedGeneric(LStoreNamedGeneric* instr) { | 3974 void LCodeGen::DoStoreNamedGeneric(LStoreNamedGeneric* instr) { |
4067 ASSERT(ToRegister(instr->context()).is(esi)); | 3975 ASSERT(ToRegister(instr->context()).is(esi)); |
4068 ASSERT(ToRegister(instr->object()).is(edx)); | 3976 ASSERT(ToRegister(instr->object()).is(edx)); |
4069 ASSERT(ToRegister(instr->value()).is(eax)); | 3977 ASSERT(ToRegister(instr->value()).is(eax)); |
(...skipping 38 matching lines...)
4108 __ SmiUntag(ToRegister(key)); | 4016 __ SmiUntag(ToRegister(key)); |
4109 } | 4017 } |
4110 Operand operand(BuildFastArrayOperand( | 4018 Operand operand(BuildFastArrayOperand( |
4111 instr->elements(), | 4019 instr->elements(), |
4112 key, | 4020 key, |
4113 instr->hydrogen()->key()->representation(), | 4021 instr->hydrogen()->key()->representation(), |
4114 elements_kind, | 4022 elements_kind, |
4115 instr->base_offset())); | 4023 instr->base_offset())); |
4116 if (elements_kind == EXTERNAL_FLOAT32_ELEMENTS || | 4024 if (elements_kind == EXTERNAL_FLOAT32_ELEMENTS || |
4117 elements_kind == FLOAT32_ELEMENTS) { | 4025 elements_kind == FLOAT32_ELEMENTS) { |
4118 XMMRegister xmm_scratch = double_scratch0(); | 4026 __ fld(0); |
4119 __ cvtsd2ss(xmm_scratch, ToDoubleRegister(instr->value())); | 4027 __ fstp_s(operand); |
4120 __ movss(operand, xmm_scratch); | |
4121 } else if (elements_kind == EXTERNAL_FLOAT64_ELEMENTS || | 4028 } else if (elements_kind == EXTERNAL_FLOAT64_ELEMENTS || |
4122 elements_kind == FLOAT64_ELEMENTS) { | 4029 elements_kind == FLOAT64_ELEMENTS) { |
4123 __ movsd(operand, ToDoubleRegister(instr->value())); | 4030 X87Mov(operand, ToX87Register(instr->value())); |
4124 } else { | 4031 } else { |
4125 Register value = ToRegister(instr->value()); | 4032 Register value = ToRegister(instr->value()); |
4126 switch (elements_kind) { | 4033 switch (elements_kind) { |
4127 case EXTERNAL_UINT8_CLAMPED_ELEMENTS: | 4034 case EXTERNAL_UINT8_CLAMPED_ELEMENTS: |
4128 case EXTERNAL_UINT8_ELEMENTS: | 4035 case EXTERNAL_UINT8_ELEMENTS: |
4129 case EXTERNAL_INT8_ELEMENTS: | 4036 case EXTERNAL_INT8_ELEMENTS: |
4130 case UINT8_ELEMENTS: | 4037 case UINT8_ELEMENTS: |
4131 case INT8_ELEMENTS: | 4038 case INT8_ELEMENTS: |
4132 case UINT8_CLAMPED_ELEMENTS: | 4039 case UINT8_CLAMPED_ELEMENTS: |
4133 __ mov_b(operand, value); | 4040 __ mov_b(operand, value); |
(...skipping 32 matching lines...)
4166 void LCodeGen::DoStoreKeyedFixedDoubleArray(LStoreKeyed* instr) { | 4073 void LCodeGen::DoStoreKeyedFixedDoubleArray(LStoreKeyed* instr) { |
4167 ExternalReference canonical_nan_reference = | 4074 ExternalReference canonical_nan_reference = |
4168 ExternalReference::address_of_canonical_non_hole_nan(); | 4075 ExternalReference::address_of_canonical_non_hole_nan(); |
4169 Operand double_store_operand = BuildFastArrayOperand( | 4076 Operand double_store_operand = BuildFastArrayOperand( |
4170 instr->elements(), | 4077 instr->elements(), |
4171 instr->key(), | 4078 instr->key(), |
4172 instr->hydrogen()->key()->representation(), | 4079 instr->hydrogen()->key()->representation(), |
4173 FAST_DOUBLE_ELEMENTS, | 4080 FAST_DOUBLE_ELEMENTS, |
4174 instr->base_offset()); | 4081 instr->base_offset()); |
4175 | 4082 |
4176 XMMRegister value = ToDoubleRegister(instr->value()); | 4083 // No FPU registers are needed for the constant hole store below. |
| 4084 if (instr->hydrogen()->IsConstantHoleStore()) { |
| 4085 // This means we should store the (double) hole. No floating point |
| 4086 // registers required. |
| 4087 double nan_double = FixedDoubleArray::hole_nan_as_double(); |
| 4088 uint64_t int_val = BitCast<uint64_t, double>(nan_double); |
| 4089 int32_t lower = static_cast<int32_t>(int_val); |
| 4090 int32_t upper = static_cast<int32_t>(int_val >> (kBitsPerInt)); |
4177 | 4091 |
4178 if (instr->NeedsCanonicalization()) { | 4092 __ mov(double_store_operand, Immediate(lower)); |
4179 Label have_value; | 4093 Operand double_store_operand2 = BuildFastArrayOperand( |
| 4094 instr->elements(), |
| 4095 instr->key(), |
| 4096 instr->hydrogen()->key()->representation(), |
| 4097 FAST_DOUBLE_ELEMENTS, |
| 4098 instr->base_offset() + kPointerSize); |
| 4099 __ mov(double_store_operand2, Immediate(upper)); |
| 4100 } else { |
| 4101 Label no_special_nan_handling; |
| 4102 X87Register value = ToX87Register(instr->value()); |
| 4103 X87Fxch(value); |
4180 | 4104 |
4181 __ ucomisd(value, value); | 4105 if (instr->NeedsCanonicalization()) { |
4182 __ j(parity_odd, &have_value, Label::kNear); // NaN. | 4106 __ fld(0); |
| 4107 __ fld(0); |
| 4108 __ FCmp(); |
4183 | 4109 |
4184 __ movsd(value, Operand::StaticVariable(canonical_nan_reference)); | 4110 __ j(parity_odd, &no_special_nan_handling, Label::kNear); |
4185 __ bind(&have_value); | 4111 __ sub(esp, Immediate(kDoubleSize)); |
| 4112 __ fst_d(MemOperand(esp, 0)); |
| 4113 __ cmp(MemOperand(esp, sizeof(kHoleNanLower32)), |
| 4114 Immediate(kHoleNanUpper32)); |
| 4115 __ add(esp, Immediate(kDoubleSize)); |
| 4116 Label canonicalize; |
| 4117 __ j(not_equal, &canonicalize, Label::kNear); |
| 4118 __ jmp(&no_special_nan_handling, Label::kNear); |
| 4119 __ bind(&canonicalize); |
| 4120 __ fstp(0); |
| 4121 __ fld_d(Operand::StaticVariable(canonical_nan_reference)); |
| 4122 } |
| 4123 |
| 4124 __ bind(&no_special_nan_handling); |
| 4125 __ fst_d(double_store_operand); |
4186 } | 4126 } |
4187 | |
4188 __ movsd(double_store_operand, value); | |
4189 } | 4127 } |
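The constant-hole branch above avoids the FPU entirely by splitting the hole NaN's 64-bit pattern into two 32-bit immediates, stored at base_offset and base_offset + kPointerSize. A standalone sketch of that split, with plain memcpy standing in for V8's BitCast:

    // Sketch (assumes <cstdint>, <cstring>): double -> two stored words.
    #include <cstdint>
    #include <cstring>

    void SplitDouble(double v, int32_t* lower, int32_t* upper) {
      uint64_t bits;
      std::memcpy(&bits, &v, sizeof bits);
      *lower = static_cast<int32_t>(bits);         // written first
      *upper = static_cast<int32_t>(bits >> 32);   // written at +kPointerSize
    }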
4190 | 4128 |
4191 | 4129 |
4192 void LCodeGen::DoStoreKeyedFixedArray(LStoreKeyed* instr) { | 4130 void LCodeGen::DoStoreKeyedFixedArray(LStoreKeyed* instr) { |
4193 Register elements = ToRegister(instr->elements()); | 4131 Register elements = ToRegister(instr->elements()); |
4194 Register key = instr->key()->IsRegister() ? ToRegister(instr->key()) : no_reg; | 4132 Register key = instr->key()->IsRegister() ? ToRegister(instr->key()) : no_reg; |
4195 | 4133 |
4196 Operand operand = BuildFastArrayOperand( | 4134 Operand operand = BuildFastArrayOperand( |
4197 instr->elements(), | 4135 instr->elements(), |
4198 instr->key(), | 4136 instr->key(), |
(...skipping 19 matching lines...)
4218 Register value = ToRegister(instr->value()); | 4156 Register value = ToRegister(instr->value()); |
4219 ASSERT(!instr->key()->IsConstantOperand()); | 4157 ASSERT(!instr->key()->IsConstantOperand()); |
4220 SmiCheck check_needed = | 4158 SmiCheck check_needed = |
4221 instr->hydrogen()->value()->IsHeapObject() | 4159 instr->hydrogen()->value()->IsHeapObject() |
4222 ? OMIT_SMI_CHECK : INLINE_SMI_CHECK; | 4160 ? OMIT_SMI_CHECK : INLINE_SMI_CHECK; |
4223 // Compute address of modified element and store it into key register. | 4161 // Compute address of modified element and store it into key register. |
4224 __ lea(key, operand); | 4162 __ lea(key, operand); |
4225 __ RecordWrite(elements, | 4163 __ RecordWrite(elements, |
4226 key, | 4164 key, |
4227 value, | 4165 value, |
4228 kSaveFPRegs, | |
4229 EMIT_REMEMBERED_SET, | 4166 EMIT_REMEMBERED_SET, |
4230 check_needed); | 4167 check_needed); |
4231 } | 4168 } |
4232 } | 4169 } |
4233 | 4170 |
4234 | 4171 |
4235 void LCodeGen::DoStoreKeyed(LStoreKeyed* instr) { | 4172 void LCodeGen::DoStoreKeyed(LStoreKeyed* instr) { |
4236 // By cases...external, fast-double, fast | 4173 // By cases...external, fast-double, fast |
4237 if (instr->is_typed_elements()) { | 4174 if (instr->is_typed_elements()) { |
4238 DoStoreKeyedExternalArray(instr); | 4175 DoStoreKeyedExternalArray(instr); |
(...skipping 43 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... |
4282 is_simple_map_transition ? Label::kNear : Label::kFar; | 4219 is_simple_map_transition ? Label::kNear : Label::kFar; |
4283 __ cmp(FieldOperand(object_reg, HeapObject::kMapOffset), from_map); | 4220 __ cmp(FieldOperand(object_reg, HeapObject::kMapOffset), from_map); |
4284 __ j(not_equal, &not_applicable, branch_distance); | 4221 __ j(not_equal, &not_applicable, branch_distance); |
4285 if (is_simple_map_transition) { | 4222 if (is_simple_map_transition) { |
4286 Register new_map_reg = ToRegister(instr->new_map_temp()); | 4223 Register new_map_reg = ToRegister(instr->new_map_temp()); |
4287 __ mov(FieldOperand(object_reg, HeapObject::kMapOffset), | 4224 __ mov(FieldOperand(object_reg, HeapObject::kMapOffset), |
4288 Immediate(to_map)); | 4225 Immediate(to_map)); |
4289 // Write barrier. | 4226 // Write barrier. |
4290 ASSERT_NE(instr->temp(), NULL); | 4227 ASSERT_NE(instr->temp(), NULL); |
4291 __ RecordWriteForMap(object_reg, to_map, new_map_reg, | 4228 __ RecordWriteForMap(object_reg, to_map, new_map_reg, |
4292 ToRegister(instr->temp()), | 4229 ToRegister(instr->temp())); |
4293 kDontSaveFPRegs); | |
4294 } else { | 4230 } else { |
4295 ASSERT(ToRegister(instr->context()).is(esi)); | 4231 ASSERT(ToRegister(instr->context()).is(esi)); |
4296 ASSERT(object_reg.is(eax)); | 4232 ASSERT(object_reg.is(eax)); |
4297 PushSafepointRegistersScope scope(this); | 4233 PushSafepointRegistersScope scope(this); |
4298 __ mov(ebx, to_map); | 4234 __ mov(ebx, to_map); |
4299 bool is_js_array = from_map->instance_type() == JS_ARRAY_TYPE; | 4235 bool is_js_array = from_map->instance_type() == JS_ARRAY_TYPE; |
4300 TransitionElementsKindStub stub(isolate(), from_kind, to_kind, is_js_array); | 4236 TransitionElementsKindStub stub(isolate(), from_kind, to_kind, is_js_array); |
4301 __ CallStub(&stub); | 4237 __ CallStub(&stub); |
4302 RecordSafepointWithLazyDeopt(instr, | 4238 RecordSafepointWithLazyDeopt(instr, |
4303 RECORD_SAFEPOINT_WITH_REGISTERS_AND_NO_ARGUMENTS); | 4239 RECORD_SAFEPOINT_WITH_REGISTERS_AND_NO_ARGUMENTS); |
4304 } | 4240 } |
4305 __ bind(&not_applicable); | 4241 __ bind(&not_applicable); |
4306 } | 4242 } |
4307 | 4243 |
4308 | 4244 |
4309 void LCodeGen::DoArrayShift(LArrayShift* instr) { | 4245 void LCodeGen::DoArrayShift(LArrayShift* instr) { |
4310 ASSERT(ToRegister(instr->context()).is(esi)); | 4246 ASSERT(ToRegister(instr->context()).is(esi)); |
4311 ASSERT(ToRegister(instr->object()).is(eax)); | 4247 ASSERT(ToRegister(instr->object()).is(eax)); |
4312 ASSERT(ToRegister(instr->result()).is(eax)); | 4248 ASSERT(ToRegister(instr->result()).is(eax)); |
4313 ArrayShiftStub stub(isolate(), instr->hydrogen()->kind()); | 4249 ArrayShiftStub stub(isolate(), instr->hydrogen()->kind()); |
4314 CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr); | 4250 CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr); |
4315 } | 4251 } |
4316 | 4252 |
4317 | 4253 |
4318 void LCodeGen::DoStringCharCodeAt(LStringCharCodeAt* instr) { | 4254 void LCodeGen::DoStringCharCodeAt(LStringCharCodeAt* instr) { |
4319 class DeferredStringCharCodeAt V8_FINAL : public LDeferredCode { | 4255 class DeferredStringCharCodeAt V8_FINAL : public LDeferredCode { |
4320 public: | 4256 public: |
4321 DeferredStringCharCodeAt(LCodeGen* codegen, | 4257 DeferredStringCharCodeAt(LCodeGen* codegen, |
4322 LStringCharCodeAt* instr) | 4258 LStringCharCodeAt* instr, |
4323 : LDeferredCode(codegen), instr_(instr) { } | 4259 const X87Stack& x87_stack) |
| 4260 : LDeferredCode(codegen, x87_stack), instr_(instr) { } |
4324 virtual void Generate() V8_OVERRIDE { | 4261 virtual void Generate() V8_OVERRIDE { |
4325 codegen()->DoDeferredStringCharCodeAt(instr_); | 4262 codegen()->DoDeferredStringCharCodeAt(instr_); |
4326 } | 4263 } |
4327 virtual LInstruction* instr() V8_OVERRIDE { return instr_; } | 4264 virtual LInstruction* instr() V8_OVERRIDE { return instr_; } |
4328 private: | 4265 private: |
4329 LStringCharCodeAt* instr_; | 4266 LStringCharCodeAt* instr_; |
4330 }; | 4267 }; |
4331 | 4268 |
4332 DeferredStringCharCodeAt* deferred = | 4269 DeferredStringCharCodeAt* deferred = |
4333 new(zone()) DeferredStringCharCodeAt(this, instr); | 4270 new(zone()) DeferredStringCharCodeAt(this, instr, x87_stack_); |
4334 | 4271 |
4335 StringCharLoadGenerator::Generate(masm(), | 4272 StringCharLoadGenerator::Generate(masm(), |
4336 factory(), | 4273 factory(), |
4337 ToRegister(instr->string()), | 4274 ToRegister(instr->string()), |
4338 ToRegister(instr->index()), | 4275 ToRegister(instr->index()), |
4339 ToRegister(instr->result()), | 4276 ToRegister(instr->result()), |
4340 deferred->entry()); | 4277 deferred->entry()); |
4341 __ bind(deferred->exit()); | 4278 __ bind(deferred->exit()); |
4342 } | 4279 } |
4343 | 4280 |
(...skipping 26 matching lines...)
4370 __ AssertSmi(eax); | 4307 __ AssertSmi(eax); |
4371 __ SmiUntag(eax); | 4308 __ SmiUntag(eax); |
4372 __ StoreToSafepointRegisterSlot(result, eax); | 4309 __ StoreToSafepointRegisterSlot(result, eax); |
4373 } | 4310 } |
4374 | 4311 |
4375 | 4312 |
4376 void LCodeGen::DoStringCharFromCode(LStringCharFromCode* instr) { | 4313 void LCodeGen::DoStringCharFromCode(LStringCharFromCode* instr) { |
4377 class DeferredStringCharFromCode V8_FINAL : public LDeferredCode { | 4314 class DeferredStringCharFromCode V8_FINAL : public LDeferredCode { |
4378 public: | 4315 public: |
4379 DeferredStringCharFromCode(LCodeGen* codegen, | 4316 DeferredStringCharFromCode(LCodeGen* codegen, |
4380 LStringCharFromCode* instr) | 4317 LStringCharFromCode* instr, |
4381 : LDeferredCode(codegen), instr_(instr) { } | 4318 const X87Stack& x87_stack) |
| 4319 : LDeferredCode(codegen, x87_stack), instr_(instr) { } |
4382 virtual void Generate() V8_OVERRIDE { | 4320 virtual void Generate() V8_OVERRIDE { |
4383 codegen()->DoDeferredStringCharFromCode(instr_); | 4321 codegen()->DoDeferredStringCharFromCode(instr_); |
4384 } | 4322 } |
4385 virtual LInstruction* instr() V8_OVERRIDE { return instr_; } | 4323 virtual LInstruction* instr() V8_OVERRIDE { return instr_; } |
4386 private: | 4324 private: |
4387 LStringCharFromCode* instr_; | 4325 LStringCharFromCode* instr_; |
4388 }; | 4326 }; |
4389 | 4327 |
4390 DeferredStringCharFromCode* deferred = | 4328 DeferredStringCharFromCode* deferred = |
4391 new(zone()) DeferredStringCharFromCode(this, instr); | 4329 new(zone()) DeferredStringCharFromCode(this, instr, x87_stack_); |
4392 | 4330 |
4393 ASSERT(instr->hydrogen()->value()->representation().IsInteger32()); | 4331 ASSERT(instr->hydrogen()->value()->representation().IsInteger32()); |
4394 Register char_code = ToRegister(instr->char_code()); | 4332 Register char_code = ToRegister(instr->char_code()); |
4395 Register result = ToRegister(instr->result()); | 4333 Register result = ToRegister(instr->result()); |
4396 ASSERT(!char_code.is(result)); | 4334 ASSERT(!char_code.is(result)); |
4397 | 4335 |
4398 __ cmp(char_code, String::kMaxOneByteCharCode); | 4336 __ cmp(char_code, String::kMaxOneByteCharCode); |
4399 __ j(above, deferred->entry()); | 4337 __ j(above, deferred->entry()); |
4400 __ Move(result, Immediate(factory()->single_character_string_cache())); | 4338 __ Move(result, Immediate(factory()->single_character_string_cache())); |
4401 __ mov(result, FieldOperand(result, | 4339 __ mov(result, FieldOperand(result, |
(...skipping 31 matching lines...)
4433 instr->hydrogen()->pretenure_flag()); | 4371 instr->hydrogen()->pretenure_flag()); |
4434 CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr); | 4372 CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr); |
4435 } | 4373 } |
4436 | 4374 |
4437 | 4375 |
4438 void LCodeGen::DoInteger32ToDouble(LInteger32ToDouble* instr) { | 4376 void LCodeGen::DoInteger32ToDouble(LInteger32ToDouble* instr) { |
4439 LOperand* input = instr->value(); | 4377 LOperand* input = instr->value(); |
4440 LOperand* output = instr->result(); | 4378 LOperand* output = instr->result(); |
4441 ASSERT(input->IsRegister() || input->IsStackSlot()); | 4379 ASSERT(input->IsRegister() || input->IsStackSlot()); |
4442 ASSERT(output->IsDoubleRegister()); | 4380 ASSERT(output->IsDoubleRegister()); |
4443 __ Cvtsi2sd(ToDoubleRegister(output), ToOperand(input)); | 4381 if (input->IsRegister()) { |
| 4382 Register input_reg = ToRegister(input); |
| 4383 __ push(input_reg); |
| 4384 X87Mov(ToX87Register(output), Operand(esp, 0), kX87IntOperand); |
| 4385 __ pop(input_reg); |
| 4386 } else { |
| 4387 X87Mov(ToX87Register(output), ToOperand(input), kX87IntOperand); |
| 4388 } |
4444 } | 4389 } |
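The push/fild_s/pop dance exists because fild can only read from memory; there is no register-to-x87 integer move, so a register input is spilled to the stack just long enough to load it. Numerically the whole instruction is an exact widening conversion:

    // Sketch (assumes <cstdint>): what the spill-and-fild sequence computes.
    #include <cstdint>

    double Int32ToDouble(int32_t i) {
      return static_cast<double>(i);  // exact: every int32 fits in 53 bits
    }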
4445 | 4390 |
4446 | 4391 |
4447 void LCodeGen::DoUint32ToDouble(LUint32ToDouble* instr) { | 4392 void LCodeGen::DoUint32ToDouble(LUint32ToDouble* instr) { |
4448 LOperand* input = instr->value(); | 4393 LOperand* input = instr->value(); |
4449 LOperand* output = instr->result(); | 4394 LOperand* output = instr->result(); |
4450 LOperand* temp = instr->temp(); | 4395 X87Register res = ToX87Register(output); |
4451 __ LoadUint32(ToDoubleRegister(output), | 4396 X87PrepareToWrite(res); |
4452 ToRegister(input), | 4397 __ LoadUint32NoSSE2(ToRegister(input)); |
4453 ToDoubleRegister(temp)); | 4398 X87CommitWrite(res); |
4454 } | 4399 } |
4455 | 4400 |
4456 | 4401 |
4457 void LCodeGen::DoNumberTagI(LNumberTagI* instr) { | 4402 void LCodeGen::DoNumberTagI(LNumberTagI* instr) { |
4458 class DeferredNumberTagI V8_FINAL : public LDeferredCode { | 4403 class DeferredNumberTagI V8_FINAL : public LDeferredCode { |
4459 public: | 4404 public: |
4460 DeferredNumberTagI(LCodeGen* codegen, | 4405 DeferredNumberTagI(LCodeGen* codegen, |
4461 LNumberTagI* instr) | 4406 LNumberTagI* instr, |
4462 : LDeferredCode(codegen), instr_(instr) { } | 4407 const X87Stack& x87_stack) |
| 4408 : LDeferredCode(codegen, x87_stack), instr_(instr) { } |
4463 virtual void Generate() V8_OVERRIDE { | 4409 virtual void Generate() V8_OVERRIDE { |
4464 codegen()->DoDeferredNumberTagIU(instr_, instr_->value(), instr_->temp(), | 4410 codegen()->DoDeferredNumberTagIU(instr_, instr_->value(), instr_->temp(), |
4465 NULL, SIGNED_INT32); | 4411 SIGNED_INT32); |
4466 } | 4412 } |
4467 virtual LInstruction* instr() V8_OVERRIDE { return instr_; } | 4413 virtual LInstruction* instr() V8_OVERRIDE { return instr_; } |
4468 private: | 4414 private: |
4469 LNumberTagI* instr_; | 4415 LNumberTagI* instr_; |
4470 }; | 4416 }; |
4471 | 4417 |
4472 LOperand* input = instr->value(); | 4418 LOperand* input = instr->value(); |
4473 ASSERT(input->IsRegister() && input->Equals(instr->result())); | 4419 ASSERT(input->IsRegister() && input->Equals(instr->result())); |
4474 Register reg = ToRegister(input); | 4420 Register reg = ToRegister(input); |
4475 | 4421 |
4476 DeferredNumberTagI* deferred = | 4422 DeferredNumberTagI* deferred = |
4477 new(zone()) DeferredNumberTagI(this, instr); | 4423 new(zone()) DeferredNumberTagI(this, instr, x87_stack_); |
4478 __ SmiTag(reg); | 4424 __ SmiTag(reg); |
4479 __ j(overflow, deferred->entry()); | 4425 __ j(overflow, deferred->entry()); |
4480 __ bind(deferred->exit()); | 4426 __ bind(deferred->exit()); |
4481 } | 4427 } |
4482 | 4428 |
4483 | 4429 |
4484 void LCodeGen::DoNumberTagU(LNumberTagU* instr) { | 4430 void LCodeGen::DoNumberTagU(LNumberTagU* instr) { |
4485 class DeferredNumberTagU V8_FINAL : public LDeferredCode { | 4431 class DeferredNumberTagU V8_FINAL : public LDeferredCode { |
4486 public: | 4432 public: |
4487 DeferredNumberTagU(LCodeGen* codegen, LNumberTagU* instr) | 4433 DeferredNumberTagU(LCodeGen* codegen, |
4488 : LDeferredCode(codegen), instr_(instr) { } | 4434 LNumberTagU* instr, |
| 4435 const X87Stack& x87_stack) |
| 4436 : LDeferredCode(codegen, x87_stack), instr_(instr) { } |
4489 virtual void Generate() V8_OVERRIDE { | 4437 virtual void Generate() V8_OVERRIDE { |
4490 codegen()->DoDeferredNumberTagIU(instr_, instr_->value(), instr_->temp1(), | 4438 codegen()->DoDeferredNumberTagIU(instr_, instr_->value(), instr_->temp(), |
4491 instr_->temp2(), UNSIGNED_INT32); | 4439 UNSIGNED_INT32); |
4492 } | 4440 } |
4493 virtual LInstruction* instr() V8_OVERRIDE { return instr_; } | 4441 virtual LInstruction* instr() V8_OVERRIDE { return instr_; } |
4494 private: | 4442 private: |
4495 LNumberTagU* instr_; | 4443 LNumberTagU* instr_; |
4496 }; | 4444 }; |
4497 | 4445 |
4498 LOperand* input = instr->value(); | 4446 LOperand* input = instr->value(); |
4499 ASSERT(input->IsRegister() && input->Equals(instr->result())); | 4447 ASSERT(input->IsRegister() && input->Equals(instr->result())); |
4500 Register reg = ToRegister(input); | 4448 Register reg = ToRegister(input); |
4501 | 4449 |
4502 DeferredNumberTagU* deferred = | 4450 DeferredNumberTagU* deferred = |
4503 new(zone()) DeferredNumberTagU(this, instr); | 4451 new(zone()) DeferredNumberTagU(this, instr, x87_stack_); |
4504 __ cmp(reg, Immediate(Smi::kMaxValue)); | 4452 __ cmp(reg, Immediate(Smi::kMaxValue)); |
4505 __ j(above, deferred->entry()); | 4453 __ j(above, deferred->entry()); |
4506 __ SmiTag(reg); | 4454 __ SmiTag(reg); |
4507 __ bind(deferred->exit()); | 4455 __ bind(deferred->exit()); |
4508 } | 4456 } |
4509 | 4457 |
4510 | 4458 |
4511 void LCodeGen::DoDeferredNumberTagIU(LInstruction* instr, | 4459 void LCodeGen::DoDeferredNumberTagIU(LInstruction* instr, |
4512 LOperand* value, | 4460 LOperand* value, |
4513 LOperand* temp1, | 4461 LOperand* temp, |
4514 LOperand* temp2, | |
4515 IntegerSignedness signedness) { | 4462 IntegerSignedness signedness) { |
4516 Label done, slow; | 4463 Label done, slow; |
4517 Register reg = ToRegister(value); | 4464 Register reg = ToRegister(value); |
4518 Register tmp = ToRegister(temp1); | 4465 Register tmp = ToRegister(temp); |
4519 XMMRegister xmm_scratch = double_scratch0(); | |
4520 | 4466 |
4521 if (signedness == SIGNED_INT32) { | 4467 if (signedness == SIGNED_INT32) { |
4522 // There was overflow, so bits 30 and 31 of the original integer | 4468 // There was overflow, so bits 30 and 31 of the original integer |
4523 // disagree. Try to allocate a heap number in new space and store | 4469 // disagree. Try to allocate a heap number in new space and store |
4524 // the value in there. If that fails, call the runtime system. | 4470 // the value in there. If that fails, call the runtime system. |
4525 __ SmiUntag(reg); | 4471 __ SmiUntag(reg); |
4526 __ xor_(reg, 0x80000000); | 4472 __ xor_(reg, 0x80000000); |
4527 __ Cvtsi2sd(xmm_scratch, Operand(reg)); | 4473 __ push(reg); |
| 4474 __ fild_s(Operand(esp, 0)); |
| 4475 __ pop(reg); |
4528 } else { | 4476 } else { |
4529 __ LoadUint32(xmm_scratch, reg, ToDoubleRegister(temp2)); | 4477 // There's no fild variant for unsigned values, so zero-extend to a 64-bit |
| 4478 // int manually. |
| 4479 __ push(Immediate(0)); |
| 4480 __ push(reg); |
| 4481 __ fild_d(Operand(esp, 0)); |
| 4482 __ pop(reg); |
| 4483 __ pop(reg); |
4530 } | 4484 } |
4531 | 4485 |
4532 if (FLAG_inline_new) { | 4486 if (FLAG_inline_new) { |
4533 __ AllocateHeapNumber(reg, tmp, no_reg, &slow); | 4487 __ AllocateHeapNumber(reg, tmp, no_reg, &slow); |
4534 __ jmp(&done, Label::kNear); | 4488 __ jmp(&done, Label::kNear); |
4535 } | 4489 } |
4536 | 4490 |
4537 // Slow case: Call the runtime system to do the number allocation. | 4491 // Slow case: Call the runtime system to do the number allocation. |
4538 __ bind(&slow); | 4492 __ bind(&slow); |
4539 { | 4493 { |
4540 // TODO(3095996): Put a valid pointer value in the stack slot where the | 4494 // TODO(3095996): Put a valid pointer value in the stack slot where the |
4541 // result register is stored, as this register is in the pointer map, but | 4495 // result register is stored, as this register is in the pointer map, but |
4542 // contains an integer value. | 4496 // contains an integer value. |
4543 __ Move(reg, Immediate(0)); | 4497 __ Move(reg, Immediate(0)); |
4544 | 4498 |
4545 // Preserve the value of all registers. | 4499 // Preserve the value of all registers. |
4546 PushSafepointRegistersScope scope(this); | 4500 PushSafepointRegistersScope scope(this); |
4547 | 4501 |
4548 // NumberTagI and NumberTagD use the context from the frame, rather than | 4502 // NumberTagI and NumberTagD use the context from the frame, rather than |
4549 // the environment's HContext or HInlinedContext value. | 4503 // the environment's HContext or HInlinedContext value. |
4550 // They only call Runtime::kHiddenAllocateHeapNumber. | 4504 // They only call Runtime::kHiddenAllocateHeapNumber. |
4551 // The corresponding HChange instructions are added in a phase that does | 4505 // The corresponding HChange instructions are added in a phase that does |
4552 // not have easy access to the local context. | 4506 // not have easy access to the local context. |
4553 __ mov(esi, Operand(ebp, StandardFrameConstants::kContextOffset)); | 4507 __ mov(esi, Operand(ebp, StandardFrameConstants::kContextOffset)); |
4554 __ CallRuntimeSaveDoubles(Runtime::kHiddenAllocateHeapNumber); | 4508 __ CallRuntime(Runtime::kHiddenAllocateHeapNumber); |
4555 RecordSafepointWithRegisters( | 4509 RecordSafepointWithRegisters( |
4556 instr->pointer_map(), 0, Safepoint::kNoLazyDeopt); | 4510 instr->pointer_map(), 0, Safepoint::kNoLazyDeopt); |
4557 __ StoreToSafepointRegisterSlot(reg, eax); | 4511 __ StoreToSafepointRegisterSlot(reg, eax); |
4558 } | 4512 } |
4559 | 4513 |
4560 // Done. Put the value in xmm_scratch into the value of the allocated heap | |
4561 // number. | |
4562 __ bind(&done); | 4514 __ bind(&done); |
4563 __ movsd(FieldOperand(reg, HeapNumber::kValueOffset), xmm_scratch); | 4515 __ fstp_d(FieldOperand(reg, HeapNumber::kValueOffset)); |
4564 } | 4516 } |
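Two bit tricks above are easy to miss. In the signed path, SmiTag shifted the value left once and overflowed, so after SmiUntag only bit 31 is wrong (it holds a copy of bit 30); xor with 0x80000000 restores it. In the unsigned path there is no fild variant for unsigned values, so the code zero-extends to a 64-bit integer on the stack and uses fild_d. A hedged sketch of the unsigned conversion:

    // Sketch (assumes <cstdint>): the push 0 / push reg / fild_d sequence.
    #include <cstdint>

    double Uint32ToDouble(uint32_t u) {
      int64_t wide = static_cast<int64_t>(u);  // zero-extend: never negative
      return static_cast<double>(wide);        // exact within the 53-bit mantissa
    }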
4565 | 4517 |
4566 | 4518 |
4567 void LCodeGen::DoNumberTagD(LNumberTagD* instr) { | 4519 void LCodeGen::DoNumberTagD(LNumberTagD* instr) { |
4568 class DeferredNumberTagD V8_FINAL : public LDeferredCode { | 4520 class DeferredNumberTagD V8_FINAL : public LDeferredCode { |
4569 public: | 4521 public: |
4570 DeferredNumberTagD(LCodeGen* codegen, LNumberTagD* instr) | 4522 DeferredNumberTagD(LCodeGen* codegen, |
4571 : LDeferredCode(codegen), instr_(instr) { } | 4523 LNumberTagD* instr, |
| 4524 const X87Stack& x87_stack) |
| 4525 : LDeferredCode(codegen, x87_stack), instr_(instr) { } |
4572 virtual void Generate() V8_OVERRIDE { | 4526 virtual void Generate() V8_OVERRIDE { |
4573 codegen()->DoDeferredNumberTagD(instr_); | 4527 codegen()->DoDeferredNumberTagD(instr_); |
4574 } | 4528 } |
4575 virtual LInstruction* instr() V8_OVERRIDE { return instr_; } | 4529 virtual LInstruction* instr() V8_OVERRIDE { return instr_; } |
4576 private: | 4530 private: |
4577 LNumberTagD* instr_; | 4531 LNumberTagD* instr_; |
4578 }; | 4532 }; |
4579 | 4533 |
4580 Register reg = ToRegister(instr->result()); | 4534 Register reg = ToRegister(instr->result()); |
4581 | 4535 |
| 4536 // Put the value on top of the x87 stack. |
| 4537 X87Register src = ToX87Register(instr->value()); |
| 4538 X87LoadForUsage(src); |
| 4539 |
4582 DeferredNumberTagD* deferred = | 4540 DeferredNumberTagD* deferred = |
4583 new(zone()) DeferredNumberTagD(this, instr); | 4541 new(zone()) DeferredNumberTagD(this, instr, x87_stack_); |
4584 if (FLAG_inline_new) { | 4542 if (FLAG_inline_new) { |
4585 Register tmp = ToRegister(instr->temp()); | 4543 Register tmp = ToRegister(instr->temp()); |
4586 __ AllocateHeapNumber(reg, tmp, no_reg, deferred->entry()); | 4544 __ AllocateHeapNumber(reg, tmp, no_reg, deferred->entry()); |
4587 } else { | 4545 } else { |
4588 __ jmp(deferred->entry()); | 4546 __ jmp(deferred->entry()); |
4589 } | 4547 } |
4590 __ bind(deferred->exit()); | 4548 __ bind(deferred->exit()); |
4591 XMMRegister input_reg = ToDoubleRegister(instr->value()); | 4549 __ fstp_d(FieldOperand(reg, HeapNumber::kValueOffset)); |
4592 __ movsd(FieldOperand(reg, HeapNumber::kValueOffset), input_reg); | |
4593 } | 4550 } |
4594 | 4551 |
4595 | 4552 |
4596 void LCodeGen::DoDeferredNumberTagD(LNumberTagD* instr) { | 4553 void LCodeGen::DoDeferredNumberTagD(LNumberTagD* instr) { |
4597 // TODO(3095996): Get rid of this. For now, we need to make the | 4554 // TODO(3095996): Get rid of this. For now, we need to make the |
4598 // result register contain a valid pointer because it is already | 4555 // result register contain a valid pointer because it is already |
4599 // contained in the register pointer map. | 4556 // contained in the register pointer map. |
4600 Register reg = ToRegister(instr->result()); | 4557 Register reg = ToRegister(instr->result()); |
4601 __ Move(reg, Immediate(0)); | 4558 __ Move(reg, Immediate(0)); |
4602 | 4559 |
4603 PushSafepointRegistersScope scope(this); | 4560 PushSafepointRegistersScope scope(this); |
4604 // NumberTagI and NumberTagD use the context from the frame, rather than | 4561 // NumberTagI and NumberTagD use the context from the frame, rather than |
4605 // the environment's HContext or HInlinedContext value. | 4562 // the environment's HContext or HInlinedContext value. |
4606 // They only call Runtime::kHiddenAllocateHeapNumber. | 4563 // They only call Runtime::kHiddenAllocateHeapNumber. |
4607 // The corresponding HChange instructions are added in a phase that does | 4564 // The corresponding HChange instructions are added in a phase that does |
4608 // not have easy access to the local context. | 4565 // not have easy access to the local context. |
4609 __ mov(esi, Operand(ebp, StandardFrameConstants::kContextOffset)); | 4566 __ mov(esi, Operand(ebp, StandardFrameConstants::kContextOffset)); |
4610 __ CallRuntimeSaveDoubles(Runtime::kHiddenAllocateHeapNumber); | 4567 __ CallRuntime(Runtime::kHiddenAllocateHeapNumber); |
4611 RecordSafepointWithRegisters( | 4568 RecordSafepointWithRegisters( |
4612 instr->pointer_map(), 0, Safepoint::kNoLazyDeopt); | 4569 instr->pointer_map(), 0, Safepoint::kNoLazyDeopt); |
4613 __ StoreToSafepointRegisterSlot(reg, eax); | 4570 __ StoreToSafepointRegisterSlot(reg, eax); |
4614 } | 4571 } |
4615 | 4572 |
4616 | 4573 |
4617 void LCodeGen::DoSmiTag(LSmiTag* instr) { | 4574 void LCodeGen::DoSmiTag(LSmiTag* instr) { |
4618 HChange* hchange = instr->hydrogen(); | 4575 HChange* hchange = instr->hydrogen(); |
4619 Register input = ToRegister(instr->value()); | 4576 Register input = ToRegister(instr->value()); |
4620 if (hchange->CheckFlag(HValue::kCanOverflow) && | 4577 if (hchange->CheckFlag(HValue::kCanOverflow) && |
(...skipping 16 matching lines...)
4637 if (instr->needs_check()) { | 4594 if (instr->needs_check()) { |
4638 __ test(result, Immediate(kSmiTagMask)); | 4595 __ test(result, Immediate(kSmiTagMask)); |
4639 DeoptimizeIf(not_zero, instr->environment()); | 4596 DeoptimizeIf(not_zero, instr->environment()); |
4640 } else { | 4597 } else { |
4641 __ AssertSmi(result); | 4598 __ AssertSmi(result); |
4642 } | 4599 } |
4643 __ SmiUntag(result); | 4600 __ SmiUntag(result); |
4644 } | 4601 } |
4645 | 4602 |
4646 | 4603 |
4647 void LCodeGen::EmitNumberUntagD(Register input_reg, | 4604 void LCodeGen::EmitNumberUntagDNoSSE2(Register input_reg, |
4648 Register temp_reg, | 4605 Register temp_reg, |
4649 XMMRegister result_reg, | 4606 X87Register res_reg, |
4650 bool can_convert_undefined_to_nan, | 4607 bool can_convert_undefined_to_nan, |
4651 bool deoptimize_on_minus_zero, | 4608 bool deoptimize_on_minus_zero, |
4652 LEnvironment* env, | 4609 LEnvironment* env, |
4653 NumberUntagDMode mode) { | 4610 NumberUntagDMode mode) { |
4654 Label convert, load_smi, done; | 4611 Label load_smi, done; |
4655 | 4612 |
| 4613 X87PrepareToWrite(res_reg); |
4656 if (mode == NUMBER_CANDIDATE_IS_ANY_TAGGED) { | 4614 if (mode == NUMBER_CANDIDATE_IS_ANY_TAGGED) { |
4657 // Smi check. | 4615 // Smi check. |
4658 __ JumpIfSmi(input_reg, &load_smi, Label::kNear); | 4616 __ JumpIfSmi(input_reg, &load_smi, Label::kNear); |
4659 | 4617 |
4660 // Heap number map check. | 4618 // Heap number map check. |
4661 __ cmp(FieldOperand(input_reg, HeapObject::kMapOffset), | 4619 __ cmp(FieldOperand(input_reg, HeapObject::kMapOffset), |
4662 factory()->heap_number_map()); | 4620 factory()->heap_number_map()); |
4663 if (can_convert_undefined_to_nan) { | 4621 if (!can_convert_undefined_to_nan) { |
4664 __ j(not_equal, &convert, Label::kNear); | 4622 DeoptimizeIf(not_equal, env); |
4665 } else { | 4623 } else { |
| 4624 Label heap_number, convert; |
| 4625 __ j(equal, &heap_number, Label::kNear); |
| 4626 |
| 4627 // Convert undefined (or hole) to NaN. |
| 4628 __ cmp(input_reg, factory()->undefined_value()); |
4666 DeoptimizeIf(not_equal, env); | 4629 DeoptimizeIf(not_equal, env); |
| 4630 |
| 4631 __ bind(&convert); |
| 4632 ExternalReference nan = |
| 4633 ExternalReference::address_of_canonical_non_hole_nan(); |
| 4634 __ fld_d(Operand::StaticVariable(nan)); |
| 4635 __ jmp(&done, Label::kNear); |
| 4636 |
| 4637 __ bind(&heap_number); |
4667 } | 4638 } |
| 4639 // Heap number to x87 conversion. |
| 4640 __ fld_d(FieldOperand(input_reg, HeapNumber::kValueOffset)); |
| 4641 if (deoptimize_on_minus_zero) { |
| 4642 __ fldz(); |
| 4643 __ FCmp(); |
| 4644 __ fld_d(FieldOperand(input_reg, HeapNumber::kValueOffset)); |
| 4645 __ j(not_zero, &done, Label::kNear); |
4668 | 4646 |
4669 // Heap number to XMM conversion. | 4647 // Use general purpose registers to check if we have -0.0 |
4670 __ movsd(result_reg, FieldOperand(input_reg, HeapNumber::kValueOffset)); | 4648 __ mov(temp_reg, FieldOperand(input_reg, HeapNumber::kExponentOffset)); |
| 4649 __ test(temp_reg, Immediate(HeapNumber::kSignMask)); |
| 4650 __ j(zero, &done, Label::kNear); |
4671 | 4651 |
4672 if (deoptimize_on_minus_zero) { | 4652 // Pop FPU stack before deoptimizing. |
4673 XMMRegister xmm_scratch = double_scratch0(); | 4653 __ fstp(0); |
4674 __ xorps(xmm_scratch, xmm_scratch); | |
4675 __ ucomisd(result_reg, xmm_scratch); | |
4676 __ j(not_zero, &done, Label::kNear); | |
4677 __ movmskpd(temp_reg, result_reg); | |
4678 __ test_b(temp_reg, 1); | |
4679 DeoptimizeIf(not_zero, env); | 4654 DeoptimizeIf(not_zero, env); |
4680 } | 4655 } |
4681 __ jmp(&done, Label::kNear); | 4656 __ jmp(&done, Label::kNear); |
4682 | |
4683 if (can_convert_undefined_to_nan) { | |
4684 __ bind(&convert); | |
4685 | |
4686 // Convert undefined (and hole) to NaN. | |
4687 __ cmp(input_reg, factory()->undefined_value()); | |
4688 DeoptimizeIf(not_equal, env); | |
4689 | |
4690 ExternalReference nan = | |
4691 ExternalReference::address_of_canonical_non_hole_nan(); | |
4692 __ movsd(result_reg, Operand::StaticVariable(nan)); | |
4693 __ jmp(&done, Label::kNear); | |
4694 } | |
4695 } else { | 4657 } else { |
4696 ASSERT(mode == NUMBER_CANDIDATE_IS_SMI); | 4658 ASSERT(mode == NUMBER_CANDIDATE_IS_SMI); |
4697 } | 4659 } |
4698 | 4660 |
4699 __ bind(&load_smi); | 4661 __ bind(&load_smi); |
4700 // Smi to XMM conversion. Clobbering a temp is faster than re-tagging the | 4662 // Clobbering a temp is faster than re-tagging the |
4701 // input register since we avoid dependencies. | 4663 // input register since we avoid dependencies. |
4702 __ mov(temp_reg, input_reg); | 4664 __ mov(temp_reg, input_reg); |
4703 __ SmiUntag(temp_reg); // Untag smi before converting to float. | 4665 __ SmiUntag(temp_reg); // Untag smi before converting to float. |
4704 __ Cvtsi2sd(result_reg, Operand(temp_reg)); | 4666 __ push(temp_reg); |
| 4667 __ fild_s(Operand(esp, 0)); |
| 4668 __ add(esp, Immediate(kPointerSize)); |
4705 __ bind(&done); | 4669 __ bind(&done); |
| 4670 X87CommitWrite(res_reg); |
4706 } | 4671 } |
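
A note on the deoptimize_on_minus_zero path above: the x87 compare (fldz/FCmp) reports -0.0 equal to +0.0, which is why the generated code falls back to testing the sign bit of the heap number's exponent word. A minimal portable sketch of that test (our own illustration, not V8 code, assuming the standard IEEE-754 double layout behind HeapNumber::kSignMask):

    #include <cstdint>
    #include <cstring>

    // True only for -0.0: the equality test alone cannot tell it from +0.0,
    // so we inspect the sign bit of the upper word, as the generated code does.
    bool IsMinusZero(double value) {
      uint64_t bits;
      std::memcpy(&bits, &value, sizeof bits);
      const uint32_t upper = static_cast<uint32_t>(bits >> 32);
      const uint32_t kSignMask = 0x80000000u;  // assumed HeapNumber::kSignMask
      return value == 0.0 && (upper & kSignMask) != 0;
    }
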
4707 | 4672 |
4708 | 4673 |
4709 void LCodeGen::DoDeferredTaggedToI(LTaggedToI* instr, Label* done) { | 4674 void LCodeGen::DoDeferredTaggedToI(LTaggedToI* instr, Label* done) { |
4710 Register input_reg = ToRegister(instr->value()); | 4675 Register input_reg = ToRegister(instr->value()); |
4711 | 4676 |
4712 // The input was optimistically untagged; revert it. | 4677 // The input was optimistically untagged; revert it. |
4713 STATIC_ASSERT(kSmiTagSize == 1); | 4678 STATIC_ASSERT(kSmiTagSize == 1); |
4714 __ lea(input_reg, Operand(input_reg, times_2, kHeapObjectTag)); | 4679 __ lea(input_reg, Operand(input_reg, times_2, kHeapObjectTag)); |
4715 | 4680 |
(...skipping 21 matching lines...) |
4737 __ Move(input_reg, Immediate(1)); | 4702 __ Move(input_reg, Immediate(1)); |
4738 __ jmp(done); | 4703 __ jmp(done); |
4739 | 4704 |
4740 __ bind(&check_false); | 4705 __ bind(&check_false); |
4741 __ cmp(input_reg, factory()->false_value()); | 4706 __ cmp(input_reg, factory()->false_value()); |
4742 __ RecordComment("Deferred TaggedToI: cannot truncate"); | 4707 __ RecordComment("Deferred TaggedToI: cannot truncate"); |
4743 DeoptimizeIf(not_equal, instr->environment()); | 4708 DeoptimizeIf(not_equal, instr->environment()); |
4744 __ Move(input_reg, Immediate(0)); | 4709 __ Move(input_reg, Immediate(0)); |
4745 } else { | 4710 } else { |
4746 Label bailout; | 4711 Label bailout; |
4747 XMMRegister scratch = (instr->temp() != NULL) | 4712 __ TaggedToI(input_reg, input_reg, |
4748 ? ToDoubleRegister(instr->temp()) | |
4749 : no_xmm_reg; | |
4750 __ TaggedToI(input_reg, input_reg, scratch, | |
4751 instr->hydrogen()->GetMinusZeroMode(), &bailout); | 4713 instr->hydrogen()->GetMinusZeroMode(), &bailout); |
4752 __ jmp(done); | 4714 __ jmp(done); |
4753 __ bind(&bailout); | 4715 __ bind(&bailout); |
4754 DeoptimizeIf(no_condition, instr->environment()); | 4716 DeoptimizeIf(no_condition, instr->environment()); |
4755 } | 4717 } |
4756 } | 4718 } |
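
The lea at the top of this deferred path reverses the caller's optimistic SmiUntag. A hedged sketch of the arithmetic, assuming the tagging scheme asserted here (kSmiTagSize == 1, kSmiTag == 0, kHeapObjectTag == 1):

    #include <cstdint>

    // "sar reg, 1" halved the tagged word; for a heap object (low bit 1) the
    // original pointer is reg * 2 + kHeapObjectTag, which lea rebuilds in one
    // instruction without clobbering the flags.
    uint32_t RevertOptimisticUntag(uint32_t untagged) {
      const uint32_t kHeapObjectTag = 1;
      return untagged * 2 + kHeapObjectTag;
    }
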
4757 | 4719 |
4758 | 4720 |
4759 void LCodeGen::DoTaggedToI(LTaggedToI* instr) { | 4721 void LCodeGen::DoTaggedToI(LTaggedToI* instr) { |
4760 class DeferredTaggedToI V8_FINAL : public LDeferredCode { | 4722 class DeferredTaggedToI V8_FINAL : public LDeferredCode { |
4761 public: | 4723 public: |
4762 DeferredTaggedToI(LCodeGen* codegen, LTaggedToI* instr) | 4724 DeferredTaggedToI(LCodeGen* codegen, |
4763 : LDeferredCode(codegen), instr_(instr) { } | 4725 LTaggedToI* instr, |
| 4726 const X87Stack& x87_stack) |
| 4727 : LDeferredCode(codegen, x87_stack), instr_(instr) { } |
4764 virtual void Generate() V8_OVERRIDE { | 4728 virtual void Generate() V8_OVERRIDE { |
4765 codegen()->DoDeferredTaggedToI(instr_, done()); | 4729 codegen()->DoDeferredTaggedToI(instr_, done()); |
4766 } | 4730 } |
4767 virtual LInstruction* instr() V8_OVERRIDE { return instr_; } | 4731 virtual LInstruction* instr() V8_OVERRIDE { return instr_; } |
4768 private: | 4732 private: |
4769 LTaggedToI* instr_; | 4733 LTaggedToI* instr_; |
4770 }; | 4734 }; |
4771 | 4735 |
4772 LOperand* input = instr->value(); | 4736 LOperand* input = instr->value(); |
4773 ASSERT(input->IsRegister()); | 4737 ASSERT(input->IsRegister()); |
4774 Register input_reg = ToRegister(input); | 4738 Register input_reg = ToRegister(input); |
4775 ASSERT(input_reg.is(ToRegister(instr->result()))); | 4739 ASSERT(input_reg.is(ToRegister(instr->result()))); |
4776 | 4740 |
4777 if (instr->hydrogen()->value()->representation().IsSmi()) { | 4741 if (instr->hydrogen()->value()->representation().IsSmi()) { |
4778 __ SmiUntag(input_reg); | 4742 __ SmiUntag(input_reg); |
4779 } else { | 4743 } else { |
4780 DeferredTaggedToI* deferred = | 4744 DeferredTaggedToI* deferred = |
4781 new(zone()) DeferredTaggedToI(this, instr); | 4745 new(zone()) DeferredTaggedToI(this, instr, x87_stack_); |
4782 // Optimistically untag the input. | 4746 // Optimistically untag the input. |
4783 // If the input is a HeapObject, SmiUntag will set the carry flag. | 4747 // If the input is a HeapObject, SmiUntag will set the carry flag. |
4784 STATIC_ASSERT(kSmiTagSize == 1 && kSmiTag == 0); | 4748 STATIC_ASSERT(kSmiTagSize == 1 && kSmiTag == 0); |
4785 __ SmiUntag(input_reg); | 4749 __ SmiUntag(input_reg); |
4786 // Branch to deferred code if the input was tagged. | 4750 // Branch to deferred code if the input was tagged. |
4787 // The deferred code will take care of restoring the tag. | 4751 // The deferred code will take care of restoring the tag. |
4788 __ j(carry, deferred->entry()); | 4752 __ j(carry, deferred->entry()); |
4789 __ bind(deferred->exit()); | 4753 __ bind(deferred->exit()); |
4790 } | 4754 } |
4791 } | 4755 } |
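
The inline fast path leans on a flags trick: because the smi tag is the low bit, the sar performed by SmiUntag shifts that bit into the carry flag, so a single j(carry, ...) separates smis from heap objects. Roughly, as our own sketch:

    #include <cstdint>

    // Returns true when the value was a smi and *payload is valid; false means
    // the carry branch to the deferred code would be taken.
    bool OptimisticSmiUntag(uint32_t tagged, int32_t* payload) {
      const bool carry = (tagged & 1) != 0;          // bit shifted out by sar
      *payload = static_cast<int32_t>(tagged) >> 1;  // arithmetic shift, like sar
      return !carry;
    }
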
4792 | 4756 |
4793 | 4757 |
4794 void LCodeGen::DoNumberUntagD(LNumberUntagD* instr) { | 4758 void LCodeGen::DoNumberUntagD(LNumberUntagD* instr) { |
4795 LOperand* input = instr->value(); | 4759 LOperand* input = instr->value(); |
4796 ASSERT(input->IsRegister()); | 4760 ASSERT(input->IsRegister()); |
4797 LOperand* temp = instr->temp(); | 4761 LOperand* temp = instr->temp(); |
4798 ASSERT(temp->IsRegister()); | 4762 ASSERT(temp->IsRegister()); |
4799 LOperand* result = instr->result(); | 4763 LOperand* result = instr->result(); |
4800 ASSERT(result->IsDoubleRegister()); | 4764 ASSERT(result->IsDoubleRegister()); |
4801 | 4765 |
4802 Register input_reg = ToRegister(input); | 4766 Register input_reg = ToRegister(input); |
4803 bool deoptimize_on_minus_zero = | 4767 bool deoptimize_on_minus_zero = |
4804 instr->hydrogen()->deoptimize_on_minus_zero(); | 4768 instr->hydrogen()->deoptimize_on_minus_zero(); |
4805 Register temp_reg = ToRegister(temp); | 4769 Register temp_reg = ToRegister(temp); |
4806 | 4770 |
4807 HValue* value = instr->hydrogen()->value(); | 4771 HValue* value = instr->hydrogen()->value(); |
4808 NumberUntagDMode mode = value->representation().IsSmi() | 4772 NumberUntagDMode mode = value->representation().IsSmi() |
4809 ? NUMBER_CANDIDATE_IS_SMI : NUMBER_CANDIDATE_IS_ANY_TAGGED; | 4773 ? NUMBER_CANDIDATE_IS_SMI : NUMBER_CANDIDATE_IS_ANY_TAGGED; |
4810 | 4774 |
4811 XMMRegister result_reg = ToDoubleRegister(result); | 4775 EmitNumberUntagDNoSSE2(input_reg, |
4812 EmitNumberUntagD(input_reg, | 4776 temp_reg, |
4813 temp_reg, | 4777 ToX87Register(result), |
4814 result_reg, | 4778 instr->hydrogen()->can_convert_undefined_to_nan(), |
4815 instr->hydrogen()->can_convert_undefined_to_nan(), | 4779 deoptimize_on_minus_zero, |
4816 deoptimize_on_minus_zero, | 4780 instr->environment(), |
4817 instr->environment(), | 4781 mode); |
4818 mode); | |
4819 } | 4782 } |
4820 | 4783 |
4821 | 4784 |
4822 void LCodeGen::DoDoubleToI(LDoubleToI* instr) { | 4785 void LCodeGen::DoDoubleToI(LDoubleToI* instr) { |
4823 LOperand* input = instr->value(); | 4786 LOperand* input = instr->value(); |
4824 ASSERT(input->IsDoubleRegister()); | 4787 ASSERT(input->IsDoubleRegister()); |
4825 LOperand* result = instr->result(); | 4788 LOperand* result = instr->result(); |
4826 ASSERT(result->IsRegister()); | 4789 ASSERT(result->IsRegister()); |
4827 Register result_reg = ToRegister(result); | 4790 Register result_reg = ToRegister(result); |
4828 | 4791 |
4829 if (instr->truncating()) { | 4792 if (instr->truncating()) { |
4830 XMMRegister input_reg = ToDoubleRegister(input); | 4793 X87Register input_reg = ToX87Register(input); |
4831 __ TruncateDoubleToI(result_reg, input_reg); | 4794 X87Fxch(input_reg); |
| 4795 __ TruncateX87TOSToI(result_reg); |
4832 } else { | 4796 } else { |
4833 Label bailout, done; | 4797 Label bailout, done; |
4834 XMMRegister input_reg = ToDoubleRegister(input); | 4798 X87Register input_reg = ToX87Register(input); |
4835 XMMRegister xmm_scratch = double_scratch0(); | 4799 X87Fxch(input_reg); |
4836 __ DoubleToI(result_reg, input_reg, xmm_scratch, | 4800 __ X87TOSToI(result_reg, instr->hydrogen()->GetMinusZeroMode(), |
4837 instr->hydrogen()->GetMinusZeroMode(), &bailout, Label::kNear); | 4801 &bailout, Label::kNear); |
4838 __ jmp(&done, Label::kNear); | 4802 __ jmp(&done, Label::kNear); |
4839 __ bind(&bailout); | 4803 __ bind(&bailout); |
4840 DeoptimizeIf(no_condition, instr->environment()); | 4804 DeoptimizeIf(no_condition, instr->environment()); |
4841 __ bind(&done); | 4805 __ bind(&done); |
4842 } | 4806 } |
4843 } | 4807 } |
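
The two branches have different contracts: TruncateX87TOSToI may discard the fractional part, while X87TOSToI must reach &bailout whenever the conversion is lossy (or yields -0.0 under the minus-zero mode). A behavioral sketch of the checked path, ours rather than V8's, glossing over the exact out-of-range handling:

    #include <cmath>
    #include <cstdint>

    // Returns false where the generated code would jump to &bailout.
    bool CheckedDoubleToI(double v, bool minus_zero_bails, int32_t* out) {
      if (std::isnan(v) || v < INT32_MIN || v > INT32_MAX) return false;
      const int32_t i = static_cast<int32_t>(v);       // truncates toward zero
      if (static_cast<double>(i) != v) return false;   // lossy -> bailout
      if (i == 0 && minus_zero_bails && std::signbit(v)) return false;
      *out = i;
      return true;
    }
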
4844 | 4808 |
4845 | 4809 |
4846 void LCodeGen::DoDoubleToSmi(LDoubleToSmi* instr) { | 4810 void LCodeGen::DoDoubleToSmi(LDoubleToSmi* instr) { |
4847 LOperand* input = instr->value(); | 4811 LOperand* input = instr->value(); |
4848 ASSERT(input->IsDoubleRegister()); | 4812 ASSERT(input->IsDoubleRegister()); |
4849 LOperand* result = instr->result(); | 4813 LOperand* result = instr->result(); |
4850 ASSERT(result->IsRegister()); | 4814 ASSERT(result->IsRegister()); |
4851 Register result_reg = ToRegister(result); | 4815 Register result_reg = ToRegister(result); |
4852 | 4816 |
4853 Label bailout, done; | 4817 Label bailout, done; |
4854 XMMRegister input_reg = ToDoubleRegister(input); | 4818 X87Register input_reg = ToX87Register(input); |
4855 XMMRegister xmm_scratch = double_scratch0(); | 4819 X87Fxch(input_reg); |
4856 __ DoubleToI(result_reg, input_reg, xmm_scratch, | 4820 __ X87TOSToI(result_reg, instr->hydrogen()->GetMinusZeroMode(), |
4857 instr->hydrogen()->GetMinusZeroMode(), &bailout, Label::kNear); | 4821 &bailout, Label::kNear); |
4858 __ jmp(&done, Label::kNear); | 4822 __ jmp(&done, Label::kNear); |
4859 __ bind(&bailout); | 4823 __ bind(&bailout); |
4860 DeoptimizeIf(no_condition, instr->environment()); | 4824 DeoptimizeIf(no_condition, instr->environment()); |
4861 __ bind(&done); | 4825 __ bind(&done); |
4862 | 4826 |
4863 __ SmiTag(result_reg); | 4827 __ SmiTag(result_reg); |
4864 DeoptimizeIf(overflow, instr->environment()); | 4828 DeoptimizeIf(overflow, instr->environment()); |
4865 } | 4829 } |
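
After the conversion, SmiTag doubles the value so the tag bit becomes 0; the overflow check fires exactly when the result needs more than the 31 payload bits a smi offers. As a sketch:

    #include <cstdint>

    // Returns false where "DeoptimizeIf(overflow, ...)" would fire above.
    bool SmiTag(int32_t value, int32_t* tagged) {
      if (value > 0x3FFFFFFF || value < -0x40000000) return false;  // overflow
      *tagged = value * 2;  // shifts the payload up, leaving tag bit 0
      return true;
    }
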
4866 | 4830 |
4867 | 4831 |
(...skipping 70 matching lines...) |
4938 } | 4902 } |
4939 DeoptimizeIf(not_equal, instr->environment()); | 4903 DeoptimizeIf(not_equal, instr->environment()); |
4940 } | 4904 } |
4941 | 4905 |
4942 | 4906 |
4943 void LCodeGen::DoDeferredInstanceMigration(LCheckMaps* instr, Register object) { | 4907 void LCodeGen::DoDeferredInstanceMigration(LCheckMaps* instr, Register object) { |
4944 { | 4908 { |
4945 PushSafepointRegistersScope scope(this); | 4909 PushSafepointRegistersScope scope(this); |
4946 __ push(object); | 4910 __ push(object); |
4947 __ xor_(esi, esi); | 4911 __ xor_(esi, esi); |
4948 __ CallRuntimeSaveDoubles(Runtime::kTryMigrateInstance); | 4912 __ CallRuntime(Runtime::kTryMigrateInstance); |
4949 RecordSafepointWithRegisters( | 4913 RecordSafepointWithRegisters( |
4950 instr->pointer_map(), 1, Safepoint::kNoLazyDeopt); | 4914 instr->pointer_map(), 1, Safepoint::kNoLazyDeopt); |
4951 | 4915 |
4952 __ test(eax, Immediate(kSmiTagMask)); | 4916 __ test(eax, Immediate(kSmiTagMask)); |
4953 } | 4917 } |
4954 DeoptimizeIf(zero, instr->environment()); | 4918 DeoptimizeIf(zero, instr->environment()); |
4955 } | 4919 } |
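
The smi test after the runtime call encodes the failure protocol: Runtime::kTryMigrateInstance is taken here to return a smi when migration fails, so a zero tag bit routes to the deoptimizer. A tiny sketch of that convention as we read it from the code:

    #include <cstdint>

    // "test(eax, kSmiTagMask); DeoptimizeIf(zero, ...)": a heap object
    // (tag bit set) signals a successful migration.
    bool MigrationSucceeded(uint32_t result_word) {
      const uint32_t kSmiTagMask = 1;
      return (result_word & kSmiTagMask) != 0;
    }
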
4956 | 4920 |
4957 | 4921 |
4958 void LCodeGen::DoCheckMaps(LCheckMaps* instr) { | 4922 void LCodeGen::DoCheckMaps(LCheckMaps* instr) { |
4959 class DeferredCheckMaps V8_FINAL : public LDeferredCode { | 4923 class DeferredCheckMaps V8_FINAL : public LDeferredCode { |
4960 public: | 4924 public: |
4961 DeferredCheckMaps(LCodeGen* codegen, LCheckMaps* instr, Register object) | 4925 DeferredCheckMaps(LCodeGen* codegen, |
4962 : LDeferredCode(codegen), instr_(instr), object_(object) { | 4926 LCheckMaps* instr, |
| 4927 Register object, |
| 4928 const X87Stack& x87_stack) |
| 4929 : LDeferredCode(codegen, x87_stack), instr_(instr), object_(object) { |
4963 SetExit(check_maps()); | 4930 SetExit(check_maps()); |
4964 } | 4931 } |
4965 virtual void Generate() V8_OVERRIDE { | 4932 virtual void Generate() V8_OVERRIDE { |
4966 codegen()->DoDeferredInstanceMigration(instr_, object_); | 4933 codegen()->DoDeferredInstanceMigration(instr_, object_); |
4967 } | 4934 } |
4968 Label* check_maps() { return &check_maps_; } | 4935 Label* check_maps() { return &check_maps_; } |
4969 virtual LInstruction* instr() V8_OVERRIDE { return instr_; } | 4936 virtual LInstruction* instr() V8_OVERRIDE { return instr_; } |
4970 private: | 4937 private: |
4971 LCheckMaps* instr_; | 4938 LCheckMaps* instr_; |
4972 Label check_maps_; | 4939 Label check_maps_; |
4973 Register object_; | 4940 Register object_; |
4974 }; | 4941 }; |
4975 | 4942 |
4976 if (instr->hydrogen()->IsStabilityCheck()) { | 4943 if (instr->hydrogen()->IsStabilityCheck()) { |
4977 const UniqueSet<Map>* maps = instr->hydrogen()->maps(); | 4944 const UniqueSet<Map>* maps = instr->hydrogen()->maps(); |
4978 for (int i = 0; i < maps->size(); ++i) { | 4945 for (int i = 0; i < maps->size(); ++i) { |
4979 AddStabilityDependency(maps->at(i).handle()); | 4946 AddStabilityDependency(maps->at(i).handle()); |
4980 } | 4947 } |
4981 return; | 4948 return; |
4982 } | 4949 } |
4983 | 4950 |
4984 LOperand* input = instr->value(); | 4951 LOperand* input = instr->value(); |
4985 ASSERT(input->IsRegister()); | 4952 ASSERT(input->IsRegister()); |
4986 Register reg = ToRegister(input); | 4953 Register reg = ToRegister(input); |
4987 | 4954 |
4988 DeferredCheckMaps* deferred = NULL; | 4955 DeferredCheckMaps* deferred = NULL; |
4989 if (instr->hydrogen()->HasMigrationTarget()) { | 4956 if (instr->hydrogen()->HasMigrationTarget()) { |
4990 deferred = new(zone()) DeferredCheckMaps(this, instr, reg); | 4957 deferred = new(zone()) DeferredCheckMaps(this, instr, reg, x87_stack_); |
4991 __ bind(deferred->check_maps()); | 4958 __ bind(deferred->check_maps()); |
4992 } | 4959 } |
4993 | 4960 |
4994 const UniqueSet<Map>* maps = instr->hydrogen()->maps(); | 4961 const UniqueSet<Map>* maps = instr->hydrogen()->maps(); |
4995 Label success; | 4962 Label success; |
4996 for (int i = 0; i < maps->size() - 1; i++) { | 4963 for (int i = 0; i < maps->size() - 1; i++) { |
4997 Handle<Map> map = maps->at(i).handle(); | 4964 Handle<Map> map = maps->at(i).handle(); |
4998 __ CompareMap(reg, map); | 4965 __ CompareMap(reg, map); |
4999 __ j(equal, &success, Label::kNear); | 4966 __ j(equal, &success, Label::kNear); |
5000 } | 4967 } |
5001 | 4968 |
5002 Handle<Map> map = maps->at(maps->size() - 1).handle(); | 4969 Handle<Map> map = maps->at(maps->size() - 1).handle(); |
5003 __ CompareMap(reg, map); | 4970 __ CompareMap(reg, map); |
5004 if (instr->hydrogen()->HasMigrationTarget()) { | 4971 if (instr->hydrogen()->HasMigrationTarget()) { |
5005 __ j(not_equal, deferred->entry()); | 4972 __ j(not_equal, deferred->entry()); |
5006 } else { | 4973 } else { |
5007 DeoptimizeIf(not_equal, instr->environment()); | 4974 DeoptimizeIf(not_equal, instr->environment()); |
5008 } | 4975 } |
5009 | 4976 |
5010 __ bind(&success); | 4977 __ bind(&success); |
5011 } | 4978 } |
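
The emitted map check is a short linear scan: the first size() - 1 comparisons branch to success on a hit, and only the final comparison can deoptimize (or, with a migration target, jump to the deferred migration). Its shape, as a sketch:

    #include <cstddef>
    #include <vector>

    // Mirrors the control flow above; "maps" plays the role of the UniqueSet.
    template <typename Map>
    bool CheckMaps(const Map* object_map, const std::vector<const Map*>& maps) {
      for (std::size_t i = 0; i + 1 < maps.size(); ++i) {
        if (object_map == maps[i]) return true;  // j(equal, &success)
      }
      return object_map == maps.back();          // miss here -> deopt/migrate
    }
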
5012 | 4979 |
5013 | 4980 |
5014 void LCodeGen::DoClampDToUint8(LClampDToUint8* instr) { | 4981 void LCodeGen::DoClampDToUint8(LClampDToUint8* instr) { |
5015 XMMRegister value_reg = ToDoubleRegister(instr->unclamped()); | 4982 UNREACHABLE(); |
5016 XMMRegister xmm_scratch = double_scratch0(); | |
5017 Register result_reg = ToRegister(instr->result()); | |
5018 __ ClampDoubleToUint8(value_reg, xmm_scratch, result_reg); | |
5019 } | 4983 } |
5020 | 4984 |
5021 | 4985 |
5022 void LCodeGen::DoClampIToUint8(LClampIToUint8* instr) { | 4986 void LCodeGen::DoClampIToUint8(LClampIToUint8* instr) { |
5023 ASSERT(instr->unclamped()->Equals(instr->result())); | 4987 ASSERT(instr->unclamped()->Equals(instr->result())); |
5024 Register value_reg = ToRegister(instr->result()); | 4988 Register value_reg = ToRegister(instr->result()); |
5025 __ ClampUint8(value_reg); | 4989 __ ClampUint8(value_reg); |
5026 } | 4990 } |
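
ClampUint8 saturates an int32 into [0, 255] in place, which is why the instruction requires unclamped and result to share a register. Behaviorally, as a sketch:

    #include <cstdint>

    // Saturating clamp: negatives become 0, values above 255 become 255.
    uint8_t ClampUint8(int32_t v) {
      if (v < 0) return 0;
      if (v > 255) return 255;
      return static_cast<uint8_t>(v);
    }
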
5027 | 4991 |
5028 | 4992 |
5029 void LCodeGen::DoClampTToUint8(LClampTToUint8* instr) { | 4993 void LCodeGen::DoClampTToUint8NoSSE2(LClampTToUint8NoSSE2* instr) { |
5030 ASSERT(instr->unclamped()->Equals(instr->result())); | |
5031 Register input_reg = ToRegister(instr->unclamped()); | 4994 Register input_reg = ToRegister(instr->unclamped()); |
5032 XMMRegister temp_xmm_reg = ToDoubleRegister(instr->temp_xmm()); | 4995 Register result_reg = ToRegister(instr->result()); |
5033 XMMRegister xmm_scratch = double_scratch0(); | 4996 Register scratch = ToRegister(instr->scratch()); |
5034 Label is_smi, done, heap_number; | 4997 Register scratch2 = ToRegister(instr->scratch2()); |
| 4998 Register scratch3 = ToRegister(instr->scratch3()); |
| 4999 Label is_smi, done, heap_number, valid_exponent, |
| 5000 largest_value, zero_result, maybe_nan_or_infinity; |
5035 | 5001 |
5036 __ JumpIfSmi(input_reg, &is_smi); | 5002 __ JumpIfSmi(input_reg, &is_smi); |
5037 | 5003 |
5038 // Check for heap number | 5004 // Check for heap number |
5039 __ cmp(FieldOperand(input_reg, HeapObject::kMapOffset), | 5005 __ cmp(FieldOperand(input_reg, HeapObject::kMapOffset), |
5040 factory()->heap_number_map()); | 5006 factory()->heap_number_map()); |
5041 __ j(equal, &heap_number, Label::kNear); | 5007 __ j(equal, &heap_number, Label::kNear); |
5042 | 5008 |
5043 // Check for undefined. Undefined is converted to zero for clamping | 5009 // Check for undefined. Undefined is converted to zero for clamping |
5044 // conversions. | 5010 // conversions. |
5045 __ cmp(input_reg, factory()->undefined_value()); | 5011 __ cmp(input_reg, factory()->undefined_value()); |
5046 DeoptimizeIf(not_equal, instr->environment()); | 5012 DeoptimizeIf(not_equal, instr->environment()); |
5047 __ mov(input_reg, 0); | 5013 __ jmp(&zero_result, Label::kNear); |
5048 __ jmp(&done, Label::kNear); | |
5049 | 5014 |
5050 // Heap number | 5015 // Heap number |
5051 __ bind(&heap_number); | 5016 __ bind(&heap_number); |
5052 __ movsd(xmm_scratch, FieldOperand(input_reg, HeapNumber::kValueOffset)); | 5017 |
5053 __ ClampDoubleToUint8(xmm_scratch, temp_xmm_reg, input_reg); | 5018 // Surprisingly, the hand-crafted bit manipulations below are much faster |
| 5019 // than the x86 FPU's built-in rounding instruction, especially since |
| 5020 // "banker's rounding" would otherwise be very expensive. |
| 5021 |
| 5022 // Get exponent word. |
| 5023 __ mov(scratch, FieldOperand(input_reg, HeapNumber::kExponentOffset)); |
| 5024 __ mov(scratch3, FieldOperand(input_reg, HeapNumber::kMantissaOffset)); |
| 5025 |
| 5026 // Test for negative values --> clamp to zero |
| 5027 __ test(scratch, scratch); |
| 5028 __ j(negative, &zero_result, Label::kNear); |
| 5029 |
| 5030 // Get exponent alone in scratch2. |
| 5031 __ mov(scratch2, scratch); |
| 5032 __ and_(scratch2, HeapNumber::kExponentMask); |
| 5033 __ shr(scratch2, HeapNumber::kExponentShift); |
| 5034 __ j(zero, &zero_result, Label::kNear); |
| 5035 __ sub(scratch2, Immediate(HeapNumber::kExponentBias - 1)); |
| 5036 __ j(negative, &zero_result, Label::kNear); |
| 5037 |
| 5038 const uint32_t non_int8_exponent = 7; |
| 5039 __ cmp(scratch2, Immediate(non_int8_exponent + 1)); |
| 5040 // If the exponent is too big, check for special values. |
| 5041 __ j(greater, &maybe_nan_or_infinity, Label::kNear); |
| 5042 |
| 5043 __ bind(&valid_exponent); |
| 5044 // Exponent word in scratch, exponent in scratch2. We know that |
| 5045 // 0 <= exponent < 7. The shift bias is the number of bits to shift the |
| 5046 // mantissa such that, with an exponent of 7, the top-most one lands in |
| 5047 // bit 30, allowing detection of the rounding overflow from 255.5 to 256 |
| 5048 // (bit 31 goes from 0 to 1). |
| 5049 int shift_bias = (30 - HeapNumber::kExponentShift) - 7 - 1; |
| 5050 __ lea(result_reg, MemOperand(scratch2, shift_bias)); |
| 5051 // Here result_reg (ecx) is the shift, scratch is the exponent word. Get the |
| 5052 // top bits of the mantissa. |
| 5053 __ and_(scratch, HeapNumber::kMantissaMask); |
| 5054 // Put back the implicit 1 of the mantissa |
| 5055 __ or_(scratch, 1 << HeapNumber::kExponentShift); |
| 5056 // Shift up to round |
| 5057 __ shl_cl(scratch); |
| 5058 // Use "banker's rounding" to spec: If fractional part of number is 0.5, then |
| 5059 // use the bit in the "ones" place and add it to the "halves" place, which has |
| 5060 // the effect of rounding to even. |
| 5061 __ mov(scratch2, scratch); |
| 5062 const uint32_t one_half_bit_shift = 30 - sizeof(uint8_t) * 8; |
| 5063 const uint32_t one_bit_shift = one_half_bit_shift + 1; |
| 5064 __ and_(scratch2, Immediate((1 << one_bit_shift) - 1)); |
| 5065 __ cmp(scratch2, Immediate(1 << one_half_bit_shift)); |
| 5066 Label no_round; |
| 5067 __ j(less, &no_round, Label::kNear); |
| 5068 Label round_up; |
| 5069 __ mov(scratch2, Immediate(1 << one_half_bit_shift)); |
| 5070 __ j(greater, &round_up, Label::kNear); |
| 5071 __ test(scratch3, scratch3); |
| 5072 __ j(not_zero, &round_up, Label::kNear); |
| 5073 __ mov(scratch2, scratch); |
| 5074 __ and_(scratch2, Immediate(1 << one_bit_shift)); |
| 5075 __ shr(scratch2, 1); |
| 5076 __ bind(&round_up); |
| 5077 __ add(scratch, scratch2); |
| 5078 __ j(overflow, &largest_value, Label::kNear); |
| 5079 __ bind(&no_round); |
| 5080 __ shr(scratch, 23); |
| 5081 __ mov(result_reg, scratch); |
| 5082 __ jmp(&done, Label::kNear); |
| 5083 |
| 5084 __ bind(&maybe_nan_or_infinity); |
| 5085 // Check for NaN/Infinity; all other values map to 255. |
| 5086 __ cmp(scratch2, Immediate(HeapNumber::kInfinityOrNanExponent + 1)); |
| 5087 __ j(not_equal, &largest_value, Label::kNear); |
| 5088 |
| 5089 // Check for NaN, which differs from Infinity in that at least one mantissa |
| 5090 // bit is set. |
| 5091 __ and_(scratch, HeapNumber::kMantissaMask); |
| 5092 __ or_(scratch, FieldOperand(input_reg, HeapNumber::kMantissaOffset)); |
| 5093 __ j(not_zero, &zero_result, Label::kNear); // M!=0 --> NaN |
| 5094 // Infinity -> Fall through to map to 255. |
| 5095 |
| 5096 __ bind(&largest_value); |
| 5097 __ mov(result_reg, Immediate(255)); |
| 5098 __ jmp(&done, Label::kNear); |
| 5099 |
| 5100 __ bind(&zero_result); |
| 5101 __ xor_(result_reg, result_reg); |
5054 __ jmp(&done, Label::kNear); | 5102 __ jmp(&done, Label::kNear); |
5055 | 5103 |
5056 // smi | 5104 // smi |
5057 __ bind(&is_smi); | 5105 __ bind(&is_smi); |
5058 __ SmiUntag(input_reg); | 5106 if (!input_reg.is(result_reg)) { |
5059 __ ClampUint8(input_reg); | 5107 __ mov(result_reg, input_reg); |
| 5108 } |
| 5109 __ SmiUntag(result_reg); |
| 5110 __ ClampUint8(result_reg); |
5060 __ bind(&done); | 5111 __ bind(&done); |
5061 } | 5112 } |
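
Taken together, the heap-number path above computes the following function; this portable rendering (our own model, not V8 code) makes the clamping and the round-half-to-even tie-break explicit:

    #include <cmath>
    #include <cstdint>

    // Behavioral model of DoClampTToUint8NoSSE2's heap-number path:
    // negatives, -0 and NaN clamp to 0; Infinity and anything >= 255 clamps
    // to 255; ties round to even ("banker's rounding").
    uint8_t ClampDoubleToUint8(double value) {
      if (std::isnan(value) || value <= 0.0) return 0;
      if (value >= 255.0) return 255;
      const double integral = std::floor(value);
      const double frac = value - integral;
      uint32_t result = static_cast<uint32_t>(integral);
      if (frac > 0.5 || (frac == 0.5 && (result & 1) != 0)) result++;
      return static_cast<uint8_t>(result);
    }
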
5062 | 5113 |
5063 | 5114 |
5064 void LCodeGen::DoDoubleBits(LDoubleBits* instr) { | 5115 void LCodeGen::DoDoubleBits(LDoubleBits* instr) { |
5065 XMMRegister value_reg = ToDoubleRegister(instr->value()); | 5116 UNREACHABLE(); |
5066 Register result_reg = ToRegister(instr->result()); | |
5067 if (instr->hydrogen()->bits() == HDoubleBits::HIGH) { | |
5068 if (CpuFeatures::IsSupported(SSE4_1)) { | |
5069 CpuFeatureScope scope2(masm(), SSE4_1); | |
5070 __ pextrd(result_reg, value_reg, 1); | |
5071 } else { | |
5072 XMMRegister xmm_scratch = double_scratch0(); | |
5073 __ pshufd(xmm_scratch, value_reg, 1); | |
5074 __ movd(result_reg, xmm_scratch); | |
5075 } | |
5076 } else { | |
5077 __ movd(result_reg, value_reg); | |
5078 } | |
5079 } | 5117 } |
5080 | 5118 |
5081 | 5119 |
5082 void LCodeGen::DoConstructDouble(LConstructDouble* instr) { | 5120 void LCodeGen::DoConstructDouble(LConstructDouble* instr) { |
5083 Register hi_reg = ToRegister(instr->hi()); | 5121 UNREACHABLE(); |
5084 Register lo_reg = ToRegister(instr->lo()); | |
5085 XMMRegister result_reg = ToDoubleRegister(instr->result()); | |
5086 | |
5087 if (CpuFeatures::IsSupported(SSE4_1)) { | |
5088 CpuFeatureScope scope2(masm(), SSE4_1); | |
5089 __ movd(result_reg, lo_reg); | |
5090 __ pinsrd(result_reg, hi_reg, 1); | |
5091 } else { | |
5092 XMMRegister xmm_scratch = double_scratch0(); | |
5093 __ movd(result_reg, hi_reg); | |
5094 __ psllq(result_reg, 32); | |
5095 __ movd(xmm_scratch, lo_reg); | |
5096 __ orps(result_reg, xmm_scratch); | |
5097 } | |
5098 } | 5122 } |
5099 | 5123 |
5100 | 5124 |
5101 void LCodeGen::DoAllocate(LAllocate* instr) { | 5125 void LCodeGen::DoAllocate(LAllocate* instr) { |
5102 class DeferredAllocate V8_FINAL : public LDeferredCode { | 5126 class DeferredAllocate V8_FINAL : public LDeferredCode { |
5103 public: | 5127 public: |
5104 DeferredAllocate(LCodeGen* codegen, LAllocate* instr) | 5128 DeferredAllocate(LCodeGen* codegen, |
5105 : LDeferredCode(codegen), instr_(instr) { } | 5129 LAllocate* instr, |
| 5130 const X87Stack& x87_stack) |
| 5131 : LDeferredCode(codegen, x87_stack), instr_(instr) { } |
5106 virtual void Generate() V8_OVERRIDE { | 5132 virtual void Generate() V8_OVERRIDE { |
5107 codegen()->DoDeferredAllocate(instr_); | 5133 codegen()->DoDeferredAllocate(instr_); |
5108 } | 5134 } |
5109 virtual LInstruction* instr() V8_OVERRIDE { return instr_; } | 5135 virtual LInstruction* instr() V8_OVERRIDE { return instr_; } |
5110 private: | 5136 private: |
5111 LAllocate* instr_; | 5137 LAllocate* instr_; |
5112 }; | 5138 }; |
5113 | 5139 |
5114 DeferredAllocate* deferred = new(zone()) DeferredAllocate(this, instr); | 5140 DeferredAllocate* deferred = |
| 5141 new(zone()) DeferredAllocate(this, instr, x87_stack_); |
5115 | 5142 |
5116 Register result = ToRegister(instr->result()); | 5143 Register result = ToRegister(instr->result()); |
5117 Register temp = ToRegister(instr->temp()); | 5144 Register temp = ToRegister(instr->temp()); |
5118 | 5145 |
5119 // Allocate memory for the object. | 5146 // Allocate memory for the object. |
5120 AllocationFlags flags = TAG_OBJECT; | 5147 AllocationFlags flags = TAG_OBJECT; |
5121 if (instr->hydrogen()->MustAllocateDoubleAligned()) { | 5148 if (instr->hydrogen()->MustAllocateDoubleAligned()) { |
5122 flags = static_cast<AllocationFlags>(flags | DOUBLE_ALIGNMENT); | 5149 flags = static_cast<AllocationFlags>(flags | DOUBLE_ALIGNMENT); |
5123 } | 5150 } |
5124 if (instr->hydrogen()->IsOldPointerSpaceAllocation()) { | 5151 if (instr->hydrogen()->IsOldPointerSpaceAllocation()) { |
(...skipping 332 matching lines...) |
5457 | 5484 |
5458 | 5485 |
5459 void LCodeGen::DoDummyUse(LDummyUse* instr) { | 5486 void LCodeGen::DoDummyUse(LDummyUse* instr) { |
5460 // Nothing to see here, move on! | 5487 // Nothing to see here, move on! |
5461 } | 5488 } |
5462 | 5489 |
5463 | 5490 |
5464 void LCodeGen::DoDeferredStackCheck(LStackCheck* instr) { | 5491 void LCodeGen::DoDeferredStackCheck(LStackCheck* instr) { |
5465 PushSafepointRegistersScope scope(this); | 5492 PushSafepointRegistersScope scope(this); |
5466 __ mov(esi, Operand(ebp, StandardFrameConstants::kContextOffset)); | 5493 __ mov(esi, Operand(ebp, StandardFrameConstants::kContextOffset)); |
5467 __ CallRuntimeSaveDoubles(Runtime::kHiddenStackGuard); | 5494 __ CallRuntime(Runtime::kHiddenStackGuard); |
5468 RecordSafepointWithLazyDeopt( | 5495 RecordSafepointWithLazyDeopt( |
5469 instr, RECORD_SAFEPOINT_WITH_REGISTERS_AND_NO_ARGUMENTS); | 5496 instr, RECORD_SAFEPOINT_WITH_REGISTERS_AND_NO_ARGUMENTS); |
5470 ASSERT(instr->HasEnvironment()); | 5497 ASSERT(instr->HasEnvironment()); |
5471 LEnvironment* env = instr->environment(); | 5498 LEnvironment* env = instr->environment(); |
5472 safepoints_.RecordLazyDeoptimizationIndex(env->deoptimization_index()); | 5499 safepoints_.RecordLazyDeoptimizationIndex(env->deoptimization_index()); |
5473 } | 5500 } |
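
This deferred code is only reached from DoStackCheck below, whose fast path compares esp against a per-isolate limit; the runtime lowers that limit when it wants to interrupt JavaScript execution, making the unsigned "below" branch fire. A hedged illustration (the names here are ours, not V8's API):

    #include <cstdint>

    struct IsolateLimits {
      uintptr_t stack_limit;  // lowered by the runtime to request an interrupt
    };

    // Equivalent of "cmp(esp, stack_limit); j(below, slow_path)".
    inline bool NeedsStackGuard(const IsolateLimits& limits, uintptr_t sp) {
      return sp < limits.stack_limit;  // "below" is an unsigned comparison
    }
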
5474 | 5501 |
5475 | 5502 |
5476 void LCodeGen::DoStackCheck(LStackCheck* instr) { | 5503 void LCodeGen::DoStackCheck(LStackCheck* instr) { |
5477 class DeferredStackCheck V8_FINAL : public LDeferredCode { | 5504 class DeferredStackCheck V8_FINAL : public LDeferredCode { |
5478 public: | 5505 public: |
5479 DeferredStackCheck(LCodeGen* codegen, LStackCheck* instr) | 5506 DeferredStackCheck(LCodeGen* codegen, |
5480 : LDeferredCode(codegen), instr_(instr) { } | 5507 LStackCheck* instr, |
| 5508 const X87Stack& x87_stack) |
| 5509 : LDeferredCode(codegen, x87_stack), instr_(instr) { } |
5481 virtual void Generate() V8_OVERRIDE { | 5510 virtual void Generate() V8_OVERRIDE { |
5482 codegen()->DoDeferredStackCheck(instr_); | 5511 codegen()->DoDeferredStackCheck(instr_); |
5483 } | 5512 } |
5484 virtual LInstruction* instr() V8_OVERRIDE { return instr_; } | 5513 virtual LInstruction* instr() V8_OVERRIDE { return instr_; } |
5485 private: | 5514 private: |
5486 LStackCheck* instr_; | 5515 LStackCheck* instr_; |
5487 }; | 5516 }; |
5488 | 5517 |
5489 ASSERT(instr->HasEnvironment()); | 5518 ASSERT(instr->HasEnvironment()); |
5490 LEnvironment* env = instr->environment(); | 5519 LEnvironment* env = instr->environment(); |
(...skipping 10 matching lines...) |
5501 ASSERT(instr->context()->IsRegister()); | 5530 ASSERT(instr->context()->IsRegister()); |
5502 ASSERT(ToRegister(instr->context()).is(esi)); | 5531 ASSERT(ToRegister(instr->context()).is(esi)); |
5503 CallCode(isolate()->builtins()->StackCheck(), | 5532 CallCode(isolate()->builtins()->StackCheck(), |
5504 RelocInfo::CODE_TARGET, | 5533 RelocInfo::CODE_TARGET, |
5505 instr); | 5534 instr); |
5506 __ bind(&done); | 5535 __ bind(&done); |
5507 } else { | 5536 } else { |
5508 ASSERT(instr->hydrogen()->is_backwards_branch()); | 5537 ASSERT(instr->hydrogen()->is_backwards_branch()); |
5509 // Perform stack overflow check if this goto needs it before jumping. | 5538 // Perform stack overflow check if this goto needs it before jumping. |
5510 DeferredStackCheck* deferred_stack_check = | 5539 DeferredStackCheck* deferred_stack_check = |
5511 new(zone()) DeferredStackCheck(this, instr); | 5540 new(zone()) DeferredStackCheck(this, instr, x87_stack_); |
5512 ExternalReference stack_limit = | 5541 ExternalReference stack_limit = |
5513 ExternalReference::address_of_stack_limit(isolate()); | 5542 ExternalReference::address_of_stack_limit(isolate()); |
5514 __ cmp(esp, Operand::StaticVariable(stack_limit)); | 5543 __ cmp(esp, Operand::StaticVariable(stack_limit)); |
5515 __ j(below, deferred_stack_check->entry()); | 5544 __ j(below, deferred_stack_check->entry()); |
5516 EnsureSpaceForLazyDeopt(Deoptimizer::patch_size()); | 5545 EnsureSpaceForLazyDeopt(Deoptimizer::patch_size()); |
5517 __ bind(instr->done_label()); | 5546 __ bind(instr->done_label()); |
5518 deferred_stack_check->SetExit(instr->done_label()); | 5547 deferred_stack_check->SetExit(instr->done_label()); |
5519 RegisterEnvironmentForDeoptimization(env, Safepoint::kLazyDeopt); | 5548 RegisterEnvironmentForDeoptimization(env, Safepoint::kLazyDeopt); |
5520 // Don't record a deoptimization index for the safepoint here. | 5549 // Don't record a deoptimization index for the safepoint here. |
5521 // This will be done explicitly when emitting call and the safepoint in | 5550 // This will be done explicitly when emitting call and the safepoint in |
(...skipping 80 matching lines...) |
5602 } | 5631 } |
5603 | 5632 |
5604 | 5633 |
5605 void LCodeGen::DoDeferredLoadMutableDouble(LLoadFieldByIndex* instr, | 5634 void LCodeGen::DoDeferredLoadMutableDouble(LLoadFieldByIndex* instr, |
5606 Register object, | 5635 Register object, |
5607 Register index) { | 5636 Register index) { |
5608 PushSafepointRegistersScope scope(this); | 5637 PushSafepointRegistersScope scope(this); |
5609 __ push(object); | 5638 __ push(object); |
5610 __ push(index); | 5639 __ push(index); |
5611 __ xor_(esi, esi); | 5640 __ xor_(esi, esi); |
5612 __ CallRuntimeSaveDoubles(Runtime::kLoadMutableDouble); | 5641 __ CallRuntime(Runtime::kLoadMutableDouble); |
5613 RecordSafepointWithRegisters( | 5642 RecordSafepointWithRegisters( |
5614 instr->pointer_map(), 2, Safepoint::kNoLazyDeopt); | 5643 instr->pointer_map(), 2, Safepoint::kNoLazyDeopt); |
5615 __ StoreToSafepointRegisterSlot(object, eax); | 5644 __ StoreToSafepointRegisterSlot(object, eax); |
5616 } | 5645 } |
5617 | 5646 |
5618 | 5647 |
5619 void LCodeGen::DoLoadFieldByIndex(LLoadFieldByIndex* instr) { | 5648 void LCodeGen::DoLoadFieldByIndex(LLoadFieldByIndex* instr) { |
5620 class DeferredLoadMutableDouble V8_FINAL : public LDeferredCode { | 5649 class DeferredLoadMutableDouble V8_FINAL : public LDeferredCode { |
5621 public: | 5650 public: |
5622 DeferredLoadMutableDouble(LCodeGen* codegen, | 5651 DeferredLoadMutableDouble(LCodeGen* codegen, |
5623 LLoadFieldByIndex* instr, | 5652 LLoadFieldByIndex* instr, |
5624 Register object, | 5653 Register object, |
5625 Register index) | 5654 Register index, |
5626 : LDeferredCode(codegen), | 5655 const X87Stack& x87_stack) |
| 5656 : LDeferredCode(codegen, x87_stack), |
5627 instr_(instr), | 5657 instr_(instr), |
5628 object_(object), | 5658 object_(object), |
5629 index_(index) { | 5659 index_(index) { |
5630 } | 5660 } |
5631 virtual void Generate() V8_OVERRIDE { | 5661 virtual void Generate() V8_OVERRIDE { |
5632 codegen()->DoDeferredLoadMutableDouble(instr_, object_, index_); | 5662 codegen()->DoDeferredLoadMutableDouble(instr_, object_, index_); |
5633 } | 5663 } |
5634 virtual LInstruction* instr() V8_OVERRIDE { return instr_; } | 5664 virtual LInstruction* instr() V8_OVERRIDE { return instr_; } |
5635 private: | 5665 private: |
5636 LLoadFieldByIndex* instr_; | 5666 LLoadFieldByIndex* instr_; |
5637 Register object_; | 5667 Register object_; |
5638 Register index_; | 5668 Register index_; |
5639 }; | 5669 }; |
5640 | 5670 |
5641 Register object = ToRegister(instr->object()); | 5671 Register object = ToRegister(instr->object()); |
5642 Register index = ToRegister(instr->index()); | 5672 Register index = ToRegister(instr->index()); |
5643 | 5673 |
5644 DeferredLoadMutableDouble* deferred; | 5674 DeferredLoadMutableDouble* deferred; |
5645 deferred = new(zone()) DeferredLoadMutableDouble( | 5675 deferred = new(zone()) DeferredLoadMutableDouble( |
5646 this, instr, object, index); | 5676 this, instr, object, index, x87_stack_); |
5647 | 5677 |
5648 Label out_of_object, done; | 5678 Label out_of_object, done; |
5649 __ test(index, Immediate(Smi::FromInt(1))); | 5679 __ test(index, Immediate(Smi::FromInt(1))); |
5650 __ j(not_zero, deferred->entry()); | 5680 __ j(not_zero, deferred->entry()); |
5651 | 5681 |
5652 __ sar(index, 1); | 5682 __ sar(index, 1); |
5653 | 5683 |
5654 __ cmp(index, Immediate(0)); | 5684 __ cmp(index, Immediate(0)); |
5655 __ j(less, &out_of_object, Label::kNear); | 5685 __ j(less, &out_of_object, Label::kNear); |
5656 __ mov(object, FieldOperand(object, | 5686 __ mov(object, FieldOperand(object, |
(...skipping 12 matching lines...) |
5669 FixedArray::kHeaderSize - kPointerSize)); | 5699 FixedArray::kHeaderSize - kPointerSize)); |
5670 __ bind(deferred->exit()); | 5700 __ bind(deferred->exit()); |
5671 __ bind(&done); | 5701 __ bind(&done); |
5672 } | 5702 } |
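
DoLoadFieldByIndex decodes a packed smi index: bit 0 of the payload flags a mutable-double field (handled by the runtime call in the deferred path), and after untagging, a negative value selects the out-of-object properties array rather than an in-object slot. A rough decode under the 1-bit smi tag used on this port, hedged because the field loads themselves sit in the elided lines:

    #include <cstdint>

    struct DecodedFieldIndex {
      bool is_mutable_double;  // taken to the deferred runtime path
      bool in_object;          // false -> out-of-object properties array
      int32_t scaled_index;    // still scaled; the loads compensate for it
    };

    DecodedFieldIndex DecodeFieldIndex(int32_t tagged_index) {
      DecodedFieldIndex d;
      d.is_mutable_double = (tagged_index & 2) != 0;  // test(index, Smi::FromInt(1))
      const int32_t value = tagged_index >> 1;        // sar(index, 1)
      d.in_object = value >= 0;                       // cmp(index, 0); j(less, ...)
      d.scaled_index = value;
      return d;
    }
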
5673 | 5703 |
5674 | 5704 |
5675 #undef __ | 5705 #undef __ |
5676 | 5706 |
5677 } } // namespace v8::internal | 5707 } } // namespace v8::internal |
5678 | 5708 |
5679 #endif // V8_TARGET_ARCH_IA32 | 5709 #endif // V8_TARGET_ARCH_X87 |