Chromium Code Reviews

Unified Diff: src/ia32/lithium-codegen-ia32.cc

Issue 13426006: Improvements for x87 stack handling (Closed) Base URL: https://v8.googlecode.com/svn/branches/bleeding_edge
Patch Set: Improvements Created 7 years, 8 months ago
// Copyright 2012 the V8 project authors. All rights reserved.
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
//
//     * Redistributions of source code must retain the above copyright
//       notice, this list of conditions and the following disclaimer.
//     * Redistributions in binary form must reproduce the above
//       copyright notice, this list of conditions and the following
//       disclaimer in the documentation and/or other materials provided
(...skipping 84 matching lines...)
      GenerateDeferredCode() &&
      GenerateJumpTable() &&
      GenerateSafepointTable();
}


void LCodeGen::FinishCode(Handle<Code> code) {
  ASSERT(is_done());
  code->set_stack_slots(GetStackSlotCount());
  code->set_safepoint_table_offset(safepoints_.GetCodeOffset());
-  if (FLAG_weak_embedded_maps_in_optimized_code) {
+  if (FLAG_weak_embedded_maps_in_optimized_code &&
+      code->kind() == Code::OPTIMIZED_FUNCTION) {
mvstanton 2013/04/08 16:10:14 I don't believe this is needed for this CL.
    RegisterDependentCodeForEmbeddedMaps(code);
  }
  PopulateDeoptimizationData(code);
  if (!info()->IsStub()) {
    Deoptimizer::EnsureRelocSpaceForLazyDeoptimization(code);
  }
  for (int i = 0 ; i < prototype_maps_.length(); i++) {
    prototype_maps_.at(i)->AddDependentCode(
        DependentCode::kPrototypeCheckGroup, code);
  }
(...skipping 243 matching lines...)
              changed_value->id(), changed_value->Mnemonic(),
              use_id, use_mnemo);
        } else {
          Comment(";;; @%d: %s. <#%d>", current_instruction_,
                  instr->Mnemonic(), hydrogen->id());
        }
      } else {
        Comment(";;; @%d: %s.", current_instruction_, instr->Mnemonic());
      }
    }
+
    instr->CompileToNative(this);
+
+    if (!CpuFeatures::IsSupported(SSE2)) {
+      if (instr->ClobbersDoubleRegisters()) {
danno 2013/04/08 12:57:27 I am not sure that is safe to do _after_ the Compi
mvstanton 2013/04/08 16:10:14 Thanks for your extensive help with this brain tea
+        if (x87_stack_depth_ > 0) {
+          PopX87();
+        }
+      }
+
+      ASSERT(!instr->HasDoubleRegisterResult() || x87_stack_depth_ == 1);
+
+      if (FLAG_debug_code && FLAG_enable_slow_asserts) {
+        // Make sure the floating point stack is either empty or has one item,
+        // the result value of the instruction.
+        int tos = (x87_stack_depth_ > 0) ? 7 : 0;
+        const int kTopMask = 0x3800;
+        __ push(eax);
+        __ fwait();
+        __ fnstsw_ax();
+        __ and_(eax, kTopMask);
+        __ shr(eax, 11);
+        __ cmp(eax, Immediate(tos));
+        Label all_ok;
+        __ j(equal, &all_ok);
+        __ Check(equal, "FPU Top is not zero after instruction");
+        __ bind(&all_ok);
+        __ fnclex();
+        __ pop(eax);
+      }
+    }
    }
  }
  EnsureSpaceForLazyDeopt();
  return !is_aborted();
}
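The slow-assert block added above decodes the FPU's TOP-of-stack field from the x87 status word: fnstsw_ax copies the status word into ax, the 0x3800 mask isolates bits 11-13, and the shift by 11 yields the stack-top index. A minimal standalone C++ sketch of that decoding (illustrative only, not V8 code):

    #include <cstdint>
    #include <cstdio>

    // TOP lives in bits 11..13 of the x87 status word. An empty stack has
    // TOP == 0; after a single fld, TOP wraps around to 7, which is why the
    // check expects 7 when x87_stack_depth_ > 0 and 0 otherwise.
    int X87Top(uint16_t status_word) {
      const uint16_t kTopMask = 0x3800;
      return (status_word & kTopMask) >> 11;
    }

    int main() {
      printf("%d\n", X87Top(0x0000));  // empty stack -> 0
      printf("%d\n", X87Top(0x3800));  // one value pushed -> 7
    }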


bool LCodeGen::GenerateJumpTable() {
  Label needs_frame_not_call;
  Label needs_frame_is_call;
(...skipping 134 matching lines...)
XMMRegister LCodeGen::ToDoubleRegister(int index) const {
  return XMMRegister::FromAllocationIndex(index);
}


bool LCodeGen::IsX87TopOfStack(LOperand* op) const {
  return op->IsDoubleRegister();
}


+void LCodeGen::ReadX87Operand(Operand dst) {
+  ASSERT(x87_stack_depth_ == 1);
+  __ fst_d(dst);
+}
+
+
+void LCodeGen::PushX87Operand(Operand src) {
danno 2013/04/08 12:57:27 Maybe make this PushX87DoubleOperand?
mvstanton 2013/04/08 16:10:14 Done.
+  ASSERT(x87_stack_depth_ == 0);
+  x87_stack_depth_++;
+  __ fld_d(src);
+}
+
+
+void LCodeGen::PushX87FloatOperand(Operand src) {
+  ASSERT(x87_stack_depth_ == 0);
+  x87_stack_depth_++;
+  __ fld_s(src);
+}
+
+
+void LCodeGen::PopX87() {
+  ASSERT(x87_stack_depth_ == 1);
+  x87_stack_depth_--;
+  __ fstp(0);
+}
+
+
+void LCodeGen::MarkReturnX87Result() {
danno 2013/04/08 12:57:27 Maybe CurrentInstructionReturnsX87Result?
mvstanton 2013/04/08 16:10:14 Done.
+  ASSERT(x87_stack_depth_ <= 1);
+  if (x87_stack_depth_ == 0) {
+    x87_stack_depth_ = 1;
+  }
+}
+
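Taken together, these helpers maintain the invariant that at most one value ever lives on the x87 stack, with x87_stack_depth_ as the only bookkeeping. A minimal standalone model of that bookkeeping (the class and method names here are illustrative, not V8 API):

    #include <cassert>

    class X87StackModel {  // hypothetical name; mirrors x87_stack_depth_
     public:
      void PushDouble() {     // cf. PushX87Operand, which emits fld_d
        assert(depth_ == 0);  // a second push would clobber the single slot
        ++depth_;
      }
      void Pop() {            // cf. PopX87, which emits fstp(0)
        assert(depth_ == 1);
        --depth_;
      }
      void MarkReturnsResult() {  // cf. MarkReturnX87Result
        assert(depth_ <= 1);
        if (depth_ == 0) depth_ = 1;  // result was left on the FPU stack
      }
     private:
      int depth_ = 0;
    };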
Register LCodeGen::ToRegister(LOperand* op) const {
  ASSERT(op->IsRegister());
  return ToRegister(op->index());
}


XMMRegister LCodeGen::ToDoubleRegister(LOperand* op) const {
  ASSERT(op->IsDoubleRegister());
  return ToDoubleRegister(op->index());
}
(...skipping 305 matching lines...)
                     translation.index(),
                     (mode == Safepoint::kLazyDeopt) ? pc_offset : -1);
    deoptimizations_.Add(environment, zone());
  }
}


void LCodeGen::DeoptimizeIf(Condition cc, LEnvironment* environment) {
  RegisterEnvironmentForDeoptimization(environment, Safepoint::kNoLazyDeopt);
  ASSERT(environment->HasBeenRegistered());
+  // It's an error to deoptimize with the x87 fp stack in use.
+  ASSERT(x87_stack_depth_ == 0);
  int id = environment->deoptimization_index();
  ASSERT(info()->IsOptimizing() || info()->IsStub());
  Deoptimizer::BailoutType bailout_type = info()->IsStub()
      ? Deoptimizer::LAZY
      : Deoptimizer::EAGER;
  Address entry =
      Deoptimizer::GetDeoptimizationEntry(isolate(), id, bailout_type);
  if (entry == NULL) {
    Abort("bailout was not prepared");
    return;
(...skipping 823 matching lines...)
}


void LCodeGen::DoConstantI(LConstantI* instr) {
  ASSERT(instr->result()->IsRegister());
  __ Set(ToRegister(instr->result()), Immediate(instr->value()));
}


void LCodeGen::DoConstantD(LConstantD* instr) {
-  ASSERT(instr->result()->IsDoubleRegister());
-  XMMRegister res = ToDoubleRegister(instr->result());
  double v = instr->value();
-  // Use xor to produce +0.0 in a fast and compact way, but avoid to
-  // do so if the constant is -0.0.
-  if (BitCast<uint64_t, double>(v) == 0) {
-    __ xorps(res, res);
+  uint64_t int_val = BitCast<uint64_t, double>(v);
+  int32_t lower = static_cast<int32_t>(int_val);
+  int32_t upper = static_cast<int32_t>(int_val >> (kBitsPerInt));
+
+  if (!CpuFeatures::IsSafeForSnapshot(SSE2)) {
+    __ push(Immediate(lower));
+    __ push(Immediate(upper));
+    PushX87Operand(Operand(esp, 0));
+    __ add(Operand(esp), Immediate(kDoubleSize));
+    MarkReturnX87Result();
  } else {
-    Register temp = ToRegister(instr->temp());
-    uint64_t int_val = BitCast<uint64_t, double>(v);
-    int32_t lower = static_cast<int32_t>(int_val);
-    int32_t upper = static_cast<int32_t>(int_val >> (kBitsPerInt));
-    if (CpuFeatures::IsSupported(SSE4_1)) {
-      CpuFeatureScope scope1(masm(), SSE2);
-      CpuFeatureScope scope2(masm(), SSE4_1);
-      if (lower != 0) {
-        __ Set(temp, Immediate(lower));
-        __ movd(res, Operand(temp));
-        __ Set(temp, Immediate(upper));
-        __ pinsrd(res, Operand(temp), 1);
-      } else {
-        __ xorps(res, res);
-        __ Set(temp, Immediate(upper));
-        __ pinsrd(res, Operand(temp), 1);
-      }
-    } else {
-      CpuFeatureScope scope(masm(), SSE2);
-      __ Set(temp, Immediate(upper));
-      __ movd(res, Operand(temp));
-      __ psllq(res, 32);
-      if (lower != 0) {
-        __ Set(temp, Immediate(lower));
-        __ movd(xmm0, Operand(temp));
-        __ por(res, xmm0);
-      }
-    }
+    CpuFeatureScope scope1(masm(), SSE2);
+    ASSERT(instr->result()->IsDoubleRegister());
+    XMMRegister res = ToDoubleRegister(instr->result());
+    if (int_val == 0) {
+      __ xorps(res, res);
+    } else {
+      Register temp = ToRegister(instr->temp());
+      if (CpuFeatures::IsSupported(SSE4_1)) {
+        CpuFeatureScope scope2(masm(), SSE4_1);
+        if (lower != 0) {
+          __ Set(temp, Immediate(lower));
+          __ movd(res, Operand(temp));
+          __ Set(temp, Immediate(upper));
+          __ pinsrd(res, Operand(temp), 1);
+        } else {
+          __ xorps(res, res);
+          __ Set(temp, Immediate(upper));
+          __ pinsrd(res, Operand(temp), 1);
+        }
+      } else {
+        __ Set(temp, Immediate(upper));
+        __ movd(res, Operand(temp));
+        __ psllq(res, 32);
+        if (lower != 0) {
+          __ Set(temp, Immediate(lower));
+          __ movd(xmm0, Operand(temp));
+          __ por(res, xmm0);
+        }
+      }
+    }
  }
}
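The new DoConstantD always splits the constant into its two 32-bit halves up front. A scalar sketch of that split, using memcpy where V8 uses BitCast (standalone, assumes a little-endian target like ia32):

    #include <cstdint>
    #include <cstdio>
    #include <cstring>

    int main() {
      double v = 1.5;
      uint64_t int_val;
      memcpy(&int_val, &v, sizeof int_val);  // BitCast<uint64_t, double>(v)
      uint32_t lower = static_cast<uint32_t>(int_val);
      uint32_t upper = static_cast<uint32_t>(int_val >> 32);  // kBitsPerInt
      printf("lower=0x%08x upper=0x%08x\n", lower, upper);
      // prints: lower=0x00000000 upper=0x3ff80000
    }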


void LCodeGen::DoConstantT(LConstantT* instr) {
  Register reg = ToRegister(instr->result());
  Handle<Object> handle = instr->value();
  if (handle->IsHeapObject()) {
(...skipping 971 matching lines...)
    int parameter_count = ToInteger32(instr->constant_parameter_count());
    if (dynamic_frame_alignment && FLAG_debug_code) {
      __ cmp(Operand(esp,
                     (parameter_count + extra_value_count) * kPointerSize),
             Immediate(kAlignmentZapValue));
      __ Assert(equal, "expected alignment marker");
    }
    __ Ret((parameter_count + extra_value_count) * kPointerSize, ecx);
  } else {
    Register reg = ToRegister(instr->parameter_count());
+    __ SmiUntag(reg);  // it is a smi
danno 2013/04/08 12:57:27 Is this an unrelated bug?
mvstanton 2013/04/08 16:10:14 Yep, removed.
    Register return_addr_reg = reg.is(ecx) ? ebx : ecx;
    if (dynamic_frame_alignment && FLAG_debug_code) {
      ASSERT(extra_value_count == 2);
      __ cmp(Operand(esp, reg, times_pointer_size,
                     extra_value_count * kPointerSize),
             Immediate(kAlignmentZapValue));
      __ Assert(equal, "expected alignment marker");
    }

    // emit code to restore stack based on instr->parameter_count()
(...skipping 392 matching lines...)
    LLoadExternalArrayPointer* instr) {
  Register result = ToRegister(instr->result());
  Register input = ToRegister(instr->object());
  __ mov(result, FieldOperand(input,
                              ExternalArray::kExternalPointerOffset));
}


void LCodeGen::DoAccessArgumentsAt(LAccessArgumentsAt* instr) {
  Register arguments = ToRegister(instr->arguments());
-  Register length = ToRegister(instr->length());
-  Operand index = ToOperand(instr->index());
  Register result = ToRegister(instr->result());
-  // There are two words between the frame pointer and the last argument.
-  // Subtracting from length accounts for one of them; add one more.
-  __ sub(length, index);
-  __ mov(result, Operand(arguments, length, times_4, kPointerSize));
+  if (instr->length()->IsConstantOperand() &&
+      instr->index()->IsConstantOperand()) {
+    int const_index = ToInteger32(LConstantOperand::cast(instr->index()));
+    int const_length = ToInteger32(LConstantOperand::cast(instr->length()));
+    int index = (const_length - const_index) + 1;
+    __ mov(result, Operand(arguments, index * kPointerSize));
+  } else {
+    Register length = ToRegister(instr->length());
+    Operand index = ToOperand(instr->index());
+    // There are two words between the frame pointer and the last argument.
+    // Subtracting from length accounts for one of them; add one more.
+    __ sub(length, index);
+    __ mov(result, Operand(arguments, length, times_4, kPointerSize));
+  }
danno 2013/04/08 12:57:27 Unrelated changes?
mvstanton 2013/04/08 16:10:14 Yep, removed.
}
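For the constant case the new code folds the whole address computation: the element sits (length - index) + 1 pointer-sized words above the arguments pointer, matching the two words the original comment mentions. A standalone sketch of that offset arithmetic (the helper name is illustrative):

    #include <cstdio>

    int ArgumentByteOffset(int const_length, int const_index) {
      const int kPointerSize = 4;  // ia32
      int index = (const_length - const_index) + 1;
      return index * kPointerSize;  // Operand(arguments, index * kPointerSize)
    }

    int main() {
      // The register path computes the same address with a sub plus a scaled
      // operand: Operand(arguments, length - index, times_4, kPointerSize).
      printf("%d\n", ArgumentByteOffset(3, 0));  // 16
    }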


void LCodeGen::DoLoadKeyedExternalArray(LLoadKeyed* instr) {
  ElementsKind elements_kind = instr->elements_kind();
  LOperand* key = instr->key();
  if (!key->IsConstantOperand() &&
      ExternalArrayOpRequiresTemp(instr->hydrogen()->key()->representation(),
                                  elements_kind)) {
    __ SmiUntag(ToRegister(key));
  }
  Operand operand(BuildFastArrayOperand(
      instr->elements(),
      key,
      instr->hydrogen()->key()->representation(),
      elements_kind,
      0,
      instr->additional_index()));
  if (elements_kind == EXTERNAL_FLOAT_ELEMENTS) {
    if (CpuFeatures::IsSupported(SSE2)) {
      CpuFeatureScope scope(masm(), SSE2);
      XMMRegister result(ToDoubleRegister(instr->result()));
      __ movss(result, operand);
      __ cvtss2sd(result, result);
    } else {
-      __ fld_s(operand);
-      HandleX87FPReturnValue(instr);
+      PushX87FloatOperand(operand);
+      MarkReturnX87Result();
    }
  } else if (elements_kind == EXTERNAL_DOUBLE_ELEMENTS) {
    if (CpuFeatures::IsSupported(SSE2)) {
      CpuFeatureScope scope(masm(), SSE2);
      __ movdbl(ToDoubleRegister(instr->result()), operand);
    } else {
-      __ fld_d(operand);
-      HandleX87FPReturnValue(instr);
+      PushX87Operand(operand);
+      MarkReturnX87Result();
    }
  } else {
    Register result(ToRegister(instr->result()));
    switch (elements_kind) {
      case EXTERNAL_BYTE_ELEMENTS:
        __ movsx_b(result, operand);
        break;
      case EXTERNAL_PIXEL_ELEMENTS:
      case EXTERNAL_UNSIGNED_BYTE_ELEMENTS:
        __ movzx_b(result, operand);
(...skipping 24 matching lines...)
      case FAST_HOLEY_DOUBLE_ELEMENTS:
      case DICTIONARY_ELEMENTS:
      case NON_STRICT_ARGUMENTS_ELEMENTS:
        UNREACHABLE();
        break;
    }
  }
}


-void LCodeGen::HandleX87FPReturnValue(LInstruction* instr) {
-  if (IsX87TopOfStack(instr->result())) {
-    // Return value is already on stack. If the value has no uses, then
-    // pop it off the FP stack. Otherwise, make sure that there are enough
-    // copies of the value on the stack to feed all of the usages, e.g.
-    // when the following instruction uses the return value in multiple
-    // inputs.
-    int count = instr->hydrogen_value()->UseCount();
-    if (count == 0) {
-      __ fstp(0);
-    } else {
-      count--;
-      ASSERT(count <= 7);
-      while (count-- > 0) {
-        __ fld(0);
-      }
-    }
-  } else {
-    __ fstp_d(ToOperand(instr->result()));
-  }
-}
-
-
void LCodeGen::DoLoadKeyedFixedDoubleArray(LLoadKeyed* instr) {
  if (instr->hydrogen()->RequiresHoleCheck()) {
    int offset = FixedDoubleArray::kHeaderSize - kHeapObjectTag +
        sizeof(kHoleNanLower32);
    Operand hole_check_operand = BuildFastArrayOperand(
        instr->elements(), instr->key(),
        instr->hydrogen()->key()->representation(),
        FAST_DOUBLE_ELEMENTS,
        offset,
        instr->additional_index());
    __ cmp(hole_check_operand, Immediate(kHoleNanUpper32));
    DeoptimizeIf(equal, instr->environment());
  }

  Operand double_load_operand = BuildFastArrayOperand(
      instr->elements(),
      instr->key(),
      instr->hydrogen()->key()->representation(),
      FAST_DOUBLE_ELEMENTS,
      FixedDoubleArray::kHeaderSize - kHeapObjectTag,
      instr->additional_index());
  if (CpuFeatures::IsSupported(SSE2)) {
    CpuFeatureScope scope(masm(), SSE2);
    XMMRegister result = ToDoubleRegister(instr->result());
    __ movdbl(result, double_load_operand);
  } else {
-    __ fld_d(double_load_operand);
-    HandleX87FPReturnValue(instr);
+    PushX87Operand(double_load_operand);
+    MarkReturnX87Result();
  }
}
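The hole check above avoids loading the full double: it compares only the element's upper 32 bits (hence the sizeof(kHoleNanLower32) offset) against kHoleNanUpper32. The same test in scalar C++; the constant's value here is an assumption for illustration, not necessarily V8's exact bit pattern:

    #include <cstdint>
    #include <cstring>

    bool IsHoleUpperWord(double element) {
      const uint32_t kHoleNanUpper32 = 0x7FF7FFFF;  // assumed value
      uint32_t words[2];
      memcpy(words, &element, sizeof words);  // words[1] is the upper half (LE)
      return words[1] == kHoleNanUpper32;
    }

    int main() { return IsHoleUpperWord(0.0) ? 1 : 0; }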


void LCodeGen::DoLoadKeyedFixedArray(LLoadKeyed* instr) {
  Register result = ToRegister(instr->result());

  // Load the result.
  __ mov(result,
         BuildFastArrayOperand(instr->elements(),
(...skipping 921 matching lines...)

void LCodeGen::DoInnerAllocatedObject(LInnerAllocatedObject* instr) {
  Register result = ToRegister(instr->result());
  Register base = ToRegister(instr->base_object());
  __ lea(result, Operand(base, instr->offset()));
}

void LCodeGen::DoStoreNamedField(LStoreNamedField* instr) {
  Register object = ToRegister(instr->object());
-  Register value = ToRegister(instr->value());
  int offset = instr->offset();

  if (!instr->transition().is_null()) {
    if (!instr->hydrogen()->NeedsWriteBarrierForMap()) {
      __ mov(FieldOperand(object, HeapObject::kMapOffset), instr->transition());
    } else {
      Register temp = ToRegister(instr->temp());
      Register temp_map = ToRegister(instr->temp_map());
      __ mov(temp_map, instr->transition());
      __ mov(FieldOperand(object, HeapObject::kMapOffset), temp_map);
      // Update the write barrier for the map field.
      __ RecordWriteField(object,
                          HeapObject::kMapOffset,
                          temp_map,
                          temp,
                          GetSaveFPRegsMode(),
                          OMIT_REMEMBERED_SET,
                          OMIT_SMI_CHECK);
    }
  }

  // Do the store.
  HType type = instr->hydrogen()->value()->type();
  SmiCheck check_needed =
      type.IsHeapObject() ? OMIT_SMI_CHECK : INLINE_SMI_CHECK;
  if (instr->is_in_object()) {
-    __ mov(FieldOperand(object, offset), value);
+    if (instr->value()->IsConstantOperand()) {
danno 2013/04/08 12:57:27 Since this has now gotten bigger, can you merge th
mvstanton 2013/04/08 16:10:14 All unrelated, removed!
+      LConstantOperand* operand_value = LConstantOperand::cast(instr->value());
+      if (IsInteger32(operand_value)) {
+        int const_value = ToInteger32(operand_value);
+        __ mov(FieldOperand(object, offset), Immediate(const_value));
+      } else {
+        Handle<Object> handle_value = ToHandle(operand_value);
+        __ mov(FieldOperand(object, offset), handle_value);
+      }
+    } else {
+      __ mov(FieldOperand(object, offset), ToRegister(instr->value()));
+    }
+
    if (instr->hydrogen()->NeedsWriteBarrier()) {
+      Register value = ToRegister(instr->value());
      Register temp = ToRegister(instr->temp());
      // Update the write barrier for the object for in-object properties.
      __ RecordWriteField(object,
                          offset,
                          value,
                          temp,
                          GetSaveFPRegsMode(),
                          EMIT_REMEMBERED_SET,
                          check_needed);
    }
  } else {
    Register temp = ToRegister(instr->temp());
    __ mov(temp, FieldOperand(object, JSObject::kPropertiesOffset));
-    __ mov(FieldOperand(temp, offset), value);
+
+    if (instr->value()->IsConstantOperand()) {
+      LConstantOperand* operand_value = LConstantOperand::cast(instr->value());
+      if (IsInteger32(operand_value)) {
+        int const_value = ToInteger32(operand_value);
+        __ mov(FieldOperand(temp, offset), Immediate(const_value));
+      } else {
+        Handle<Object> handle_value = ToHandle(operand_value);
+        __ mov(FieldOperand(temp, offset), handle_value);
+      }
+    } else {
+      __ mov(FieldOperand(temp, offset), ToRegister(instr->value()));
+    }
+
    if (instr->hydrogen()->NeedsWriteBarrier()) {
+      Register value = ToRegister(instr->value());
      // Update the write barrier for the properties array.
      // object is used as a scratch register.
      __ RecordWriteField(temp,
                          offset,
                          value,
                          object,
                          GetSaveFPRegsMode(),
                          EMIT_REMEMBERED_SET,
                          check_needed);
    }
(...skipping 43 matching lines...)
    __ SmiUntag(ToRegister(key));
  }
  Operand operand(BuildFastArrayOperand(
      instr->elements(),
      key,
      instr->hydrogen()->key()->representation(),
      elements_kind,
      0,
      instr->additional_index()));
  if (elements_kind == EXTERNAL_FLOAT_ELEMENTS) {
-    CpuFeatureScope scope(masm(), SSE2);
-    __ cvtsd2ss(xmm0, ToDoubleRegister(instr->value()));
-    __ movss(operand, xmm0);
+    if (CpuFeatures::IsSafeForSnapshot(SSE2)) {
+      CpuFeatureScope scope(masm(), SSE2);
+      __ cvtsd2ss(xmm0, ToDoubleRegister(instr->value()));
+      __ movss(operand, xmm0);
+    } else {
+      __ fld(0);
+      __ fstp_s(operand);
+    }
  } else if (elements_kind == EXTERNAL_DOUBLE_ELEMENTS) {
-    CpuFeatureScope scope(masm(), SSE2);
-    __ movdbl(operand, ToDoubleRegister(instr->value()));
+    if (CpuFeatures::IsSafeForSnapshot(SSE2)) {
+      CpuFeatureScope scope(masm(), SSE2);
+      __ movdbl(operand, ToDoubleRegister(instr->value()));
+    } else {
+      __ fst_d(operand);
+    }
  } else {
    Register value = ToRegister(instr->value());
    switch (elements_kind) {
      case EXTERNAL_PIXEL_ELEMENTS:
      case EXTERNAL_UNSIGNED_BYTE_ELEMENTS:
      case EXTERNAL_BYTE_ELEMENTS:
        __ mov_b(operand, value);
        break;
      case EXTERNAL_SHORT_ELEMENTS:
      case EXTERNAL_UNSIGNED_SHORT_ELEMENTS:
(...skipping 14 matching lines...)
      case DICTIONARY_ELEMENTS:
      case NON_STRICT_ARGUMENTS_ELEMENTS:
        UNREACHABLE();
        break;
    }
  }
}


void LCodeGen::DoStoreKeyedFixedDoubleArray(LStoreKeyed* instr) {
-  CpuFeatureScope scope(masm(), SSE2);
-  XMMRegister value = ToDoubleRegister(instr->value());
-
-  if (instr->NeedsCanonicalization()) {
-    Label have_value;
-
-    __ ucomisd(value, value);
-    __ j(parity_odd, &have_value);  // NaN.
-
-    ExternalReference canonical_nan_reference =
-        ExternalReference::address_of_canonical_non_hole_nan();
-    __ movdbl(value, Operand::StaticVariable(canonical_nan_reference));
-    __ bind(&have_value);
-  }
-
+  ExternalReference canonical_nan_reference =
+      ExternalReference::address_of_canonical_non_hole_nan();
  Operand double_store_operand = BuildFastArrayOperand(
      instr->elements(),
      instr->key(),
      instr->hydrogen()->key()->representation(),
      FAST_DOUBLE_ELEMENTS,
      FixedDoubleArray::kHeaderSize - kHeapObjectTag,
      instr->additional_index());
-  __ movdbl(double_store_operand, value);
+
+  if (CpuFeatures::IsSafeForSnapshot(SSE2)) {
+    CpuFeatureScope scope(masm(), SSE2);
+    XMMRegister value = ToDoubleRegister(instr->value());
+
+    if (instr->NeedsCanonicalization()) {
+      Label have_value;
+
+      __ ucomisd(value, value);
+      __ j(parity_odd, &have_value);  // NaN.
+
+      __ movdbl(value, Operand::StaticVariable(canonical_nan_reference));
+      __ bind(&have_value);
+    }
+
+    __ movdbl(double_store_operand, value);
+  } else {
+    // Can't use SSE2 in the serializer
+    if (instr->hydrogen()->IsConstantHoleStore()) {
+      // This means we should store the (double) hole. No floating point
+      // registers required.
+      double nan_double = FixedDoubleArray::hole_nan_as_double();
+      uint64_t int_val = BitCast<uint64_t, double>(nan_double);
+      int32_t lower = static_cast<int32_t>(int_val);
+      int32_t upper = static_cast<int32_t>(int_val >> (kBitsPerInt));
+
+      __ mov(double_store_operand, Immediate(lower));
+      Operand double_store_operand2 = BuildFastArrayOperand(
+          instr->elements(),
+          instr->key(),
+          instr->hydrogen()->key()->representation(),
+          FAST_DOUBLE_ELEMENTS,
+          FixedDoubleArray::kHeaderSize - kHeapObjectTag + kPointerSize,
+          instr->additional_index());
+      __ mov(double_store_operand2, Immediate(upper));
+    } else {
+      Label no_special_nan_handling;
+      ASSERT(x87_stack_depth_ > 0);
+
+      if (instr->NeedsCanonicalization()) {
+        __ fld(0);
+        __ fld(0);
+        __ FCmp();
+
+        __ j(parity_odd, &no_special_nan_handling);
+        __ sub(esp, Immediate(kDoubleSize));
+        __ fst_d(MemOperand(esp, 0));
+        __ cmp(MemOperand(esp, sizeof(kHoleNanLower32)),
+               Immediate(kHoleNanUpper32));
+        __ add(esp, Immediate(kDoubleSize));
+        Label canonicalize;
+        __ j(not_equal, &canonicalize);
+        __ jmp(&no_special_nan_handling);
+        __ bind(&canonicalize);
+        __ fstp(0);
+        __ fld_d(Operand::StaticVariable(canonical_nan_reference));
+      }
+
+      __ bind(&no_special_nan_handling);
+      __ fst_d(double_store_operand);
+    }
+  }
}
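The constant-hole branch shows the trick the whole no-SSE2 path relies on: a 64-bit double can be written with two 32-bit moves. A scalar sketch of the split and reassembly (std::nan stands in for FixedDoubleArray::hole_nan_as_double(); the exact hole bits are not reproduced here):

    #include <cmath>
    #include <cstdint>
    #include <cstdio>
    #include <cstring>

    int main() {
      double nan_double = std::nan("");  // stand-in for the hole NaN
      uint64_t int_val;
      memcpy(&int_val, &nan_double, sizeof int_val);
      uint32_t lower = static_cast<uint32_t>(int_val);
      uint32_t upper = static_cast<uint32_t>(int_val >> 32);

      uint32_t slot[2];
      slot[0] = lower;  // __ mov(double_store_operand, Immediate(lower));
      slot[1] = upper;  // __ mov(double_store_operand2, Immediate(upper));

      double stored;
      memcpy(&stored, slot, sizeof stored);
      printf("still NaN: %d\n", stored != stored);  // prints 1
    }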


void LCodeGen::DoStoreKeyedFixedArray(LStoreKeyed* instr) {
-  Register value = ToRegister(instr->value());
  Register elements = ToRegister(instr->elements());
  Register key = instr->key()->IsRegister() ? ToRegister(instr->key()) : no_reg;

  Operand operand = BuildFastArrayOperand(
      instr->elements(),
      instr->key(),
      instr->hydrogen()->key()->representation(),
      FAST_ELEMENTS,
      FixedArray::kHeaderSize - kHeapObjectTag,
      instr->additional_index());
-  __ mov(operand, value);
+  if (instr->value()->IsRegister()) {
danno 2013/04/08 12:57:27 Is this change related to SSE2 at all?
mvstanton 2013/04/08 16:10:14 Nope, removed.
+    __ mov(operand, ToRegister(instr->value()));
+  } else {
+    LConstantOperand* operand_value = LConstantOperand::cast(instr->value());
+    if (IsInteger32(operand_value)) {
+      int const_value = ToInteger32(operand_value);
+      __ mov(operand, Immediate(const_value));
+    } else {
+      Handle<Object> handle_value = ToHandle(operand_value);
+      __ mov(operand, handle_value);
+    }
+  }

  if (instr->hydrogen()->NeedsWriteBarrier()) {
+    ASSERT(instr->value()->IsRegister());
danno 2013/04/08 12:57:27 Is this change related to SSE2 at all?
mvstanton 2013/04/08 16:10:14 Nosir, removed.
+    Register value = ToRegister(instr->value());
    ASSERT(!instr->key()->IsConstantOperand());
    HType type = instr->hydrogen()->value()->type();
    SmiCheck check_needed =
        type.IsHeapObject() ? OMIT_SMI_CHECK : INLINE_SMI_CHECK;
    // Compute address of modified element and store it into key register.
    __ lea(key, operand);
    __ RecordWrite(elements,
                   key,
                   value,
                   GetSaveFPRegsMode(),
(...skipping 393 matching lines...)

  Label no_special_nan_handling;
  Label done;
  if (convert_hole) {
    bool use_sse2 = CpuFeatures::IsSupported(SSE2);
    if (use_sse2) {
      CpuFeatureScope scope(masm(), SSE2);
      XMMRegister input_reg = ToDoubleRegister(instr->value());
      __ ucomisd(input_reg, input_reg);
    } else {
-      if (!IsX87TopOfStack(instr->value())) {
-        __ fld_d(ToOperand(instr->value()));
-      }
      __ fld(0);
      __ fld(0);
      __ FCmp();
    }

    __ j(parity_odd, &no_special_nan_handling);
    __ sub(esp, Immediate(kDoubleSize));
    if (use_sse2) {
      CpuFeatureScope scope(masm(), SSE2);
      XMMRegister input_reg = ToDoubleRegister(instr->value());
      __ movdbl(MemOperand(esp, 0), input_reg);
    } else {
      __ fld(0);
      __ fstp_d(MemOperand(esp, 0));
    }
    __ cmp(MemOperand(esp, sizeof(kHoleNanLower32)),
           Immediate(kHoleNanUpper32));
    Label canonicalize;
    __ j(not_equal, &canonicalize);
    __ add(esp, Immediate(kDoubleSize));
    __ mov(reg, factory()->the_hole_value());
+    if (!use_sse2) {
+      __ fstp(0);
+    }
    __ jmp(&done);
    __ bind(&canonicalize);
    __ add(esp, Immediate(kDoubleSize));
    ExternalReference nan =
        ExternalReference::address_of_canonical_non_hole_nan();
    if (use_sse2) {
      CpuFeatureScope scope(masm(), SSE2);
      XMMRegister input_reg = ToDoubleRegister(instr->value());
      __ movdbl(input_reg, Operand::StaticVariable(nan));
    } else {
      __ fstp(0);
      __ fld_d(Operand::StaticVariable(nan));
    }
  }

  __ bind(&no_special_nan_handling);
  DeferredNumberTagD* deferred = new(zone()) DeferredNumberTagD(this, instr);
  if (FLAG_inline_new) {
    Register tmp = ToRegister(instr->temp());
    __ AllocateHeapNumber(reg, tmp, no_reg, deferred->entry());
  } else {
    __ jmp(deferred->entry());
  }
  __ bind(deferred->exit());
  if (CpuFeatures::IsSupported(SSE2)) {
    CpuFeatureScope scope(masm(), SSE2);
    XMMRegister input_reg = ToDoubleRegister(instr->value());
    __ movdbl(FieldOperand(reg, HeapNumber::kValueOffset), input_reg);
  } else {
-    if (!IsX87TopOfStack(instr->value())) {
-      __ fld_d(ToOperand(instr->value()));
-    }
-    __ fstp_d(FieldOperand(reg, HeapNumber::kValueOffset));
+    __ fst_d(FieldOperand(reg, HeapNumber::kValueOffset));
  }
  __ bind(&done);
}


void LCodeGen::DoDeferredNumberTagD(LNumberTagD* instr) {
  // TODO(3095996): Get rid of this. For now, we need to make the
  // result register contain a valid pointer because it is already
  // contained in the register pointer map.
  Register reg = ToRegister(instr->result());
(...skipping 27 matching lines...)
  if (instr->needs_check()) {
    __ test(ToRegister(input), Immediate(kSmiTagMask));
    DeoptimizeIf(not_zero, instr->environment());
  } else {
    __ AssertSmi(ToRegister(input));
  }
  __ SmiUntag(ToRegister(input));
}

+void LCodeGen::EmitNumberUntagDNoSSE2(Register input_reg,
+                                      Register temp_reg,
+                                      bool deoptimize_on_undefined,
+                                      bool deoptimize_on_minus_zero,
+                                      LEnvironment* env,
+                                      NumberUntagDMode mode) {
+  Label load_smi, done;
+
+  if (mode == NUMBER_CANDIDATE_IS_ANY_TAGGED) {
+    // Smi check.
+    __ JumpIfSmi(input_reg, &load_smi, Label::kNear);
+
+    // Heap number map check.
+    __ cmp(FieldOperand(input_reg, HeapObject::kMapOffset),
+           factory()->heap_number_map());
+    if (deoptimize_on_undefined) {
+      DeoptimizeIf(not_equal, env);
+    } else {
+      Label heap_number;
+      __ j(equal, &heap_number, Label::kNear);
+
+      __ cmp(input_reg, factory()->undefined_value());
+      DeoptimizeIf(not_equal, env);
+
+      // Convert undefined to NaN.
+      ExternalReference nan =
+          ExternalReference::address_of_canonical_non_hole_nan();
+      __ fld_d(Operand::StaticVariable(nan));
+      __ jmp(&done, Label::kNear);
+      __ bind(&heap_number);
+    }
+    // Load the heap number onto the x87 stack.
+    __ fld_d(FieldOperand(input_reg, HeapNumber::kValueOffset));
+    if (deoptimize_on_minus_zero) {
+      __ fldz();
+      __ FCmp();
+      __ fld_d(FieldOperand(input_reg, HeapNumber::kValueOffset));
+      __ j(not_zero, &done, Label::kNear);
+      // TODO(mvstanton): the code to check for -0.0 on non-sse2 is not
+      // complete, write it here.
+
+      // Pop FPU stack before deoptimizing.
+      __ fstp(0);
+      DeoptimizeIf(not_zero, env);
+    }
+    __ jmp(&done, Label::kNear);
+  } else if (mode == NUMBER_CANDIDATE_IS_SMI_OR_HOLE) {
+    __ test(input_reg, Immediate(kSmiTagMask));
+    DeoptimizeIf(not_equal, env);
+  } else if (mode == NUMBER_CANDIDATE_IS_SMI_CONVERT_HOLE) {
+    __ test(input_reg, Immediate(kSmiTagMask));
+    __ j(zero, &load_smi);
+    ExternalReference hole_nan_reference =
+        ExternalReference::address_of_the_hole_nan();
+    __ fld_d(Operand::StaticVariable(hole_nan_reference));
+    __ jmp(&done, Label::kNear);
+  } else {
+    ASSERT(mode == NUMBER_CANDIDATE_IS_SMI);
+  }
+
+  __ bind(&load_smi);
+  __ SmiUntag(input_reg);  // Untag smi before converting to float.
+  __ push(input_reg);
+  __ fild_s(Operand(esp, 0));
+  __ pop(input_reg);
+  __ SmiTag(input_reg);  // Retag smi.
+  __ bind(&done);
+}
+
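The load_smi tail above is the x87 version of smi-to-double conversion: untag, push the 32-bit integer, fild_s it onto the FPU stack, then retag. In scalar terms (a sketch; 32-bit V8 smis keep the payload above a one-bit tag):

    #include <cstdio>

    int main() {
      const int kSmiTagSize = 1;
      int smi = 42 << kSmiTagSize;        // tagged smi for 42, tag bit == 0
      int untagged = smi >> kSmiTagSize;  // __ SmiUntag(input_reg)
      double on_fpu = static_cast<double>(untagged);  // push + fild_s
      printf("%f\n", on_fpu);             // 42.000000
    }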
void LCodeGen::EmitNumberUntagD(Register input_reg,
                                Register temp_reg,
                                XMMRegister result_reg,
                                bool deoptimize_on_undefined,
                                bool deoptimize_on_minus_zero,
                                LEnvironment* env,
                                NumberUntagDMode mode) {
  Label load_smi, done;

  if (mode == NUMBER_CANDIDATE_IS_ANY_TAGGED) {
(...skipping 92 matching lines...)
      __ RecordComment("Deferred TaggedToI: exponent too big");
      DeoptimizeIf(no_condition, instr->environment());

      // Reserve space for 64 bit answer.
      __ bind(&convert);
      __ sub(Operand(esp), Immediate(kDoubleSize));
      // Do conversion, which cannot fail because we checked the exponent.
      __ fisttp_d(Operand(esp, 0));
      __ mov(input_reg, Operand(esp, 0));  // Low word of answer is the result.
      __ add(Operand(esp), Immediate(kDoubleSize));
-    } else {
+    } else if (CpuFeatures::IsSupported(SSE2)) {
      CpuFeatureScope scope(masm(), SSE2);
      XMMRegister xmm_temp = ToDoubleRegister(instr->temp());
      __ movdbl(xmm0, FieldOperand(input_reg, HeapNumber::kValueOffset));
      __ cvttsd2si(input_reg, Operand(xmm0));
      __ cmp(input_reg, 0x80000000u);
      __ j(not_equal, &done);
      // Check if the input was 0x80000000 (kMinInt).
      // If no, then we got an overflow and we deoptimize.
      ExternalReference min_int = ExternalReference::address_of_min_int();
      __ movdbl(xmm_temp, Operand::StaticVariable(min_int));
      __ ucomisd(xmm_temp, xmm0);
      DeoptimizeIf(not_equal, instr->environment());
      DeoptimizeIf(parity_even, instr->environment());  // NaN.
+    } else {
+      UNREACHABLE();
    }
  } else if (CpuFeatures::IsSupported(SSE2)) {
    CpuFeatureScope scope(masm(), SSE2);
    // Deoptimize if we don't have a heap number.
    __ RecordComment("Deferred TaggedToI: not a heap number");
    DeoptimizeIf(not_equal, instr->environment());

    XMMRegister xmm_temp = ToDoubleRegister(instr->temp());
    __ movdbl(xmm0, FieldOperand(input_reg, HeapNumber::kValueOffset));
    __ cvttsd2si(input_reg, Operand(xmm0));
(...skipping 24 matching lines...)
    DeferredTaggedToI(LCodeGen* codegen, LTaggedToI* instr)
        : LDeferredCode(codegen), instr_(instr) { }
    virtual void Generate() { codegen()->DoDeferredTaggedToI(instr_); }
    virtual LInstruction* instr() { return instr_; }
   private:
    LTaggedToI* instr_;
  };

  LOperand* input = instr->value();
  ASSERT(input->IsRegister());
-  ASSERT(input->Equals(instr->result()));
-
  Register input_reg = ToRegister(input);
+  ASSERT(input_reg.is(ToRegister(instr->result())));

  DeferredTaggedToI* deferred = new(zone()) DeferredTaggedToI(this, instr);

-  // Smi check.
  __ JumpIfNotSmi(input_reg, deferred->entry());
-
-  // Smi to int32 conversion
-  __ SmiUntag(input_reg);  // Untag smi.
-
+  __ SmiUntag(input_reg);
  __ bind(deferred->exit());
}

5315
5316 void LCodeGen::DoDeferredTaggedToINoSSE2(LTaggedToINoSSE2* instr) {
5317 Label done, heap_number;
5318 Register result_reg = ToRegister(instr->result());
5319 Register input_reg = ToRegister(instr->value());
5320
5321 // Heap number map check.
5322 __ cmp(FieldOperand(input_reg, HeapObject::kMapOffset),
5323 factory()->heap_number_map());
5324 __ j(equal, &heap_number, Label::kNear);
5325 // Check for undefined. Undefined is converted to zero for truncating
5326 // conversions.
5327 __ cmp(input_reg, factory()->undefined_value());
5328 __ RecordComment("Deferred TaggedToI: cannot truncate");
5329 DeoptimizeIf(not_equal, instr->environment());
5330 __ xor_(result_reg, result_reg);
5331 __ jmp(&done, Label::kFar);
5332 __ bind(&heap_number);
5333
5334 // Surprisingly, all of this crazy bit manipulation is considerably
5335 // faster than using the built-in x86 CPU conversion functions (about 6x).
5336 Label right_exponent, adjust_bias, zero_result;
5337 Register scratch = ToRegister(instr->scratch());
5338 Register scratch2 = ToRegister(instr->scratch2());
5339 // Get exponent word.
5340 __ mov(scratch, FieldOperand(input_reg, HeapNumber::kExponentOffset));
5341 // Get exponent alone in scratch2.
5342 __ mov(scratch2, scratch);
5343 __ and_(scratch2, HeapNumber::kExponentMask);
5344 __ shr(scratch2, HeapNumber::kExponentShift);
5345 if (instr->truncating()) {
5346 __ j(zero, &zero_result);
5347 } else {
5348 __ j(not_zero, &adjust_bias);
5349 __ test(scratch, Immediate(HeapNumber::kMantissaMask));
5350 DeoptimizeIf(not_zero, instr->environment());
5351 __ cmp(FieldOperand(input_reg, HeapNumber::kMantissaOffset), Immediate(0));
5352 DeoptimizeIf(not_equal, instr->environment());
5353 __ bind(&adjust_bias);
5354 }
5355 __ sub(scratch2, Immediate(HeapNumber::kExponentBias));
5356 if (!instr->truncating()) {
5357 DeoptimizeIf(negative, instr->environment());
5358 } else {
5359 __ j(negative, &zero_result);
5360 }
5361
5362 // Get the second half of the double. For some exponents we don't
5363 // actually need this because the bits get shifted out again, but
5364 // it's probably slower to test than just to do it.
5365 Register scratch3 = ToRegister(instr->scratch3());
5366 __ mov(scratch3, FieldOperand(input_reg, HeapNumber::kMantissaOffset));
5367 __ xor_(result_reg, result_reg);
5368
5369 const uint32_t non_int32_exponent = 31;
5370 __ cmp(scratch2, Immediate(non_int32_exponent));
5371 // If we have a match of the int32 exponent then skip some logic.
5372 __ j(equal, &right_exponent, Label::kNear);
5373 // If the number doesn't find in an int32, deopt.
5374 DeoptimizeIf(greater, instr->environment());
5375
5376 // Exponent word in scratch, exponent in scratch2. We know that 0 <= exponent
5377 // < 31.
5378 __ mov(result_reg, Immediate(31));
5379 __ sub(result_reg, scratch2);
5380
5381 __ bind(&right_exponent);
5382
5383 // Save off exponent for negative check later.
5384 __ mov(scratch2, scratch);
5385
5386 // Here result_reg is the shift, scratch is the exponent word.
5387 // Get the top bits of the mantissa.
5388 __ and_(scratch, HeapNumber::kMantissaMask);
5389 // Put back the implicit 1.
5390 __ or_(scratch, 1 << HeapNumber::kExponentShift);
5391 // Shift up the mantissa bits to take up the space the exponent used to
5392 // take. We have kExponentShift + 1 significant bits int he low end of the
5393 // word. Shift them to the top bits.
5394 const int shift_distance = HeapNumber::kNonMantissaBitsInTopWord - 1;
5395 __ shl(scratch, shift_distance);
5396 if (!instr->truncating()) {
5397 // If not truncating, a non-zero value in the bottom 22 bits means a
5398 // non-integral value --> trigger a deopt.
5399 __ test(scratch3, Immediate((1 << (32 - shift_distance)) - 1));
5400 DeoptimizeIf(not_equal, instr->environment());
5401 }
5402 // Shift down 22 bits to get the most significant 10 bits or the low
5403 // mantissa word.
5404 __ shr(scratch3, 32 - shift_distance);
5405 __ or_(scratch3, scratch);
5406 if (!instr->truncating()) {
5407 // If truncating, a non-zero value in the bits that will be shifted away
5408 // when adjusting the exponent means rounding --> deopt.
5409 __ mov(scratch, 0x1);
5410 ASSERT(result_reg.is(ecx));
5411 __ shl_cl(scratch);
5412 __ dec(scratch);
5413 __ test(scratch3, scratch);
5414 DeoptimizeIf(not_equal, instr->environment());
5415 }
5416 // Move down according to the exponent.
5417 ASSERT(result_reg.is(ecx));
5418 __ shr_cl(scratch3);
5419 // Now the unsigned 32-bit answer is in scratch3. We need to move it to
5420 // result_reg and we may need to fix the sign.
5421 Label negative_result;
5422 __ xor_(result_reg, result_reg);
5423 __ cmp(scratch2, result_reg);
5424 __ j(less, &negative_result, Label::kNear);
5425 __ cmp(scratch3, result_reg);
5426 __ mov(result_reg, scratch3);
5427 // If the result is > MAX_INT, result doesn't fit in signed 32-bit --> deopt.
5428 DeoptimizeIf(less, instr->environment());
5429 __ jmp(&done, Label::kNear);
5430 __ bind(&zero_result);
5431 __ xor_(result_reg, result_reg);
5432 __ jmp(&done, Label::kNear);
5433 __ bind(&negative_result);
5434 __ sub(result_reg, scratch3);
5435 if (!instr->truncating()) {
5436 // -0.0 triggers a deopt.
5437 DeoptimizeIf(zero, instr->environment());
5438 }
5439 // If the negative subtraction overflows into a positive number, there was an
5440 // overflow --> deopt.
5441 DeoptimizeIf(positive, instr->environment());
5442 __ bind(&done);
5443 }
5444
5445
5446 void LCodeGen::DoTaggedToINoSSE2(LTaggedToINoSSE2* instr) {
5447 class DeferredTaggedToINoSSE2: public LDeferredCode {
5448 public:
5449 DeferredTaggedToINoSSE2(LCodeGen* codegen, LTaggedToINoSSE2* instr)
5450 : LDeferredCode(codegen), instr_(instr) { }
5451 virtual void Generate() { codegen()->DoDeferredTaggedToINoSSE2(instr_); }
5452 virtual LInstruction* instr() { return instr_; }
5453 private:
5454 LTaggedToINoSSE2* instr_;
5455 };
5456
5457 LOperand* input = instr->value();
5458 ASSERT(input->IsRegister());
5459 Register input_reg = ToRegister(input);
5460 ASSERT(input_reg.is(ToRegister(instr->result())));
5461
5462 DeferredTaggedToINoSSE2* deferred =
5463 new(zone()) DeferredTaggedToINoSSE2(this, instr);
5464
5465 // Smi check.
5466 __ JumpIfNotSmi(input_reg, deferred->entry());
5467 __ SmiUntag(input_reg); // Untag smi.
5468 __ bind(deferred->exit());
5469 }
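// For context, a hedged sketch of the smi fast path just emitted: on ia32 a
// smi stores a 31-bit integer shifted left by one with tag bit 0 clear, so
// JumpIfNotSmi is a single bit test and SmiUntag a single arithmetic shift.
// Heap objects carry a set low bit and fall through to the deferred code.
#include <cstdint>

inline bool IsSmiSketch(intptr_t tagged) { return (tagged & 1) == 0; }
inline int32_t SmiUntagSketch(intptr_t tagged) {
  return static_cast<int32_t>(tagged >> 1);  // arithmetic shift drops the tag
}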
5470
5097 5471
5098 void LCodeGen::DoNumberUntagD(LNumberUntagD* instr) { 5472 void LCodeGen::DoNumberUntagD(LNumberUntagD* instr) {
5099 LOperand* input = instr->value(); 5473 LOperand* input = instr->value();
5100 ASSERT(input->IsRegister()); 5474 ASSERT(input->IsRegister());
5101 LOperand* temp = instr->temp(); 5475 LOperand* temp = instr->temp();
5102 ASSERT(temp == NULL || temp->IsRegister()); 5476 ASSERT(temp == NULL || temp->IsRegister());
5103 LOperand* result = instr->result(); 5477 LOperand* result = instr->result();
5104 ASSERT(result->IsDoubleRegister()); 5478 ASSERT(result->IsDoubleRegister());
5105 5479
5480 Register input_reg = ToRegister(input);
5481 bool deoptimize_on_minus_zero =
5482 instr->hydrogen()->deoptimize_on_minus_zero();
5483 Register temp_reg = deoptimize_on_minus_zero ? ToRegister(temp) : no_reg;
5484
5485 NumberUntagDMode mode = NUMBER_CANDIDATE_IS_ANY_TAGGED;
5486 HValue* value = instr->hydrogen()->value();
5487 if (value->type().IsSmi()) {
5488 if (value->IsLoadKeyed()) {
5489 HLoadKeyed* load = HLoadKeyed::cast(value);
5490 if (load->UsesMustHandleHole()) {
5491 if (load->hole_mode() == ALLOW_RETURN_HOLE) {
5492 mode = NUMBER_CANDIDATE_IS_SMI_CONVERT_HOLE;
5493 } else {
5494 mode = NUMBER_CANDIDATE_IS_SMI_OR_HOLE;
5495 }
5496 } else {
5497 mode = NUMBER_CANDIDATE_IS_SMI;
5498 }
5499 }
5500 }
5501
5106 if (CpuFeatures::IsSupported(SSE2)) { 5502 if (CpuFeatures::IsSupported(SSE2)) {
5107 CpuFeatureScope scope(masm(), SSE2); 5503 CpuFeatureScope scope(masm(), SSE2);
5108 Register input_reg = ToRegister(input);
5109 XMMRegister result_reg = ToDoubleRegister(result); 5504 XMMRegister result_reg = ToDoubleRegister(result);
5110
5111 bool deoptimize_on_minus_zero =
5112 instr->hydrogen()->deoptimize_on_minus_zero();
5113 Register temp_reg = deoptimize_on_minus_zero ? ToRegister(temp) : no_reg;
5114
5115 NumberUntagDMode mode = NUMBER_CANDIDATE_IS_ANY_TAGGED;
5116 HValue* value = instr->hydrogen()->value();
5117 if (value->type().IsSmi()) {
5118 if (value->IsLoadKeyed()) {
5119 HLoadKeyed* load = HLoadKeyed::cast(value);
5120 if (load->UsesMustHandleHole()) {
5121 if (load->hole_mode() == ALLOW_RETURN_HOLE) {
5122 mode = NUMBER_CANDIDATE_IS_SMI_CONVERT_HOLE;
5123 } else {
5124 mode = NUMBER_CANDIDATE_IS_SMI_OR_HOLE;
5125 }
5126 } else {
5127 mode = NUMBER_CANDIDATE_IS_SMI;
5128 }
5129 }
5130 }
5131
5132 EmitNumberUntagD(input_reg, 5505 EmitNumberUntagD(input_reg,
5133 temp_reg, 5506 temp_reg,
5134 result_reg, 5507 result_reg,
5135 instr->hydrogen()->deoptimize_on_undefined(), 5508 instr->hydrogen()->deoptimize_on_undefined(),
5136 deoptimize_on_minus_zero, 5509 deoptimize_on_minus_zero,
5137 instr->environment(), 5510 instr->environment(),
5138 mode); 5511 mode);
5139 } else { 5512 } else {
5140 UNIMPLEMENTED(); 5513 EmitNumberUntagDNoSSE2(input_reg,
5514 temp_reg,
5515 instr->hydrogen()->deoptimize_on_undefined(),
5516 deoptimize_on_minus_zero,
5517 instr->environment(),
5518 mode);
5519 MarkReturnX87Result();
5141 } 5520 }
5142 } 5521 }
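// A rough, illustrative analogue (not V8's EmitNumberUntagD) of what both
// branches above emit: untag a smi directly, or load the 64-bit payload of a
// heap number.  kValueOffsetSketch stands in for HeapNumber's real field
// offset, and the hole/undefined/minus-zero checks selected by the mode are
// omitted.
#include <cstdint>
#include <cstring>

static double NumberUntagDSketch(intptr_t tagged) {
  if ((tagged & 1) == 0) {                     // smi fast path
    return static_cast<double>(tagged >> 1);
  }
  const intptr_t kValueOffsetSketch = 3;       // hypothetical untagged offset
  double result;
  std::memcpy(&result,
              reinterpret_cast<const char*>(tagged + kValueOffsetSketch),
              sizeof(result));
  return result;
}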
5143 5522
5144 5523
5145 void LCodeGen::DoDoubleToI(LDoubleToI* instr) { 5524 void LCodeGen::DoDoubleToI(LDoubleToI* instr) {
5146 LOperand* input = instr->value(); 5525 LOperand* input = instr->value();
5147 ASSERT(input->IsDoubleRegister()); 5526 ASSERT(input->IsDoubleRegister());
5148 LOperand* result = instr->result(); 5527 LOperand* result = instr->result();
5149 ASSERT(result->IsRegister()); 5528 ASSERT(result->IsRegister());
5150 CpuFeatureScope scope(masm(), SSE2); 5529 CpuFeatureScope scope(masm(), SSE2);
(...skipping 251 matching lines...)
5402 // Heap number 5781 // Heap number
5403 __ bind(&heap_number); 5782 __ bind(&heap_number);
5404 __ movdbl(xmm0, FieldOperand(input_reg, HeapNumber::kValueOffset)); 5783 __ movdbl(xmm0, FieldOperand(input_reg, HeapNumber::kValueOffset));
5405 __ ClampDoubleToUint8(xmm0, xmm1, input_reg); 5784 __ ClampDoubleToUint8(xmm0, xmm1, input_reg);
5406 __ jmp(&done, Label::kNear); 5785 __ jmp(&done, Label::kNear);
5407 5786
5408 // smi 5787 // smi
5409 __ bind(&is_smi); 5788 __ bind(&is_smi);
5410 __ SmiUntag(input_reg); 5789 __ SmiUntag(input_reg);
5411 __ ClampUint8(input_reg); 5790 __ ClampUint8(input_reg);
5412
5413 __ bind(&done); 5791 __ bind(&done);
5414 } 5792 }
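// The SSE2 path above leans on ClampDoubleToUint8; its observable contract,
// sketched here under the assumption of the default round-to-nearest-even
// rounding mode, is: NaN and negative inputs clamp to 0, inputs of 255 or
// more clamp to 255, and everything in between rounds with ties to even.
#include <cmath>
#include <cstdint>

static uint8_t ClampDoubleToUint8Sketch(double d) {
  if (!(d > 0.0)) return 0;                        // NaN and non-positives → 0
  if (d >= 255.0) return 255;
  return static_cast<uint8_t>(std::nearbyint(d));  // ties-to-even by default
}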
5415 5793
5794
5795 void LCodeGen::DoClampTToUint8NoSSE2(LClampTToUint8NoSSE2* instr) {
5796 Register input_reg = ToRegister(instr->unclamped());
5797 Register result_reg = ToRegister(instr->result());
5798 Register scratch = ToRegister(instr->scratch());
5799 Register scratch2 = ToRegister(instr->scratch2());
5800 Register scratch3 = ToRegister(instr->scratch3());
5801 Label is_smi, done, heap_number, valid_exponent,
5802 largest_value, zero_result, maybe_nan_or_infinity;
5803
5804 __ JumpIfSmi(input_reg, &is_smi);
5805
5806 // Check for heap number
5807 __ cmp(FieldOperand(input_reg, HeapObject::kMapOffset),
5808 factory()->heap_number_map());
5809 __ j(equal, &heap_number, Label::kFar);
5810
5811 // Check for undefined. Undefined is converted to zero for clamping
5812 // conversions.
5813 __ cmp(input_reg, factory()->undefined_value());
5814 DeoptimizeIf(not_equal, instr->environment());
5815 __ jmp(&zero_result);
5816
5817 // Heap number
5818 __ bind(&heap_number);
5819
5820 // Surprisingly, all of the hand-crafted bit manipulations below are much
5821 // faster than the x86 FPU built-in instruction, especially since "banker's
5822 // rounding" would additionally be very expensive.
5823
5824 // Get exponent word.
5825 __ mov(scratch, FieldOperand(input_reg, HeapNumber::kExponentOffset));
5826 __ mov(scratch3, FieldOperand(input_reg, HeapNumber::kMantissaOffset));
5827
5828 // Test for negative values --> clamp to zero
5829 __ test(scratch, scratch);
5830 __ j(negative, &zero_result);
5831
5832 // Get exponent alone in scratch2.
5833 __ mov(scratch2, scratch);
5834 __ and_(scratch2, HeapNumber::kExponentMask);
5835 __ shr(scratch2, HeapNumber::kExponentShift);
5836 __ j(zero, &zero_result);
5837 __ sub(scratch2, Immediate(HeapNumber::kExponentBias - 1));
5838 __ j(negative, &zero_result);
5839
5840 const uint32_t non_int8_exponent = 7;
5841 __ cmp(scratch2, Immediate(non_int8_exponent + 1));
5842 // If the exponent is too big, check for special values.
5843 __ j(greater, &maybe_nan_or_infinity, Label::kNear);
5844
5845 __ bind(&valid_exponent);
5846 // Exponent word in scratch, exponent in scratch2. We know that 0 <= exponent
5847 // < 7. The shift bias is the number of bits to shift the mantissa such
5848 // that, with an exponent of 7, the top-most one is in bit 30, allowing
5849 // detection of the rounding overflow of 255.5 to 256 (bit 31 goes from
5850 // 0 to 1).
5851 int shift_bias = (30 - HeapNumber::kExponentShift) - 7 - 1;
5852 __ lea(result_reg, MemOperand(scratch2, shift_bias));
5853 // Here result_reg (ecx) is the shift, scratch is the exponent word. Get the
5854 // top bits of the mantissa.
5855 __ and_(scratch, HeapNumber::kMantissaMask);
5856 // Put back the implicit 1 of the mantissa
5857 __ or_(scratch, 1 << HeapNumber::kExponentShift);
5858 // Shift up to round
5859 __ shl_cl(scratch);
5860 // Use "banker's rounding" per spec: if the fractional part of the number
5861 // is exactly 0.5, take the bit in the "ones" place and add it at the
5862 // "halves" place, which has the effect of rounding to even.
5863 __ mov(scratch2, scratch);
5864 const uint32_t one_half_bit_shift = 30 - sizeof(uint8_t) * 8;
5865 const uint32_t one_bit_shift = one_half_bit_shift + 1;
5866 __ and_(scratch2, Immediate((1 << one_bit_shift) - 1));
5867 __ cmp(scratch2, Immediate(1 << one_half_bit_shift));
5868 Label no_round;
5869 __ j(less, &no_round);
5870 Label round_up;
5871 __ mov(scratch2, Immediate(1 << one_half_bit_shift));
5872 __ j(greater, &round_up);
5873 __ test(scratch3, scratch3);
5874 __ j(not_zero, &round_up);
5875 __ mov(scratch2, scratch);
5876 __ and_(scratch2, Immediate(1 << one_bit_shift));
5877 __ shr(scratch2, 1);
5878 __ bind(&round_up);
5879 __ add(scratch, scratch2);
5880 __ j(overflow, &largest_value);
5881 __ bind(&no_round);
5882 __ shr(scratch, 23);
5883 __ mov(result_reg, scratch);
5884 __ jmp(&done, Label::kNear);
5885
5886 __ bind(&maybe_nan_or_infinity);
5887 // Check for NaN/Infinity, all other values map to 255
5888 __ cmp(scratch2, Immediate(HeapNumber::kInfinityOrNanExponent + 1));
5889 __ j(not_equal, &largest_value, Label::kNear);
5890
5891 // Check for NaN, which differs from Infinity in that at least one mantissa
5892 // bit is set.
5893 __ and_(scratch, HeapNumber::kMantissaMask);
5894 __ or_(scratch, FieldOperand(input_reg, HeapNumber::kMantissaOffset));
5895 __ j(not_zero, &zero_result); // M!=0 --> NaN
5896 // Infinity -> Fall through to map to 255.
5897
5898 __ bind(&largest_value);
5899 __ mov(result_reg, Immediate(255));
5900 __ jmp(&done, Label::kNear);
5901
5902 __ bind(&zero_result);
5903 __ xor_(result_reg, result_reg);
5904 __ jmp(&done);
5905
5906 // smi
5907 __ bind(&is_smi);
5908 if (!input_reg.is(result_reg)) {
5909 __ mov(result_reg, input_reg);
5910 }
5911 __ SmiUntag(result_reg);
5912 __ ClampUint8(result_reg);
5913 __ bind(&done);
5914 }
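// A compact sketch of the integer tie-break implemented above: once the
// mantissa has been shifted into fixed-point form, the fractional field is
// compared against the exact midpoint, and a true tie adds the "ones" bit so
// that the result rounds to even.  Purely illustrative; names are invented.
#include <cstdint>

static uint32_t RoundHalfToEvenSketch(uint32_t fixed_point, int frac_bits) {
  uint32_t half = uint32_t{1} << (frac_bits - 1);
  uint32_t frac = fixed_point & ((uint32_t{1} << frac_bits) - 1);
  uint32_t integer = fixed_point >> frac_bits;
  if (frac > half) return integer + 1;  // above the midpoint: round up
  if (frac < half) return integer;      // below the midpoint: round down
  return integer + (integer & 1);       // exact tie: round to the even value
}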
5915
5416 5916
5417 void LCodeGen::DoCheckPrototypeMaps(LCheckPrototypeMaps* instr) { 5917 void LCodeGen::DoCheckPrototypeMaps(LCheckPrototypeMaps* instr) {
5418 Register reg = ToRegister(instr->temp()); 5918 Register reg = ToRegister(instr->temp());
5419 5919
5420 ZoneList<Handle<JSObject> >* prototypes = instr->prototypes(); 5920 ZoneList<Handle<JSObject> >* prototypes = instr->prototypes();
5421 ZoneList<Handle<Map> >* maps = instr->maps(); 5921 ZoneList<Handle<Map> >* maps = instr->maps();
5422 5922
5423 ASSERT(prototypes->length() == maps->length()); 5923 ASSERT(prototypes->length() == maps->length());
5424 5924
5425 if (instr->hydrogen()->CanOmitPrototypeChecks()) { 5925 if (instr->hydrogen()->CanOmitPrototypeChecks()) {
(...skipping 867 matching lines...)
6293 FixedArray::kHeaderSize - kPointerSize)); 6793 FixedArray::kHeaderSize - kPointerSize));
6294 __ bind(&done); 6794 __ bind(&done);
6295 } 6795 }
6296 6796
6297 6797
6298 #undef __ 6798 #undef __
6299 6799
6300 } } // namespace v8::internal 6800 } } // namespace v8::internal
6301 6801
6302 #endif // V8_TARGET_ARCH_IA32 6802 #endif // V8_TARGET_ARCH_IA32