OLD | NEW |
1 // Copyright 2012 the V8 project authors. All rights reserved. | 1 // Copyright 2012 the V8 project authors. All rights reserved. |
2 // Redistribution and use in source and binary forms, with or without | 2 // Redistribution and use in source and binary forms, with or without |
3 // modification, are permitted provided that the following conditions are | 3 // modification, are permitted provided that the following conditions are |
4 // met: | 4 // met: |
5 // | 5 // |
6 // * Redistributions of source code must retain the above copyright | 6 // * Redistributions of source code must retain the above copyright |
7 // notice, this list of conditions and the following disclaimer. | 7 // notice, this list of conditions and the following disclaimer. |
8 // * Redistributions in binary form must reproduce the above | 8 // * Redistributions in binary form must reproduce the above |
9 // copyright notice, this list of conditions and the following | 9 // copyright notice, this list of conditions and the following |
10 // disclaimer in the documentation and/or other materials provided | 10 // disclaimer in the documentation and/or other materials provided |
(...skipping 348 matching lines...)
359 changed_value->id(), changed_value->Mnemonic(), | 359 changed_value->id(), changed_value->Mnemonic(), |
360 use_id, use_mnemo); | 360 use_id, use_mnemo); |
361 } else { | 361 } else { |
362 Comment(";;; @%d: %s. <#%d>", current_instruction_, | 362 Comment(";;; @%d: %s. <#%d>", current_instruction_, |
363 instr->Mnemonic(), hydrogen->id()); | 363 instr->Mnemonic(), hydrogen->id()); |
364 } | 364 } |
365 } else { | 365 } else { |
366 Comment(";;; @%d: %s.", current_instruction_, instr->Mnemonic()); | 366 Comment(";;; @%d: %s.", current_instruction_, instr->Mnemonic()); |
367 } | 367 } |
368 } | 368 } |
| 369 |
| 370 if (!CpuFeatures::IsSupported(SSE2)) { |
| 371 FlushX87StackIfNecessary(instr); |
| 372 } |
| 373 |
369 instr->CompileToNative(this); | 374 instr->CompileToNative(this); |
| 375 |
| 376 if (!CpuFeatures::IsSupported(SSE2)) { |
| 377 ASSERT(!instr->HasDoubleRegisterResult() || x87_stack_depth_ == 1); |
| 378 |
| 379 if (FLAG_debug_code && FLAG_enable_slow_asserts) { |
| 380 __ VerifyX87StackDepth(x87_stack_depth_); |
| 381 } |
| 382 } |
370 } | 383 } |
371 } | 384 } |
372 EnsureSpaceForLazyDeopt(); | 385 EnsureSpaceForLazyDeopt(); |
373 return !is_aborted(); | 386 return !is_aborted(); |
374 } | 387 } |
375 | 388 |
376 | 389 |
377 bool LCodeGen::GenerateJumpTable() { | 390 bool LCodeGen::GenerateJumpTable() { |
378 Label needs_frame_not_call; | 391 Label needs_frame_not_call; |
379 Label needs_frame_is_call; | 392 Label needs_frame_is_call; |
(...skipping 134 matching lines...)
514 XMMRegister LCodeGen::ToDoubleRegister(int index) const { | 527 XMMRegister LCodeGen::ToDoubleRegister(int index) const { |
515 return XMMRegister::FromAllocationIndex(index); | 528 return XMMRegister::FromAllocationIndex(index); |
516 } | 529 } |
517 | 530 |
518 | 531 |
519 bool LCodeGen::IsX87TopOfStack(LOperand* op) const { | 532 bool LCodeGen::IsX87TopOfStack(LOperand* op) const { |
520 return op->IsDoubleRegister(); | 533 return op->IsDoubleRegister(); |
521 } | 534 } |
522 | 535 |
523 | 536 |
| 537 void LCodeGen::ReadX87Operand(Operand dst) { |
| 538 ASSERT(x87_stack_depth_ == 1); |
| 539 __ fst_d(dst); |
| 540 } |
| 541 |
| 542 |
| 543 void LCodeGen::PushX87DoubleOperand(Operand src) { |
| 544 ASSERT(x87_stack_depth_ == 0); |
| 545 x87_stack_depth_++; |
| 546 __ fld_d(src); |
| 547 } |
| 548 |
| 549 |
| 550 void LCodeGen::PushX87FloatOperand(Operand src) { |
| 551 ASSERT(x87_stack_depth_ == 0); |
| 552 x87_stack_depth_++; |
| 553 __ fld_s(src); |
| 554 } |
| 555 |
| 556 |
| 557 void LCodeGen::PopX87() { |
| 558 ASSERT(x87_stack_depth_ == 1); |
| 559 x87_stack_depth_--; |
| 560 __ fstp(0); |
| 561 } |
| 562 |
| 563 |
| 564 void LCodeGen::CurrentInstructionReturnsX87Result() { |
| 565 ASSERT(x87_stack_depth_ <= 1); |
| 566 if (x87_stack_depth_ == 0) { |
| 567 x87_stack_depth_ = 1; |
| 568 } |
| 569 } |
| 570 |
| 571 |
| 572 void LCodeGen::FlushX87StackIfNecessary(LInstruction* instr) { |
| 573 if (x87_stack_depth_ > 0) { |
| 574 if ((instr->ClobbersDoubleRegisters() || |
| 575 instr->HasDoubleRegisterResult()) && |
| 576 !instr->HasDoubleRegisterInput()) { |
| 577 PopX87(); |
| 578 } |
| 579 } |
| 580 } |
| 581 |
| 582 |
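The six helpers above all maintain one invariant: at most one live value sits on the x87 register stack between Lithium instructions, tracked by x87_stack_depth_. A minimal standalone sketch of that contract (hypothetical illustration, not V8 code; the class and method names are made up):

    #include <cassert>

    // Mirrors the x87_stack_depth_ bookkeeping: a value is pushed by a load,
    // consumed by a pop, or claimed as the current instruction's result.
    class X87StackTracker {
     public:
      void Push() { assert(depth_ == 0); ++depth_; }      // PushX87*Operand
      void Pop() { assert(depth_ == 1); --depth_; }       // PopX87
      void MarkResult() { if (depth_ == 0) depth_ = 1; }  // CurrentInstruction...
      // FlushX87StackIfNecessary: discard an unconsumed value before an
      // instruction that clobbers or redefines the double result.
      void FlushIfNecessary(bool clobbers_or_defines, bool has_double_input) {
        if (depth_ > 0 && clobbers_or_defines && !has_double_input) Pop();
      }
     private:
      int depth_ = 0;
    };

    int main() {
      X87StackTracker stack;
      stack.Push();  // e.g. PushX87DoubleOperand before a load
      stack.FlushIfNecessary(/*clobbers_or_defines=*/true,
                             /*has_double_input=*/false);  // drops the dead value
      return 0;
    }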
524 Register LCodeGen::ToRegister(LOperand* op) const { | 583 Register LCodeGen::ToRegister(LOperand* op) const { |
525 ASSERT(op->IsRegister()); | 584 ASSERT(op->IsRegister()); |
526 return ToRegister(op->index()); | 585 return ToRegister(op->index()); |
527 } | 586 } |
528 | 587 |
529 | 588 |
530 XMMRegister LCodeGen::ToDoubleRegister(LOperand* op) const { | 589 XMMRegister LCodeGen::ToDoubleRegister(LOperand* op) const { |
531 ASSERT(op->IsDoubleRegister()); | 590 ASSERT(op->IsDoubleRegister()); |
532 return ToDoubleRegister(op->index()); | 591 return ToDoubleRegister(op->index()); |
533 } | 592 } |
(...skipping 305 matching lines...)
839 translation.index(), | 898 translation.index(), |
840 (mode == Safepoint::kLazyDeopt) ? pc_offset : -1); | 899 (mode == Safepoint::kLazyDeopt) ? pc_offset : -1); |
841 deoptimizations_.Add(environment, zone()); | 900 deoptimizations_.Add(environment, zone()); |
842 } | 901 } |
843 } | 902 } |
844 | 903 |
845 | 904 |
846 void LCodeGen::DeoptimizeIf(Condition cc, LEnvironment* environment) { | 905 void LCodeGen::DeoptimizeIf(Condition cc, LEnvironment* environment) { |
847 RegisterEnvironmentForDeoptimization(environment, Safepoint::kNoLazyDeopt); | 906 RegisterEnvironmentForDeoptimization(environment, Safepoint::kNoLazyDeopt); |
848 ASSERT(environment->HasBeenRegistered()); | 907 ASSERT(environment->HasBeenRegistered()); |
| 908 // It's an error to deoptimize with the x87 fp stack in use. |
| 909 ASSERT(x87_stack_depth_ == 0); |
849 int id = environment->deoptimization_index(); | 910 int id = environment->deoptimization_index(); |
850 ASSERT(info()->IsOptimizing() || info()->IsStub()); | 911 ASSERT(info()->IsOptimizing() || info()->IsStub()); |
851 Deoptimizer::BailoutType bailout_type = info()->IsStub() | 912 Deoptimizer::BailoutType bailout_type = info()->IsStub() |
852 ? Deoptimizer::LAZY | 913 ? Deoptimizer::LAZY |
853 : Deoptimizer::EAGER; | 914 : Deoptimizer::EAGER; |
854 Address entry = | 915 Address entry = |
855 Deoptimizer::GetDeoptimizationEntry(isolate(), id, bailout_type); | 916 Deoptimizer::GetDeoptimizationEntry(isolate(), id, bailout_type); |
856 if (entry == NULL) { | 917 if (entry == NULL) { |
857 Abort("bailout was not prepared"); | 918 Abort("bailout was not prepared"); |
858 return; | 919 return; |
(...skipping 823 matching lines...)
1682 } | 1743 } |
1683 | 1744 |
1684 | 1745 |
1685 void LCodeGen::DoConstantI(LConstantI* instr) { | 1746 void LCodeGen::DoConstantI(LConstantI* instr) { |
1686 ASSERT(instr->result()->IsRegister()); | 1747 ASSERT(instr->result()->IsRegister()); |
1687 __ Set(ToRegister(instr->result()), Immediate(instr->value())); | 1748 __ Set(ToRegister(instr->result()), Immediate(instr->value())); |
1688 } | 1749 } |
1689 | 1750 |
1690 | 1751 |
1691 void LCodeGen::DoConstantD(LConstantD* instr) { | 1752 void LCodeGen::DoConstantD(LConstantD* instr) { |
1692 ASSERT(instr->result()->IsDoubleRegister()); | |
1693 XMMRegister res = ToDoubleRegister(instr->result()); | |
1694 double v = instr->value(); | 1753 double v = instr->value(); |
1695 // Use xor to produce +0.0 in a fast and compact way, but avoid to | 1754 uint64_t int_val = BitCast<uint64_t, double>(v); |
1696 // do so if the constant is -0.0. | 1755 int32_t lower = static_cast<int32_t>(int_val); |
1697 if (BitCast<uint64_t, double>(v) == 0) { | 1756 int32_t upper = static_cast<int32_t>(int_val >> (kBitsPerInt)); |
1698 __ xorps(res, res); | 1757 |
| 1758 if (!CpuFeatures::IsSafeForSnapshot(SSE2)) { |
 | 1759 __ push(Immediate(upper)); |
 | 1760 __ push(Immediate(lower)); |
| 1761 PushX87DoubleOperand(Operand(esp, 0)); |
| 1762 __ add(Operand(esp), Immediate(kDoubleSize)); |
| 1763 CurrentInstructionReturnsX87Result(); |
1699 } else { | 1764 } else { |
1700 Register temp = ToRegister(instr->temp()); | 1765 CpuFeatureScope scope1(masm(), SSE2); |
1701 uint64_t int_val = BitCast<uint64_t, double>(v); | 1766 ASSERT(instr->result()->IsDoubleRegister()); |
1702 int32_t lower = static_cast<int32_t>(int_val); | 1767 XMMRegister res = ToDoubleRegister(instr->result()); |
1703 int32_t upper = static_cast<int32_t>(int_val >> (kBitsPerInt)); | 1768 if (int_val == 0) { |
1704 if (CpuFeatures::IsSupported(SSE4_1)) { | 1769 __ xorps(res, res); |
1705 CpuFeatureScope scope1(masm(), SSE2); | 1770 } else { |
1706 CpuFeatureScope scope2(masm(), SSE4_1); | 1771 Register temp = ToRegister(instr->temp()); |
1707 if (lower != 0) { | 1772 if (CpuFeatures::IsSupported(SSE4_1)) { |
1708 __ Set(temp, Immediate(lower)); | 1773 CpuFeatureScope scope2(masm(), SSE4_1); |
| 1774 if (lower != 0) { |
| 1775 __ Set(temp, Immediate(lower)); |
| 1776 __ movd(res, Operand(temp)); |
| 1777 __ Set(temp, Immediate(upper)); |
| 1778 __ pinsrd(res, Operand(temp), 1); |
| 1779 } else { |
| 1780 __ xorps(res, res); |
| 1781 __ Set(temp, Immediate(upper)); |
| 1782 __ pinsrd(res, Operand(temp), 1); |
| 1783 } |
| 1784 } else { |
| 1785 __ Set(temp, Immediate(upper)); |
1709 __ movd(res, Operand(temp)); | 1786 __ movd(res, Operand(temp)); |
1710 __ Set(temp, Immediate(upper)); | 1787 __ psllq(res, 32); |
1711 __ pinsrd(res, Operand(temp), 1); | 1788 if (lower != 0) { |
1712 } else { | 1789 __ Set(temp, Immediate(lower)); |
1713 __ xorps(res, res); | 1790 __ movd(xmm0, Operand(temp)); |
1714 __ Set(temp, Immediate(upper)); | 1791 __ por(res, xmm0); |
1715 __ pinsrd(res, Operand(temp), 1); | 1792 } |
1716 } | |
1717 } else { | |
1718 CpuFeatureScope scope(masm(), SSE2); | |
1719 __ Set(temp, Immediate(upper)); | |
1720 __ movd(res, Operand(temp)); | |
1721 __ psllq(res, 32); | |
1722 if (lower != 0) { | |
1723 __ Set(temp, Immediate(lower)); | |
1724 __ movd(xmm0, Operand(temp)); | |
1725 __ por(res, xmm0); | |
1726 } | 1793 } |
1727 } | 1794 } |
1728 } | 1795 } |
1729 } | 1796 } |
1730 | 1797 |
1731 | 1798 |
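For reference, the lower/upper split in DoConstantD is simply the IEEE-754 bit pattern of the double cut into two 32-bit words; the upper word is pushed first so that fld_d sees a well-formed little-endian double at esp. A small host-side sketch of the same split (illustration only; memcpy stands in for BitCast):

    #include <cstdint>
    #include <cstdio>
    #include <cstring>

    int main() {
      double v = 1.5;
      uint64_t bits;
      std::memcpy(&bits, &v, sizeof bits);  // BitCast<uint64_t, double>
      uint32_t lower = static_cast<uint32_t>(bits);        // word at esp
      uint32_t upper = static_cast<uint32_t>(bits >> 32);  // word at esp + 4
      std::printf("lower=0x%08x upper=0x%08x\n", lower, upper);
      // Prints: lower=0x00000000 upper=0x3ff80000
      return 0;
    }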
1732 void LCodeGen::DoConstantT(LConstantT* instr) { | 1799 void LCodeGen::DoConstantT(LConstantT* instr) { |
1733 Register reg = ToRegister(instr->result()); | 1800 Register reg = ToRegister(instr->result()); |
1734 Handle<Object> handle = instr->value(); | 1801 Handle<Object> handle = instr->value(); |
1735 if (handle->IsHeapObject()) { | 1802 if (handle->IsHeapObject()) { |
(...skipping 1415 matching lines...)
3151 elements_kind, | 3218 elements_kind, |
3152 0, | 3219 0, |
3153 instr->additional_index())); | 3220 instr->additional_index())); |
3154 if (elements_kind == EXTERNAL_FLOAT_ELEMENTS) { | 3221 if (elements_kind == EXTERNAL_FLOAT_ELEMENTS) { |
3155 if (CpuFeatures::IsSupported(SSE2)) { | 3222 if (CpuFeatures::IsSupported(SSE2)) { |
3156 CpuFeatureScope scope(masm(), SSE2); | 3223 CpuFeatureScope scope(masm(), SSE2); |
3157 XMMRegister result(ToDoubleRegister(instr->result())); | 3224 XMMRegister result(ToDoubleRegister(instr->result())); |
3158 __ movss(result, operand); | 3225 __ movss(result, operand); |
3159 __ cvtss2sd(result, result); | 3226 __ cvtss2sd(result, result); |
3160 } else { | 3227 } else { |
3161 __ fld_s(operand); | 3228 PushX87FloatOperand(operand); |
3162 HandleX87FPReturnValue(instr); | 3229 CurrentInstructionReturnsX87Result(); |
3163 } | 3230 } |
3164 } else if (elements_kind == EXTERNAL_DOUBLE_ELEMENTS) { | 3231 } else if (elements_kind == EXTERNAL_DOUBLE_ELEMENTS) { |
3165 if (CpuFeatures::IsSupported(SSE2)) { | 3232 if (CpuFeatures::IsSupported(SSE2)) { |
3166 CpuFeatureScope scope(masm(), SSE2); | 3233 CpuFeatureScope scope(masm(), SSE2); |
3167 __ movdbl(ToDoubleRegister(instr->result()), operand); | 3234 __ movdbl(ToDoubleRegister(instr->result()), operand); |
3168 } else { | 3235 } else { |
3169 __ fld_d(operand); | 3236 PushX87DoubleOperand(operand); |
3170 HandleX87FPReturnValue(instr); | 3237 CurrentInstructionReturnsX87Result(); |
3171 } | 3238 } |
3172 } else { | 3239 } else { |
3173 Register result(ToRegister(instr->result())); | 3240 Register result(ToRegister(instr->result())); |
3174 switch (elements_kind) { | 3241 switch (elements_kind) { |
3175 case EXTERNAL_BYTE_ELEMENTS: | 3242 case EXTERNAL_BYTE_ELEMENTS: |
3176 __ movsx_b(result, operand); | 3243 __ movsx_b(result, operand); |
3177 break; | 3244 break; |
3178 case EXTERNAL_PIXEL_ELEMENTS: | 3245 case EXTERNAL_PIXEL_ELEMENTS: |
3179 case EXTERNAL_UNSIGNED_BYTE_ELEMENTS: | 3246 case EXTERNAL_UNSIGNED_BYTE_ELEMENTS: |
3180 __ movzx_b(result, operand); | 3247 __ movzx_b(result, operand); |
(...skipping 24 matching lines...)
3205 case FAST_HOLEY_DOUBLE_ELEMENTS: | 3272 case FAST_HOLEY_DOUBLE_ELEMENTS: |
3206 case DICTIONARY_ELEMENTS: | 3273 case DICTIONARY_ELEMENTS: |
3207 case NON_STRICT_ARGUMENTS_ELEMENTS: | 3274 case NON_STRICT_ARGUMENTS_ELEMENTS: |
3208 UNREACHABLE(); | 3275 UNREACHABLE(); |
3209 break; | 3276 break; |
3210 } | 3277 } |
3211 } | 3278 } |
3212 } | 3279 } |
3213 | 3280 |
3214 | 3281 |
3215 void LCodeGen::HandleX87FPReturnValue(LInstruction* instr) { | |
3216 if (IsX87TopOfStack(instr->result())) { | |
3217 // Return value is already on stack. If the value has no uses, then | |
3218 // pop it off the FP stack. Otherwise, make sure that there are enough | |
3219 // copies of the value on the stack to feed all of the usages, e.g. | |
3220 // when the following instruction uses the return value in multiple | |
3221 // inputs. | |
3222 int count = instr->hydrogen_value()->UseCount(); | |
3223 if (count == 0) { | |
3224 __ fstp(0); | |
3225 } else { | |
3226 count--; | |
3227 ASSERT(count <= 7); | |
3228 while (count-- > 0) { | |
3229 __ fld(0); | |
3230 } | |
3231 } | |
3232 } else { | |
3233 __ fstp_d(ToOperand(instr->result())); | |
3234 } | |
3235 } | |
3236 | |
3237 | |
3238 void LCodeGen::DoLoadKeyedFixedDoubleArray(LLoadKeyed* instr) { | 3282 void LCodeGen::DoLoadKeyedFixedDoubleArray(LLoadKeyed* instr) { |
3239 if (instr->hydrogen()->RequiresHoleCheck()) { | 3283 if (instr->hydrogen()->RequiresHoleCheck()) { |
3240 int offset = FixedDoubleArray::kHeaderSize - kHeapObjectTag + | 3284 int offset = FixedDoubleArray::kHeaderSize - kHeapObjectTag + |
3241 sizeof(kHoleNanLower32); | 3285 sizeof(kHoleNanLower32); |
3242 Operand hole_check_operand = BuildFastArrayOperand( | 3286 Operand hole_check_operand = BuildFastArrayOperand( |
3243 instr->elements(), instr->key(), | 3287 instr->elements(), instr->key(), |
3244 instr->hydrogen()->key()->representation(), | 3288 instr->hydrogen()->key()->representation(), |
3245 FAST_DOUBLE_ELEMENTS, | 3289 FAST_DOUBLE_ELEMENTS, |
3246 offset, | 3290 offset, |
3247 instr->additional_index()); | 3291 instr->additional_index()); |
3248 __ cmp(hole_check_operand, Immediate(kHoleNanUpper32)); | 3292 __ cmp(hole_check_operand, Immediate(kHoleNanUpper32)); |
3249 DeoptimizeIf(equal, instr->environment()); | 3293 DeoptimizeIf(equal, instr->environment()); |
3250 } | 3294 } |
3251 | 3295 |
3252 Operand double_load_operand = BuildFastArrayOperand( | 3296 Operand double_load_operand = BuildFastArrayOperand( |
3253 instr->elements(), | 3297 instr->elements(), |
3254 instr->key(), | 3298 instr->key(), |
3255 instr->hydrogen()->key()->representation(), | 3299 instr->hydrogen()->key()->representation(), |
3256 FAST_DOUBLE_ELEMENTS, | 3300 FAST_DOUBLE_ELEMENTS, |
3257 FixedDoubleArray::kHeaderSize - kHeapObjectTag, | 3301 FixedDoubleArray::kHeaderSize - kHeapObjectTag, |
3258 instr->additional_index()); | 3302 instr->additional_index()); |
3259 if (CpuFeatures::IsSupported(SSE2)) { | 3303 if (CpuFeatures::IsSupported(SSE2)) { |
3260 CpuFeatureScope scope(masm(), SSE2); | 3304 CpuFeatureScope scope(masm(), SSE2); |
3261 XMMRegister result = ToDoubleRegister(instr->result()); | 3305 XMMRegister result = ToDoubleRegister(instr->result()); |
3262 __ movdbl(result, double_load_operand); | 3306 __ movdbl(result, double_load_operand); |
3263 } else { | 3307 } else { |
3264 __ fld_d(double_load_operand); | 3308 PushX87DoubleOperand(double_load_operand); |
3265 HandleX87FPReturnValue(instr); | 3309 CurrentInstructionReturnsX87Result(); |
3266 } | 3310 } |
3267 } | 3311 } |
3268 | 3312 |
3269 | 3313 |
3270 void LCodeGen::DoLoadKeyedFixedArray(LLoadKeyed* instr) { | 3314 void LCodeGen::DoLoadKeyedFixedArray(LLoadKeyed* instr) { |
3271 Register result = ToRegister(instr->result()); | 3315 Register result = ToRegister(instr->result()); |
3272 | 3316 |
3273 // Load the result. | 3317 // Load the result. |
3274 __ mov(result, | 3318 __ mov(result, |
3275 BuildFastArrayOperand(instr->elements(), | 3319 BuildFastArrayOperand(instr->elements(), |
(...skipping 1028 matching lines...)
4304 __ SmiUntag(ToRegister(key)); | 4348 __ SmiUntag(ToRegister(key)); |
4305 } | 4349 } |
4306 Operand operand(BuildFastArrayOperand( | 4350 Operand operand(BuildFastArrayOperand( |
4307 instr->elements(), | 4351 instr->elements(), |
4308 key, | 4352 key, |
4309 instr->hydrogen()->key()->representation(), | 4353 instr->hydrogen()->key()->representation(), |
4310 elements_kind, | 4354 elements_kind, |
4311 0, | 4355 0, |
4312 instr->additional_index())); | 4356 instr->additional_index())); |
4313 if (elements_kind == EXTERNAL_FLOAT_ELEMENTS) { | 4357 if (elements_kind == EXTERNAL_FLOAT_ELEMENTS) { |
4314 CpuFeatureScope scope(masm(), SSE2); | 4358 if (CpuFeatures::IsSafeForSnapshot(SSE2)) { |
4315 __ cvtsd2ss(xmm0, ToDoubleRegister(instr->value())); | 4359 CpuFeatureScope scope(masm(), SSE2); |
4316 __ movss(operand, xmm0); | 4360 __ cvtsd2ss(xmm0, ToDoubleRegister(instr->value())); |
| 4361 __ movss(operand, xmm0); |
| 4362 } else { |
| 4363 __ fld(0); |
| 4364 __ fstp_s(operand); |
| 4365 } |
4317 } else if (elements_kind == EXTERNAL_DOUBLE_ELEMENTS) { | 4366 } else if (elements_kind == EXTERNAL_DOUBLE_ELEMENTS) { |
4318 CpuFeatureScope scope(masm(), SSE2); | 4367 if (CpuFeatures::IsSafeForSnapshot(SSE2)) { |
4319 __ movdbl(operand, ToDoubleRegister(instr->value())); | 4368 CpuFeatureScope scope(masm(), SSE2); |
| 4369 __ movdbl(operand, ToDoubleRegister(instr->value())); |
| 4370 } else { |
| 4371 __ fst_d(operand); |
| 4372 } |
4320 } else { | 4373 } else { |
4321 Register value = ToRegister(instr->value()); | 4374 Register value = ToRegister(instr->value()); |
4322 switch (elements_kind) { | 4375 switch (elements_kind) { |
4323 case EXTERNAL_PIXEL_ELEMENTS: | 4376 case EXTERNAL_PIXEL_ELEMENTS: |
4324 case EXTERNAL_UNSIGNED_BYTE_ELEMENTS: | 4377 case EXTERNAL_UNSIGNED_BYTE_ELEMENTS: |
4325 case EXTERNAL_BYTE_ELEMENTS: | 4378 case EXTERNAL_BYTE_ELEMENTS: |
4326 __ mov_b(operand, value); | 4379 __ mov_b(operand, value); |
4327 break; | 4380 break; |
4328 case EXTERNAL_SHORT_ELEMENTS: | 4381 case EXTERNAL_SHORT_ELEMENTS: |
4329 case EXTERNAL_UNSIGNED_SHORT_ELEMENTS: | 4382 case EXTERNAL_UNSIGNED_SHORT_ELEMENTS: |
(...skipping 14 matching lines...)
4344 case DICTIONARY_ELEMENTS: | 4397 case DICTIONARY_ELEMENTS: |
4345 case NON_STRICT_ARGUMENTS_ELEMENTS: | 4398 case NON_STRICT_ARGUMENTS_ELEMENTS: |
4346 UNREACHABLE(); | 4399 UNREACHABLE(); |
4347 break; | 4400 break; |
4348 } | 4401 } |
4349 } | 4402 } |
4350 } | 4403 } |
4351 | 4404 |
4352 | 4405 |
4353 void LCodeGen::DoStoreKeyedFixedDoubleArray(LStoreKeyed* instr) { | 4406 void LCodeGen::DoStoreKeyedFixedDoubleArray(LStoreKeyed* instr) { |
4354 CpuFeatureScope scope(masm(), SSE2); | 4407 ExternalReference canonical_nan_reference = |
4355 XMMRegister value = ToDoubleRegister(instr->value()); | 4408 ExternalReference::address_of_canonical_non_hole_nan(); |
4356 | |
4357 if (instr->NeedsCanonicalization()) { | |
4358 Label have_value; | |
4359 | |
4360 __ ucomisd(value, value); | |
4361 __ j(parity_odd, &have_value); // NaN. | |
4362 | |
4363 ExternalReference canonical_nan_reference = | |
4364 ExternalReference::address_of_canonical_non_hole_nan(); | |
4365 __ movdbl(value, Operand::StaticVariable(canonical_nan_reference)); | |
4366 __ bind(&have_value); | |
4367 } | |
4368 | |
4369 Operand double_store_operand = BuildFastArrayOperand( | 4409 Operand double_store_operand = BuildFastArrayOperand( |
4370 instr->elements(), | 4410 instr->elements(), |
4371 instr->key(), | 4411 instr->key(), |
4372 instr->hydrogen()->key()->representation(), | 4412 instr->hydrogen()->key()->representation(), |
4373 FAST_DOUBLE_ELEMENTS, | 4413 FAST_DOUBLE_ELEMENTS, |
4374 FixedDoubleArray::kHeaderSize - kHeapObjectTag, | 4414 FixedDoubleArray::kHeaderSize - kHeapObjectTag, |
4375 instr->additional_index()); | 4415 instr->additional_index()); |
4376 __ movdbl(double_store_operand, value); | 4416 |
| 4417 if (CpuFeatures::IsSafeForSnapshot(SSE2)) { |
| 4418 CpuFeatureScope scope(masm(), SSE2); |
| 4419 XMMRegister value = ToDoubleRegister(instr->value()); |
| 4420 |
| 4421 if (instr->NeedsCanonicalization()) { |
| 4422 Label have_value; |
| 4423 |
| 4424 __ ucomisd(value, value); |
| 4425 __ j(parity_odd, &have_value); // NaN. |
| 4426 |
| 4427 __ movdbl(value, Operand::StaticVariable(canonical_nan_reference)); |
| 4428 __ bind(&have_value); |
| 4429 } |
| 4430 |
| 4431 __ movdbl(double_store_operand, value); |
| 4432 } else { |
 | 4433 // Can't use SSE2 in the serializer. |
| 4434 if (instr->hydrogen()->IsConstantHoleStore()) { |
| 4435 // This means we should store the (double) hole. No floating point |
| 4436 // registers required. |
| 4437 double nan_double = FixedDoubleArray::hole_nan_as_double(); |
| 4438 uint64_t int_val = BitCast<uint64_t, double>(nan_double); |
| 4439 int32_t lower = static_cast<int32_t>(int_val); |
| 4440 int32_t upper = static_cast<int32_t>(int_val >> (kBitsPerInt)); |
| 4441 |
| 4442 __ mov(double_store_operand, Immediate(lower)); |
| 4443 Operand double_store_operand2 = BuildFastArrayOperand( |
| 4444 instr->elements(), |
| 4445 instr->key(), |
| 4446 instr->hydrogen()->key()->representation(), |
| 4447 FAST_DOUBLE_ELEMENTS, |
| 4448 FixedDoubleArray::kHeaderSize - kHeapObjectTag + kPointerSize, |
| 4449 instr->additional_index()); |
| 4450 __ mov(double_store_operand2, Immediate(upper)); |
| 4451 } else { |
| 4452 Label no_special_nan_handling; |
| 4453 ASSERT(x87_stack_depth_ > 0); |
| 4454 |
| 4455 if (instr->NeedsCanonicalization()) { |
| 4456 __ fld(0); |
| 4457 __ fld(0); |
| 4458 __ FCmp(); |
| 4459 |
| 4460 __ j(parity_odd, &no_special_nan_handling); |
| 4461 __ sub(esp, Immediate(kDoubleSize)); |
| 4462 __ fst_d(MemOperand(esp, 0)); |
| 4463 __ cmp(MemOperand(esp, sizeof(kHoleNanLower32)), |
| 4464 Immediate(kHoleNanUpper32)); |
| 4465 __ add(esp, Immediate(kDoubleSize)); |
| 4466 Label canonicalize; |
| 4467 __ j(not_equal, &canonicalize); |
| 4468 __ jmp(&no_special_nan_handling); |
| 4469 __ bind(&canonicalize); |
| 4470 __ fstp(0); |
| 4471 __ fld_d(Operand::StaticVariable(canonical_nan_reference)); |
| 4472 } |
| 4473 |
| 4474 __ bind(&no_special_nan_handling); |
| 4475 __ fst_d(double_store_operand); |
| 4476 } |
| 4477 } |
4377 } | 4478 } |
4378 | 4479 |
4379 | 4480 |
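The constant-hole fast path above works because the hole is a fixed NaN bit pattern, so the store needs no FPU at all: two 32-bit integer moves write the payload directly. A sketch of the idea (the payload constant below is made up for illustration; the real values are V8's kHoleNanLower32/kHoleNanUpper32):

    #include <cstdint>
    #include <cstdio>
    #include <cstring>

    int main() {
      // Write a NaN one 32-bit word at a time, as the no-SSE2 store does.
      uint32_t words[2] = {0x00000000u, 0x7ff40000u};  // lower word, upper word
      double d;
      std::memcpy(&d, words, sizeof d);  // little-endian: words[0] is the low word
      std::printf("is NaN: %d\n", d != d);  // NaN is the only value unequal to itself
      return 0;
    }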
4380 void LCodeGen::DoStoreKeyedFixedArray(LStoreKeyed* instr) { | 4481 void LCodeGen::DoStoreKeyedFixedArray(LStoreKeyed* instr) { |
4381 Register value = ToRegister(instr->value()); | 4482 Register value = ToRegister(instr->value()); |
4382 Register elements = ToRegister(instr->elements()); | 4483 Register elements = ToRegister(instr->elements()); |
4383 Register key = instr->key()->IsRegister() ? ToRegister(instr->key()) : no_reg; | 4484 Register key = instr->key()->IsRegister() ? ToRegister(instr->key()) : no_reg; |
4384 | 4485 |
4385 Operand operand = BuildFastArrayOperand( | 4486 Operand operand = BuildFastArrayOperand( |
4386 instr->elements(), | 4487 instr->elements(), |
(...skipping 411 matching lines...)
4798 | 4899 |
4799 Label no_special_nan_handling; | 4900 Label no_special_nan_handling; |
4800 Label done; | 4901 Label done; |
4801 if (convert_hole) { | 4902 if (convert_hole) { |
4802 bool use_sse2 = CpuFeatures::IsSupported(SSE2); | 4903 bool use_sse2 = CpuFeatures::IsSupported(SSE2); |
4803 if (use_sse2) { | 4904 if (use_sse2) { |
4804 CpuFeatureScope scope(masm(), SSE2); | 4905 CpuFeatureScope scope(masm(), SSE2); |
4805 XMMRegister input_reg = ToDoubleRegister(instr->value()); | 4906 XMMRegister input_reg = ToDoubleRegister(instr->value()); |
4806 __ ucomisd(input_reg, input_reg); | 4907 __ ucomisd(input_reg, input_reg); |
4807 } else { | 4908 } else { |
4808 if (!IsX87TopOfStack(instr->value())) { | |
4809 __ fld_d(ToOperand(instr->value())); | |
4810 } | |
4811 __ fld(0); | 4909 __ fld(0); |
4812 __ fld(0); | 4910 __ fld(0); |
4813 __ FCmp(); | 4911 __ FCmp(); |
4814 } | 4912 } |
4815 | 4913 |
4816 __ j(parity_odd, &no_special_nan_handling); | 4914 __ j(parity_odd, &no_special_nan_handling); |
4817 __ sub(esp, Immediate(kDoubleSize)); | 4915 __ sub(esp, Immediate(kDoubleSize)); |
4818 if (use_sse2) { | 4916 if (use_sse2) { |
4819 CpuFeatureScope scope(masm(), SSE2); | 4917 CpuFeatureScope scope(masm(), SSE2); |
4820 XMMRegister input_reg = ToDoubleRegister(instr->value()); | 4918 XMMRegister input_reg = ToDoubleRegister(instr->value()); |
4821 __ movdbl(MemOperand(esp, 0), input_reg); | 4919 __ movdbl(MemOperand(esp, 0), input_reg); |
4822 } else { | 4920 } else { |
4823 __ fld(0); | 4921 __ fld(0); |
4824 __ fstp_d(MemOperand(esp, 0)); | 4922 __ fstp_d(MemOperand(esp, 0)); |
4825 } | 4923 } |
4826 __ cmp(MemOperand(esp, sizeof(kHoleNanLower32)), | 4924 __ cmp(MemOperand(esp, sizeof(kHoleNanLower32)), |
4827 Immediate(kHoleNanUpper32)); | 4925 Immediate(kHoleNanUpper32)); |
4828 Label canonicalize; | 4926 Label canonicalize; |
4829 __ j(not_equal, &canonicalize); | 4927 __ j(not_equal, &canonicalize); |
4830 __ add(esp, Immediate(kDoubleSize)); | 4928 __ add(esp, Immediate(kDoubleSize)); |
4831 __ mov(reg, factory()->the_hole_value()); | 4929 __ mov(reg, factory()->the_hole_value()); |
| 4930 if (!use_sse2) { |
| 4931 __ fstp(0); |
| 4932 } |
4832 __ jmp(&done); | 4933 __ jmp(&done); |
4833 __ bind(&canonicalize); | 4934 __ bind(&canonicalize); |
4834 __ add(esp, Immediate(kDoubleSize)); | 4935 __ add(esp, Immediate(kDoubleSize)); |
4835 ExternalReference nan = | 4936 ExternalReference nan = |
4836 ExternalReference::address_of_canonical_non_hole_nan(); | 4937 ExternalReference::address_of_canonical_non_hole_nan(); |
4837 if (use_sse2) { | 4938 if (use_sse2) { |
4838 CpuFeatureScope scope(masm(), SSE2); | 4939 CpuFeatureScope scope(masm(), SSE2); |
4839 XMMRegister input_reg = ToDoubleRegister(instr->value()); | 4940 XMMRegister input_reg = ToDoubleRegister(instr->value()); |
4840 __ movdbl(input_reg, Operand::StaticVariable(nan)); | 4941 __ movdbl(input_reg, Operand::StaticVariable(nan)); |
4841 } else { | 4942 } else { |
4842 __ fstp(0); | 4943 __ fstp(0); |
4843 __ fld_d(Operand::StaticVariable(nan)); | 4944 __ fld_d(Operand::StaticVariable(nan)); |
4844 } | 4945 } |
4845 } | 4946 } |
4846 | 4947 |
4847 __ bind(&no_special_nan_handling); | 4948 __ bind(&no_special_nan_handling); |
4848 DeferredNumberTagD* deferred = new(zone()) DeferredNumberTagD(this, instr); | 4949 DeferredNumberTagD* deferred = new(zone()) DeferredNumberTagD(this, instr); |
4849 if (FLAG_inline_new) { | 4950 if (FLAG_inline_new) { |
4850 Register tmp = ToRegister(instr->temp()); | 4951 Register tmp = ToRegister(instr->temp()); |
4851 __ AllocateHeapNumber(reg, tmp, no_reg, deferred->entry()); | 4952 __ AllocateHeapNumber(reg, tmp, no_reg, deferred->entry()); |
4852 } else { | 4953 } else { |
4853 __ jmp(deferred->entry()); | 4954 __ jmp(deferred->entry()); |
4854 } | 4955 } |
4855 __ bind(deferred->exit()); | 4956 __ bind(deferred->exit()); |
4856 if (CpuFeatures::IsSupported(SSE2)) { | 4957 if (CpuFeatures::IsSupported(SSE2)) { |
4857 CpuFeatureScope scope(masm(), SSE2); | 4958 CpuFeatureScope scope(masm(), SSE2); |
4858 XMMRegister input_reg = ToDoubleRegister(instr->value()); | 4959 XMMRegister input_reg = ToDoubleRegister(instr->value()); |
4859 __ movdbl(FieldOperand(reg, HeapNumber::kValueOffset), input_reg); | 4960 __ movdbl(FieldOperand(reg, HeapNumber::kValueOffset), input_reg); |
4860 } else { | 4961 } else { |
4861 if (!IsX87TopOfStack(instr->value())) { | 4962 __ fst_d(FieldOperand(reg, HeapNumber::kValueOffset)); |
4862 __ fld_d(ToOperand(instr->value())); | |
4863 } | |
4864 __ fstp_d(FieldOperand(reg, HeapNumber::kValueOffset)); | |
4865 } | 4963 } |
4866 __ bind(&done); | 4964 __ bind(&done); |
4867 } | 4965 } |
4868 | 4966 |
4869 | 4967 |
4870 void LCodeGen::DoDeferredNumberTagD(LNumberTagD* instr) { | 4968 void LCodeGen::DoDeferredNumberTagD(LNumberTagD* instr) { |
4871 // TODO(3095996): Get rid of this. For now, we need to make the | 4969 // TODO(3095996): Get rid of this. For now, we need to make the |
4872 // result register contain a valid pointer because it is already | 4970 // result register contain a valid pointer because it is already |
4873 // contained in the register pointer map. | 4971 // contained in the register pointer map. |
4874 Register reg = ToRegister(instr->result()); | 4972 Register reg = ToRegister(instr->result()); |
(...skipping 27 matching lines...)
4902 if (instr->needs_check()) { | 5000 if (instr->needs_check()) { |
4903 __ test(ToRegister(input), Immediate(kSmiTagMask)); | 5001 __ test(ToRegister(input), Immediate(kSmiTagMask)); |
4904 DeoptimizeIf(not_zero, instr->environment()); | 5002 DeoptimizeIf(not_zero, instr->environment()); |
4905 } else { | 5003 } else { |
4906 __ AssertSmi(ToRegister(input)); | 5004 __ AssertSmi(ToRegister(input)); |
4907 } | 5005 } |
4908 __ SmiUntag(ToRegister(input)); | 5006 __ SmiUntag(ToRegister(input)); |
4909 } | 5007 } |
4910 | 5008 |
4911 | 5009 |
| 5010 void LCodeGen::EmitNumberUntagDNoSSE2(Register input_reg, |
| 5011 Register temp_reg, |
| 5012 bool deoptimize_on_undefined, |
| 5013 bool deoptimize_on_minus_zero, |
| 5014 LEnvironment* env, |
| 5015 NumberUntagDMode mode) { |
| 5016 Label load_smi, done; |
| 5017 |
| 5018 if (mode == NUMBER_CANDIDATE_IS_ANY_TAGGED) { |
| 5019 // Smi check. |
| 5020 __ JumpIfSmi(input_reg, &load_smi, Label::kNear); |
| 5021 |
| 5022 // Heap number map check. |
| 5023 __ cmp(FieldOperand(input_reg, HeapObject::kMapOffset), |
| 5024 factory()->heap_number_map()); |
| 5025 if (deoptimize_on_undefined) { |
| 5026 DeoptimizeIf(not_equal, env); |
| 5027 } else { |
| 5028 Label heap_number; |
| 5029 __ j(equal, &heap_number, Label::kNear); |
| 5030 |
| 5031 __ cmp(input_reg, factory()->undefined_value()); |
| 5032 DeoptimizeIf(not_equal, env); |
| 5033 |
| 5034 // Convert undefined to NaN. |
| 5035 ExternalReference nan = |
| 5036 ExternalReference::address_of_canonical_non_hole_nan(); |
| 5037 __ fld_d(Operand::StaticVariable(nan)); |
| 5038 __ jmp(&done, Label::kNear); |
| 5039 __ bind(&heap_number); |
| 5040 } |
| 5041 // Heap number to x87 conversion. |
| 5042 __ fld_d(FieldOperand(input_reg, HeapNumber::kValueOffset)); |
| 5043 if (deoptimize_on_minus_zero) { |
| 5044 __ fldz(); |
| 5045 __ FCmp(); |
| 5046 __ fld_d(FieldOperand(input_reg, HeapNumber::kValueOffset)); |
| 5047 __ j(not_zero, &done, Label::kNear); |
| 5048 |
 | 5050 // Use general-purpose registers to check if we have -0.0. |
| 5050 __ mov(temp_reg, FieldOperand(input_reg, HeapNumber::kExponentOffset)); |
| 5051 __ test(temp_reg, Immediate(HeapNumber::kSignMask)); |
| 5052 __ j(zero, &done, Label::kNear); |
| 5053 |
| 5054 // Pop FPU stack before deoptimizing. |
| 5055 __ fstp(0); |
| 5056 DeoptimizeIf(not_zero, env); |
| 5057 } |
| 5058 __ jmp(&done, Label::kNear); |
| 5059 } else if (mode == NUMBER_CANDIDATE_IS_SMI_OR_HOLE) { |
| 5060 __ test(input_reg, Immediate(kSmiTagMask)); |
| 5061 DeoptimizeIf(not_equal, env); |
| 5062 } else if (mode == NUMBER_CANDIDATE_IS_SMI_CONVERT_HOLE) { |
| 5063 __ test(input_reg, Immediate(kSmiTagMask)); |
| 5064 __ j(zero, &load_smi); |
| 5065 ExternalReference hole_nan_reference = |
| 5066 ExternalReference::address_of_the_hole_nan(); |
| 5067 __ fld_d(Operand::StaticVariable(hole_nan_reference)); |
| 5068 __ jmp(&done, Label::kNear); |
| 5069 } else { |
| 5070 ASSERT(mode == NUMBER_CANDIDATE_IS_SMI); |
| 5071 } |
| 5072 |
| 5073 __ bind(&load_smi); |
| 5074 __ SmiUntag(input_reg); // Untag smi before converting to float. |
| 5075 __ push(input_reg); |
| 5076 __ fild_s(Operand(esp, 0)); |
| 5077 __ pop(input_reg); |
| 5078 __ SmiTag(input_reg); // Retag smi. |
| 5079 __ bind(&done); |
| 5080 } |
| 5081 |
| 5082 |
4912 void LCodeGen::EmitNumberUntagD(Register input_reg, | 5083 void LCodeGen::EmitNumberUntagD(Register input_reg, |
4913 Register temp_reg, | 5084 Register temp_reg, |
4914 XMMRegister result_reg, | 5085 XMMRegister result_reg, |
4915 bool deoptimize_on_undefined, | 5086 bool deoptimize_on_undefined, |
4916 bool deoptimize_on_minus_zero, | 5087 bool deoptimize_on_minus_zero, |
4917 LEnvironment* env, | 5088 LEnvironment* env, |
4918 NumberUntagDMode mode) { | 5089 NumberUntagDMode mode) { |
4919 Label load_smi, done; | 5090 Label load_smi, done; |
4920 | 5091 |
4921 if (mode == NUMBER_CANDIDATE_IS_ANY_TAGGED) { | 5092 if (mode == NUMBER_CANDIDATE_IS_ANY_TAGGED) { |
(...skipping 92 matching lines...)
5014 __ RecordComment("Deferred TaggedToI: exponent too big"); | 5185 __ RecordComment("Deferred TaggedToI: exponent too big"); |
5015 DeoptimizeIf(no_condition, instr->environment()); | 5186 DeoptimizeIf(no_condition, instr->environment()); |
5016 | 5187 |
5017 // Reserve space for 64 bit answer. | 5188 // Reserve space for 64 bit answer. |
5018 __ bind(&convert); | 5189 __ bind(&convert); |
5019 __ sub(Operand(esp), Immediate(kDoubleSize)); | 5190 __ sub(Operand(esp), Immediate(kDoubleSize)); |
5020 // Do conversion, which cannot fail because we checked the exponent. | 5191 // Do conversion, which cannot fail because we checked the exponent. |
5021 __ fisttp_d(Operand(esp, 0)); | 5192 __ fisttp_d(Operand(esp, 0)); |
5022 __ mov(input_reg, Operand(esp, 0)); // Low word of answer is the result. | 5193 __ mov(input_reg, Operand(esp, 0)); // Low word of answer is the result. |
5023 __ add(Operand(esp), Immediate(kDoubleSize)); | 5194 __ add(Operand(esp), Immediate(kDoubleSize)); |
5024 } else { | 5195 } else if (CpuFeatures::IsSupported(SSE2)) { |
5025 CpuFeatureScope scope(masm(), SSE2); | 5196 CpuFeatureScope scope(masm(), SSE2); |
5026 XMMRegister xmm_temp = ToDoubleRegister(instr->temp()); | 5197 XMMRegister xmm_temp = ToDoubleRegister(instr->temp()); |
5027 __ movdbl(xmm0, FieldOperand(input_reg, HeapNumber::kValueOffset)); | 5198 __ movdbl(xmm0, FieldOperand(input_reg, HeapNumber::kValueOffset)); |
5028 __ cvttsd2si(input_reg, Operand(xmm0)); | 5199 __ cvttsd2si(input_reg, Operand(xmm0)); |
5029 __ cmp(input_reg, 0x80000000u); | 5200 __ cmp(input_reg, 0x80000000u); |
5030 __ j(not_equal, &done); | 5201 __ j(not_equal, &done); |
5031 // Check if the input was 0x8000000 (kMinInt). | 5202 // Check if the input was 0x8000000 (kMinInt). |
5032 // If no, then we got an overflow and we deoptimize. | 5203 // If no, then we got an overflow and we deoptimize. |
5033 ExternalReference min_int = ExternalReference::address_of_min_int(); | 5204 ExternalReference min_int = ExternalReference::address_of_min_int(); |
5034 __ movdbl(xmm_temp, Operand::StaticVariable(min_int)); | 5205 __ movdbl(xmm_temp, Operand::StaticVariable(min_int)); |
5035 __ ucomisd(xmm_temp, xmm0); | 5206 __ ucomisd(xmm_temp, xmm0); |
5036 DeoptimizeIf(not_equal, instr->environment()); | 5207 DeoptimizeIf(not_equal, instr->environment()); |
5037 DeoptimizeIf(parity_even, instr->environment()); // NaN. | 5208 DeoptimizeIf(parity_even, instr->environment()); // NaN. |
| 5209 } else { |
| 5210 UNREACHABLE(); |
5038 } | 5211 } |
5039 } else if (CpuFeatures::IsSupported(SSE2)) { | 5212 } else if (CpuFeatures::IsSupported(SSE2)) { |
5040 CpuFeatureScope scope(masm(), SSE2); | 5213 CpuFeatureScope scope(masm(), SSE2); |
5041 // Deoptimize if we don't have a heap number. | 5214 // Deoptimize if we don't have a heap number. |
5042 __ RecordComment("Deferred TaggedToI: not a heap number"); | 5215 __ RecordComment("Deferred TaggedToI: not a heap number"); |
5043 DeoptimizeIf(not_equal, instr->environment()); | 5216 DeoptimizeIf(not_equal, instr->environment()); |
5044 | 5217 |
5045 XMMRegister xmm_temp = ToDoubleRegister(instr->temp()); | 5218 XMMRegister xmm_temp = ToDoubleRegister(instr->temp()); |
5046 __ movdbl(xmm0, FieldOperand(input_reg, HeapNumber::kValueOffset)); | 5219 __ movdbl(xmm0, FieldOperand(input_reg, HeapNumber::kValueOffset)); |
5047 __ cvttsd2si(input_reg, Operand(xmm0)); | 5220 __ cvttsd2si(input_reg, Operand(xmm0)); |
(...skipping 24 matching lines...)
5072 DeferredTaggedToI(LCodeGen* codegen, LTaggedToI* instr) | 5245 DeferredTaggedToI(LCodeGen* codegen, LTaggedToI* instr) |
5073 : LDeferredCode(codegen), instr_(instr) { } | 5246 : LDeferredCode(codegen), instr_(instr) { } |
5074 virtual void Generate() { codegen()->DoDeferredTaggedToI(instr_); } | 5247 virtual void Generate() { codegen()->DoDeferredTaggedToI(instr_); } |
5075 virtual LInstruction* instr() { return instr_; } | 5248 virtual LInstruction* instr() { return instr_; } |
5076 private: | 5249 private: |
5077 LTaggedToI* instr_; | 5250 LTaggedToI* instr_; |
5078 }; | 5251 }; |
5079 | 5252 |
5080 LOperand* input = instr->value(); | 5253 LOperand* input = instr->value(); |
5081 ASSERT(input->IsRegister()); | 5254 ASSERT(input->IsRegister()); |
5082 ASSERT(input->Equals(instr->result())); | |
5083 | |
5084 Register input_reg = ToRegister(input); | 5255 Register input_reg = ToRegister(input); |
| 5256 ASSERT(input_reg.is(ToRegister(instr->result()))); |
5085 | 5257 |
5086 DeferredTaggedToI* deferred = new(zone()) DeferredTaggedToI(this, instr); | 5258 DeferredTaggedToI* deferred = new(zone()) DeferredTaggedToI(this, instr); |
5087 | 5259 |
5088 // Smi check. | |
5089 __ JumpIfNotSmi(input_reg, deferred->entry()); | 5260 __ JumpIfNotSmi(input_reg, deferred->entry()); |
5090 | 5261 __ SmiUntag(input_reg); |
5091 // Smi to int32 conversion | |
5092 __ SmiUntag(input_reg); // Untag smi. | |
5093 | |
5094 __ bind(deferred->exit()); | 5262 __ bind(deferred->exit()); |
5095 } | 5263 } |
5096 | 5264 |
| 5265 |
| 5266 void LCodeGen::DoDeferredTaggedToINoSSE2(LTaggedToINoSSE2* instr) { |
| 5267 Label done, heap_number; |
| 5268 Register result_reg = ToRegister(instr->result()); |
| 5269 Register input_reg = ToRegister(instr->value()); |
| 5270 |
| 5271 // Heap number map check. |
| 5272 __ cmp(FieldOperand(input_reg, HeapObject::kMapOffset), |
| 5273 factory()->heap_number_map()); |
| 5274 __ j(equal, &heap_number, Label::kNear); |
| 5275 // Check for undefined. Undefined is converted to zero for truncating |
| 5276 // conversions. |
| 5277 __ cmp(input_reg, factory()->undefined_value()); |
| 5278 __ RecordComment("Deferred TaggedToI: cannot truncate"); |
| 5279 DeoptimizeIf(not_equal, instr->environment()); |
| 5280 __ xor_(result_reg, result_reg); |
| 5281 __ jmp(&done, Label::kFar); |
| 5282 __ bind(&heap_number); |
| 5283 |
| 5284 // Surprisingly, all of this crazy bit manipulation is considerably |
| 5285 // faster than using the built-in x86 CPU conversion functions (about 6x). |
| 5286 Label right_exponent, adjust_bias, zero_result; |
| 5287 Register scratch = ToRegister(instr->scratch()); |
| 5288 Register scratch2 = ToRegister(instr->scratch2()); |
| 5289 // Get exponent word. |
| 5290 __ mov(scratch, FieldOperand(input_reg, HeapNumber::kExponentOffset)); |
| 5291 // Get exponent alone in scratch2. |
| 5292 __ mov(scratch2, scratch); |
| 5293 __ and_(scratch2, HeapNumber::kExponentMask); |
| 5294 __ shr(scratch2, HeapNumber::kExponentShift); |
| 5295 if (instr->truncating()) { |
| 5296 __ j(zero, &zero_result); |
| 5297 } else { |
| 5298 __ j(not_zero, &adjust_bias); |
| 5299 __ test(scratch, Immediate(HeapNumber::kMantissaMask)); |
| 5300 DeoptimizeIf(not_zero, instr->environment()); |
| 5301 __ cmp(FieldOperand(input_reg, HeapNumber::kMantissaOffset), Immediate(0)); |
| 5302 DeoptimizeIf(not_equal, instr->environment()); |
| 5303 __ bind(&adjust_bias); |
| 5304 } |
| 5305 __ sub(scratch2, Immediate(HeapNumber::kExponentBias)); |
| 5306 if (!instr->truncating()) { |
| 5307 DeoptimizeIf(negative, instr->environment()); |
| 5308 } else { |
| 5309 __ j(negative, &zero_result); |
| 5310 } |
| 5311 |
| 5312 // Get the second half of the double. For some exponents we don't |
| 5313 // actually need this because the bits get shifted out again, but |
| 5314 // it's probably slower to test than just to do it. |
| 5315 Register scratch3 = ToRegister(instr->scratch3()); |
| 5316 __ mov(scratch3, FieldOperand(input_reg, HeapNumber::kMantissaOffset)); |
| 5317 __ xor_(result_reg, result_reg); |
| 5318 |
| 5319 const uint32_t non_int32_exponent = 31; |
| 5320 __ cmp(scratch2, Immediate(non_int32_exponent)); |
| 5321 // If we have a match of the int32 exponent then skip some logic. |
| 5322 __ j(equal, &right_exponent, Label::kNear); |
 | 5323 // If the number doesn't fit in an int32, deopt. |
| 5324 DeoptimizeIf(greater, instr->environment()); |
| 5325 |
| 5326 // Exponent word in scratch, exponent in scratch2. We know that 0 <= exponent |
| 5327 // < 31. |
| 5328 __ mov(result_reg, Immediate(31)); |
| 5329 __ sub(result_reg, scratch2); |
| 5330 |
| 5331 __ bind(&right_exponent); |
| 5332 |
| 5333 // Save off exponent for negative check later. |
| 5334 __ mov(scratch2, scratch); |
| 5335 |
| 5336 // Here result_reg is the shift, scratch is the exponent word. |
| 5337 // Get the top bits of the mantissa. |
| 5338 __ and_(scratch, HeapNumber::kMantissaMask); |
| 5339 // Put back the implicit 1. |
| 5340 __ or_(scratch, 1 << HeapNumber::kExponentShift); |
| 5341 // Shift up the mantissa bits to take up the space the exponent used to |
 | 5342 // take. We have kExponentShift + 1 significant bits in the low end of the |
| 5343 // word. Shift them to the top bits. |
| 5344 const int shift_distance = HeapNumber::kNonMantissaBitsInTopWord - 1; |
| 5345 __ shl(scratch, shift_distance); |
| 5346 if (!instr->truncating()) { |
| 5347 // If not truncating, a non-zero value in the bottom 22 bits means a |
| 5348 // non-integral value --> trigger a deopt. |
| 5349 __ test(scratch3, Immediate((1 << (32 - shift_distance)) - 1)); |
| 5350 DeoptimizeIf(not_equal, instr->environment()); |
| 5351 } |
 | 5352 // Shift down 22 bits to get the most significant 10 bits of the low |
| 5353 // mantissa word. |
| 5354 __ shr(scratch3, 32 - shift_distance); |
| 5355 __ or_(scratch3, scratch); |
| 5356 if (!instr->truncating()) { |
 | 5357 // If not truncating, a non-zero value in the bits that will be shifted |
 | 5358 // away when adjusting the exponent means rounding --> deopt. |
| 5359 __ mov(scratch, 0x1); |
| 5360 ASSERT(result_reg.is(ecx)); |
| 5361 __ shl_cl(scratch); |
| 5362 __ dec(scratch); |
| 5363 __ test(scratch3, scratch); |
| 5364 DeoptimizeIf(not_equal, instr->environment()); |
| 5365 } |
| 5366 // Move down according to the exponent. |
| 5367 ASSERT(result_reg.is(ecx)); |
| 5368 __ shr_cl(scratch3); |
| 5369 // Now the unsigned 32-bit answer is in scratch3. We need to move it to |
| 5370 // result_reg and we may need to fix the sign. |
| 5371 Label negative_result; |
| 5372 __ xor_(result_reg, result_reg); |
| 5373 __ cmp(scratch2, result_reg); |
| 5374 __ j(less, &negative_result, Label::kNear); |
| 5375 __ cmp(scratch3, result_reg); |
| 5376 __ mov(result_reg, scratch3); |
| 5377 // If the result is > MAX_INT, result doesn't fit in signed 32-bit --> deopt. |
| 5378 DeoptimizeIf(less, instr->environment()); |
| 5379 __ jmp(&done, Label::kNear); |
| 5380 __ bind(&zero_result); |
| 5381 __ xor_(result_reg, result_reg); |
| 5382 __ jmp(&done, Label::kNear); |
| 5383 __ bind(&negative_result); |
| 5384 __ sub(result_reg, scratch3); |
| 5385 if (!instr->truncating()) { |
| 5386 // -0.0 triggers a deopt. |
| 5387 DeoptimizeIf(zero, instr->environment()); |
| 5388 } |
| 5389 // If the negative subtraction overflows into a positive number, there was an |
| 5390 // overflow --> deopt. |
| 5391 DeoptimizeIf(positive, instr->environment()); |
| 5392 __ bind(&done); |
| 5393 } |
| 5394 |
| 5395 |
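DoDeferredTaggedToINoSSE2 above performs the double-to-int32 conversion entirely with integer shifts, since neither SSE2 nor a safe x87 sequence is available. A portable sketch of the core extraction under simplified assumptions (no deopts; exponents above 30 simply bail out here, where the real code special-cases exponent 31 and deoptimizes):

    #include <cstdint>
    #include <cstdio>
    #include <cstring>

    int32_t TruncateDouble(double v) {
      uint64_t bits;
      std::memcpy(&bits, &v, sizeof bits);
      int exponent = static_cast<int>((bits >> 52) & 0x7ff) - 1023;  // unbias
      if (exponent < 0) return 0;   // |v| < 1 truncates to zero
      if (exponent > 30) return 0;  // simplification; see the deopts above
      uint64_t mantissa = (bits & ((1ull << 52) - 1)) | (1ull << 52);  // implicit 1
      int32_t magnitude = static_cast<int32_t>(mantissa >> (52 - exponent));
      return (bits >> 63) ? -magnitude : magnitude;  // apply the sign bit
    }

    int main() {
      std::printf("%d %d %d\n", TruncateDouble(12345.75),
                  TruncateDouble(-2.5), TruncateDouble(0.99));
      // Prints: 12345 -2 0
      return 0;
    }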
| 5396 void LCodeGen::DoTaggedToINoSSE2(LTaggedToINoSSE2* instr) { |
| 5397 class DeferredTaggedToINoSSE2: public LDeferredCode { |
| 5398 public: |
| 5399 DeferredTaggedToINoSSE2(LCodeGen* codegen, LTaggedToINoSSE2* instr) |
| 5400 : LDeferredCode(codegen), instr_(instr) { } |
| 5401 virtual void Generate() { codegen()->DoDeferredTaggedToINoSSE2(instr_); } |
| 5402 virtual LInstruction* instr() { return instr_; } |
| 5403 private: |
| 5404 LTaggedToINoSSE2* instr_; |
| 5405 }; |
| 5406 |
| 5407 LOperand* input = instr->value(); |
| 5408 ASSERT(input->IsRegister()); |
| 5409 Register input_reg = ToRegister(input); |
| 5410 ASSERT(input_reg.is(ToRegister(instr->result()))); |
| 5411 |
| 5412 DeferredTaggedToINoSSE2* deferred = |
| 5413 new(zone()) DeferredTaggedToINoSSE2(this, instr); |
| 5414 |
| 5415 // Smi check. |
| 5416 __ JumpIfNotSmi(input_reg, deferred->entry()); |
| 5417 __ SmiUntag(input_reg); // Untag smi. |
| 5418 __ bind(deferred->exit()); |
| 5419 } |
| 5420 |
5097 | 5421 |
5098 void LCodeGen::DoNumberUntagD(LNumberUntagD* instr) { | 5422 void LCodeGen::DoNumberUntagD(LNumberUntagD* instr) { |
5099 LOperand* input = instr->value(); | 5423 LOperand* input = instr->value(); |
5100 ASSERT(input->IsRegister()); | 5424 ASSERT(input->IsRegister()); |
5101 LOperand* temp = instr->temp(); | 5425 LOperand* temp = instr->temp(); |
5102 ASSERT(temp == NULL || temp->IsRegister()); | 5426 ASSERT(temp == NULL || temp->IsRegister()); |
5103 LOperand* result = instr->result(); | 5427 LOperand* result = instr->result(); |
5104 ASSERT(result->IsDoubleRegister()); | 5428 ASSERT(result->IsDoubleRegister()); |
5105 | 5429 |
| 5430 Register input_reg = ToRegister(input); |
| 5431 bool deoptimize_on_minus_zero = |
| 5432 instr->hydrogen()->deoptimize_on_minus_zero(); |
| 5433 Register temp_reg = deoptimize_on_minus_zero ? ToRegister(temp) : no_reg; |
| 5434 |
| 5435 NumberUntagDMode mode = NUMBER_CANDIDATE_IS_ANY_TAGGED; |
| 5436 HValue* value = instr->hydrogen()->value(); |
| 5437 if (value->type().IsSmi()) { |
| 5438 if (value->IsLoadKeyed()) { |
| 5439 HLoadKeyed* load = HLoadKeyed::cast(value); |
| 5440 if (load->UsesMustHandleHole()) { |
| 5441 if (load->hole_mode() == ALLOW_RETURN_HOLE) { |
| 5442 mode = NUMBER_CANDIDATE_IS_SMI_CONVERT_HOLE; |
| 5443 } else { |
| 5444 mode = NUMBER_CANDIDATE_IS_SMI_OR_HOLE; |
| 5445 } |
| 5446 } else { |
| 5447 mode = NUMBER_CANDIDATE_IS_SMI; |
| 5448 } |
| 5449 } |
| 5450 } |
| 5451 |
5106 if (CpuFeatures::IsSupported(SSE2)) { | 5452 if (CpuFeatures::IsSupported(SSE2)) { |
5107 CpuFeatureScope scope(masm(), SSE2); | 5453 CpuFeatureScope scope(masm(), SSE2); |
5108 Register input_reg = ToRegister(input); | |
5109 XMMRegister result_reg = ToDoubleRegister(result); | 5454 XMMRegister result_reg = ToDoubleRegister(result); |
5110 | |
5111 bool deoptimize_on_minus_zero = | |
5112 instr->hydrogen()->deoptimize_on_minus_zero(); | |
5113 Register temp_reg = deoptimize_on_minus_zero ? ToRegister(temp) : no_reg; | |
5114 | |
5115 NumberUntagDMode mode = NUMBER_CANDIDATE_IS_ANY_TAGGED; | |
5116 HValue* value = instr->hydrogen()->value(); | |
5117 if (value->type().IsSmi()) { | |
5118 if (value->IsLoadKeyed()) { | |
5119 HLoadKeyed* load = HLoadKeyed::cast(value); | |
5120 if (load->UsesMustHandleHole()) { | |
5121 if (load->hole_mode() == ALLOW_RETURN_HOLE) { | |
5122 mode = NUMBER_CANDIDATE_IS_SMI_CONVERT_HOLE; | |
5123 } else { | |
5124 mode = NUMBER_CANDIDATE_IS_SMI_OR_HOLE; | |
5125 } | |
5126 } else { | |
5127 mode = NUMBER_CANDIDATE_IS_SMI; | |
5128 } | |
5129 } | |
5130 } | |
5131 | |
5132 EmitNumberUntagD(input_reg, | 5455 EmitNumberUntagD(input_reg, |
5133 temp_reg, | 5456 temp_reg, |
5134 result_reg, | 5457 result_reg, |
5135 instr->hydrogen()->deoptimize_on_undefined(), | 5458 instr->hydrogen()->deoptimize_on_undefined(), |
5136 deoptimize_on_minus_zero, | 5459 deoptimize_on_minus_zero, |
5137 instr->environment(), | 5460 instr->environment(), |
5138 mode); | 5461 mode); |
5139 } else { | 5462 } else { |
5140 UNIMPLEMENTED(); | 5463 EmitNumberUntagDNoSSE2(input_reg, |
| 5464 temp_reg, |
| 5465 instr->hydrogen()->deoptimize_on_undefined(), |
| 5466 deoptimize_on_minus_zero, |
| 5467 instr->environment(), |
| 5468 mode); |
| 5469 CurrentInstructionReturnsX87Result(); |
5141 } | 5470 } |
5142 } | 5471 } |
5143 | 5472 |
5144 | 5473 |
5145 void LCodeGen::DoDoubleToI(LDoubleToI* instr) { | 5474 void LCodeGen::DoDoubleToI(LDoubleToI* instr) { |
5146 LOperand* input = instr->value(); | 5475 LOperand* input = instr->value(); |
5147 ASSERT(input->IsDoubleRegister()); | 5476 ASSERT(input->IsDoubleRegister()); |
5148 LOperand* result = instr->result(); | 5477 LOperand* result = instr->result(); |
5149 ASSERT(result->IsRegister()); | 5478 ASSERT(result->IsRegister()); |
5150 CpuFeatureScope scope(masm(), SSE2); | 5479 CpuFeatureScope scope(masm(), SSE2); |
(...skipping 251 matching lines...)
5402 // Heap number | 5731 // Heap number |
5403 __ bind(&heap_number); | 5732 __ bind(&heap_number); |
5404 __ movdbl(xmm0, FieldOperand(input_reg, HeapNumber::kValueOffset)); | 5733 __ movdbl(xmm0, FieldOperand(input_reg, HeapNumber::kValueOffset)); |
5405 __ ClampDoubleToUint8(xmm0, xmm1, input_reg); | 5734 __ ClampDoubleToUint8(xmm0, xmm1, input_reg); |
5406 __ jmp(&done, Label::kNear); | 5735 __ jmp(&done, Label::kNear); |
5407 | 5736 |
5408 // smi | 5737 // smi |
5409 __ bind(&is_smi); | 5738 __ bind(&is_smi); |
5410 __ SmiUntag(input_reg); | 5739 __ SmiUntag(input_reg); |
5411 __ ClampUint8(input_reg); | 5740 __ ClampUint8(input_reg); |
5412 | |
5413 __ bind(&done); | 5741 __ bind(&done); |
5414 } | 5742 } |
5415 | 5743 |
| 5744 |
| 5745 void LCodeGen::DoClampTToUint8NoSSE2(LClampTToUint8NoSSE2* instr) { |
| 5746 Register input_reg = ToRegister(instr->unclamped()); |
| 5747 Register result_reg = ToRegister(instr->result()); |
| 5748 Register scratch = ToRegister(instr->scratch()); |
| 5749 Register scratch2 = ToRegister(instr->scratch2()); |
| 5750 Register scratch3 = ToRegister(instr->scratch3()); |
| 5751 Label is_smi, done, heap_number, valid_exponent, |
| 5752 largest_value, zero_result, maybe_nan_or_infinity; |
| 5753 |
| 5754 __ JumpIfSmi(input_reg, &is_smi); |
| 5755 |
| 5756 // Check for heap number |
| 5757 __ cmp(FieldOperand(input_reg, HeapObject::kMapOffset), |
| 5758 factory()->heap_number_map()); |
| 5759 __ j(equal, &heap_number, Label::kFar); |
| 5760 |
| 5761 // Check for undefined. Undefined is converted to zero for clamping |
| 5762 // conversions. |
| 5763 __ cmp(input_reg, factory()->undefined_value()); |
| 5764 DeoptimizeIf(not_equal, instr->environment()); |
| 5765 __ jmp(&zero_result); |
| 5766 |
| 5767 // Heap number |
| 5768 __ bind(&heap_number); |
| 5769 |
| 5770 // Surprisingly, all of the hand-crafted bit-manipulations below are much |
| 5771 // faster than the x86 FPU built-in instruction, especially since "banker's |
| 5772 // rounding" would be additionally very expensive |
| 5773 |
| 5774 // Get exponent word. |
| 5775 __ mov(scratch, FieldOperand(input_reg, HeapNumber::kExponentOffset)); |
| 5776 __ mov(scratch3, FieldOperand(input_reg, HeapNumber::kMantissaOffset)); |
| 5777 |
| 5778 // Test for negative values --> clamp to zero |
| 5779 __ test(scratch, scratch); |
| 5780 __ j(negative, &zero_result); |
| 5781 |
| 5782 // Get exponent alone in scratch2. |
| 5783 __ mov(scratch2, scratch); |
| 5784 __ and_(scratch2, HeapNumber::kExponentMask); |
| 5785 __ shr(scratch2, HeapNumber::kExponentShift); |
| 5786 __ j(zero, &zero_result); |
| 5787 __ sub(scratch2, Immediate(HeapNumber::kExponentBias - 1)); |
| 5788 __ j(negative, &zero_result); |
| 5789 |
| 5790 const uint32_t non_int8_exponent = 7; |
| 5791 __ cmp(scratch2, Immediate(non_int8_exponent + 1)); |
| 5792 // If the exponent is too big, check for special values. |
| 5793 __ j(greater, &maybe_nan_or_infinity, Label::kNear); |
| 5794 |
| 5795 __ bind(&valid_exponent); |
| 5796 // Exponent word in scratch, exponent in scratch2. We know that 0 <= exponent |
| 5797 // < 7. The shift bias is the number of bits to shift the mantissa such that |
 | 5798 // with an exponent of 7 the top-most one is in bit 30, allowing |
 | 5799 // detection of the rounding overflow from 255.5 to 256 (bit 31 goes |
 | 5800 // from 0 to 1). |
| 5801 int shift_bias = (30 - HeapNumber::kExponentShift) - 7 - 1; |
| 5802 __ lea(result_reg, MemOperand(scratch2, shift_bias)); |
| 5803 // Here result_reg (ecx) is the shift, scratch is the exponent word. Get the |
| 5804 // top bits of the mantissa. |
| 5805 __ and_(scratch, HeapNumber::kMantissaMask); |
| 5806 // Put back the implicit 1 of the mantissa |
| 5807 __ or_(scratch, 1 << HeapNumber::kExponentShift); |
| 5808 // Shift up to round |
| 5809 __ shl_cl(scratch); |
| 5810 // Use "banker's rounding" to spec: If fractional part of number is 0.5, then |
| 5811 // use the bit in the "ones" place and add it to the "halves" place, which has |
| 5812 // the effect of rounding to even. |
| 5813 __ mov(scratch2, scratch); |
| 5814 const uint32_t one_half_bit_shift = 30 - sizeof(uint8_t) * 8; |
| 5815 const uint32_t one_bit_shift = one_half_bit_shift + 1; |
| 5816 __ and_(scratch2, Immediate((1 << one_bit_shift) - 1)); |
| 5817 __ cmp(scratch2, Immediate(1 << one_half_bit_shift)); |
| 5818 Label no_round; |
| 5819 __ j(less, &no_round); |
| 5820 Label round_up; |
| 5821 __ mov(scratch2, Immediate(1 << one_half_bit_shift)); |
| 5822 __ j(greater, &round_up); |
| 5823 __ test(scratch3, scratch3); |
| 5824 __ j(not_zero, &round_up); |
| 5825 __ mov(scratch2, scratch); |
| 5826 __ and_(scratch2, Immediate(1 << one_bit_shift)); |
| 5827 __ shr(scratch2, 1); |
| 5828 __ bind(&round_up); |
| 5829 __ add(scratch, scratch2); |
| 5830 __ j(overflow, &largest_value); |
| 5831 __ bind(&no_round); |
| 5832 __ shr(scratch, 23); |
| 5833 __ mov(result_reg, scratch); |
| 5834 __ jmp(&done, Label::kNear); |
| 5835 |
| 5836 __ bind(&maybe_nan_or_infinity); |
 | 5837 // Check for NaN/Infinity; all other values map to 255. |
| 5838 __ cmp(scratch2, Immediate(HeapNumber::kInfinityOrNanExponent + 1)); |
| 5839 __ j(not_equal, &largest_value, Label::kNear); |
| 5840 |
| 5841 // Check for NaN, which differs from Infinity in that at least one mantissa |
| 5842 // bit is set. |
| 5843 __ and_(scratch, HeapNumber::kMantissaMask); |
| 5844 __ or_(scratch, FieldOperand(input_reg, HeapNumber::kMantissaOffset)); |
| 5845 __ j(not_zero, &zero_result); // M!=0 --> NaN |
| 5846 // Infinity -> Fall through to map to 255. |
| 5847 |
| 5848 __ bind(&largest_value); |
| 5849 __ mov(result_reg, Immediate(255)); |
| 5850 __ jmp(&done, Label::kNear); |
| 5851 |
| 5852 __ bind(&zero_result); |
| 5853 __ xor_(result_reg, result_reg); |
| 5854 __ jmp(&done); |
| 5855 |
| 5856 // smi |
| 5857 __ bind(&is_smi); |
| 5858 if (!input_reg.is(result_reg)) { |
| 5859 __ mov(result_reg, input_reg); |
| 5860 } |
| 5861 __ SmiUntag(result_reg); |
| 5862 __ ClampUint8(result_reg); |
| 5863 __ bind(&done); |
| 5864 } |
| 5865 |
5416 | 5866 |
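The clamp above hand-rolls round-half-to-even because the x87 alternative ("banker's rounding" via the FPU) is slow. As a behavioral reference only (not the production algorithm; the function name is made up), the mapping it implements is:

    #include <cmath>
    #include <cstdio>

    // Clamp a double to [0, 255] with ties rounded to even.
    int ClampToUint8(double v) {
      if (!(v > 0.0)) return 0;    // negatives, -0.0 and NaN clamp to 0
      if (v >= 255.0) return 255;  // covers +Infinity as well
      return static_cast<int>(std::nearbyint(v));  // ties-to-even by default
    }

    int main() {
      std::printf("%d %d %d %d\n",
                  ClampToUint8(0.5),     // 0  (tie -> even)
                  ClampToUint8(1.5),     // 2  (tie -> even)
                  ClampToUint8(254.5),   // 254
                  ClampToUint8(1e9));    // 255
      return 0;
    }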
5417 void LCodeGen::DoCheckPrototypeMaps(LCheckPrototypeMaps* instr) { | 5867 void LCodeGen::DoCheckPrototypeMaps(LCheckPrototypeMaps* instr) { |
5418 Register reg = ToRegister(instr->temp()); | 5868 Register reg = ToRegister(instr->temp()); |
5419 | 5869 |
5420 ZoneList<Handle<JSObject> >* prototypes = instr->prototypes(); | 5870 ZoneList<Handle<JSObject> >* prototypes = instr->prototypes(); |
5421 ZoneList<Handle<Map> >* maps = instr->maps(); | 5871 ZoneList<Handle<Map> >* maps = instr->maps(); |
5422 | 5872 |
5423 ASSERT(prototypes->length() == maps->length()); | 5873 ASSERT(prototypes->length() == maps->length()); |
5424 | 5874 |
5425 if (instr->hydrogen()->CanOmitPrototypeChecks()) { | 5875 if (instr->hydrogen()->CanOmitPrototypeChecks()) { |
(...skipping 867 matching lines...)
6293 FixedArray::kHeaderSize - kPointerSize)); | 6743 FixedArray::kHeaderSize - kPointerSize)); |
6294 __ bind(&done); | 6744 __ bind(&done); |
6295 } | 6745 } |
6296 | 6746 |
6297 | 6747 |
6298 #undef __ | 6748 #undef __ |
6299 | 6749 |
6300 } } // namespace v8::internal | 6750 } } // namespace v8::internal |
6301 | 6751 |
6302 #endif // V8_TARGET_ARCH_IA32 | 6752 #endif // V8_TARGET_ARCH_IA32 |