OLD | NEW |
1 // Copyright 2013 the V8 project authors. All rights reserved. | 1 // Copyright 2013 the V8 project authors. All rights reserved. |
2 // Redistribution and use in source and binary forms, with or without | 2 // Redistribution and use in source and binary forms, with or without |
3 // modification, are permitted provided that the following conditions are | 3 // modification, are permitted provided that the following conditions are |
4 // met: | 4 // met: |
5 // | 5 // |
6 // * Redistributions of source code must retain the above copyright | 6 // * Redistributions of source code must retain the above copyright |
7 // notice, this list of conditions and the following disclaimer. | 7 // notice, this list of conditions and the following disclaimer. |
8 // * Redistributions in binary form must reproduce the above | 8 // * Redistributions in binary form must reproduce the above |
9 // copyright notice, this list of conditions and the following | 9 // copyright notice, this list of conditions and the following |
10 // disclaimer in the documentation and/or other materials provided | 10 // disclaimer in the documentation and/or other materials provided |
(...skipping 1088 matching lines...)
1099 | 1099 |
1100 | 1100 |
1101 void LCodeGen::DeoptimizeIfNotRoot(Register rt, | 1101 void LCodeGen::DeoptimizeIfNotRoot(Register rt, |
1102 Heap::RootListIndex index, | 1102 Heap::RootListIndex index, |
1103 LEnvironment* environment) { | 1103 LEnvironment* environment) { |
1104 __ CompareRoot(rt, index); | 1104 __ CompareRoot(rt, index); |
1105 DeoptimizeIf(ne, environment); | 1105 DeoptimizeIf(ne, environment); |
1106 } | 1106 } |
1107 | 1107 |
1108 | 1108 |
| 1109 void LCodeGen::DeoptimizeIfMinusZero(DoubleRegister input, |
| 1110 LEnvironment* environment) { |
| 1111 __ TestForMinusZero(input); |
| 1112 DeoptimizeIf(vs, environment); |
| 1113 } |
| 1114 |
| 1115 |
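The new DeoptimizeIfMinusZero helper folds the old JumpIfMinusZero/label pattern into a single flag-setting check: TestForMinusZero evidently reports minus zero through the V flag, since the deopt is taken on vs. The subtle part is that -0.0 compares equal to +0.0, so the test has to look at the raw bit pattern. A minimal standalone sketch of that bit-level check (illustrative only; TestForMinusZero and DeoptimizeIf are V8-internal macro-assembler routines, not this C++):

    #include <cstdint>
    #include <cstdio>
    #include <cstring>

    // -0.0 is the only double whose bit pattern is exactly 0x8000000000000000
    // (sign bit set, everything else zero), so a bit comparison is needed.
    static bool IsMinusZero(double d) {
      uint64_t bits;
      std::memcpy(&bits, &d, sizeof(bits));
      return bits == 0x8000000000000000ULL;
    }

    int main() {
      // Prints "1 0 0": note that -0.0 == 0.0 is true, so an ordinary
      // floating-point comparison cannot make this distinction.
      std::printf("%d %d %d\n", IsMinusZero(-0.0), IsMinusZero(0.0), IsMinusZero(-1.0));
      return 0;
    }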
1109 void LCodeGen::EnsureSpaceForLazyDeopt(int space_needed) { | 1116 void LCodeGen::EnsureSpaceForLazyDeopt(int space_needed) { |
1110 if (!info()->IsStub()) { | 1117 if (!info()->IsStub()) { |
1111 // Ensure that we have enough space after the previous lazy-bailout | 1118 // Ensure that we have enough space after the previous lazy-bailout |
1112 // instruction for patching the code here. | 1119 // instruction for patching the code here. |
1113 intptr_t current_pc = masm()->pc_offset(); | 1120 intptr_t current_pc = masm()->pc_offset(); |
1114 | 1121 |
1115 if (current_pc < (last_lazy_deopt_pc_ + space_needed)) { | 1122 if (current_pc < (last_lazy_deopt_pc_ + space_needed)) { |
1116 ptrdiff_t padding_size = last_lazy_deopt_pc_ + space_needed - current_pc; | 1123 ptrdiff_t padding_size = last_lazy_deopt_pc_ + space_needed - current_pc; |
1117 ASSERT((padding_size % kInstructionSize) == 0); | 1124 ASSERT((padding_size % kInstructionSize) == 0); |
1118 InstructionAccurateScope instruction_accurate( | 1125 InstructionAccurateScope instruction_accurate( |
(...skipping 1618 matching lines...)
2737 __ B(&div_ok); | 2744 __ B(&div_ok); |
2738 __ Bind(&deopt); | 2745 __ Bind(&deopt); |
2739 Deoptimize(instr->environment()); | 2746 Deoptimize(instr->environment()); |
2740 __ Bind(&div_ok); | 2747 __ Bind(&div_ok); |
2741 } | 2748 } |
2742 | 2749 |
2743 | 2750 |
2744 void LCodeGen::DoDoubleToIntOrSmi(LDoubleToIntOrSmi* instr) { | 2751 void LCodeGen::DoDoubleToIntOrSmi(LDoubleToIntOrSmi* instr) { |
2745 DoubleRegister input = ToDoubleRegister(instr->value()); | 2752 DoubleRegister input = ToDoubleRegister(instr->value()); |
2746 Register result = ToRegister32(instr->result()); | 2753 Register result = ToRegister32(instr->result()); |
2747 Label done, deopt; | |
2748 | 2754 |
2749 if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) { | 2755 if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) { |
2750 __ JumpIfMinusZero(input, &deopt); | 2756 DeoptimizeIfMinusZero(input, instr->environment()); |
2751 } | 2757 } |
2752 | 2758 |
2753 __ TryConvertDoubleToInt32(result, input, double_scratch(), &done); | 2759 __ TryConvertDoubleToInt32(result, input, double_scratch()); |
2754 __ Bind(&deopt); | 2760 DeoptimizeIf(ne, instr->environment()); |
2755 Deoptimize(instr->environment()); | |
2756 __ Bind(&done); | |
2757 | 2761 |
2758 if (instr->tag_result()) { | 2762 if (instr->tag_result()) { |
2759 __ SmiTag(result.X()); | 2763 __ SmiTag(result.X()); |
2760 } | 2764 } |
2761 } | 2765 } |
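The rewrite of DoDoubleToIntOrSmi replaces the label-based bailout with a flag-based contract: after TryConvertDoubleToInt32, the following DeoptimizeIf(ne, ...) presumably fires when the conversion lost information (out of range, NaN, or a fractional value). A standalone sketch of that lossless-conversion test under that assumption (the real helper is a macro-assembler routine; this C++ is only an illustration):

    #include <cmath>
    #include <cstdint>

    // Returns true and writes *out only if `input` converts to int32 exactly.
    static bool ConvertsToInt32Exactly(double input, int32_t* out) {
      if (std::isnan(input) || input < INT32_MIN || input > INT32_MAX) return false;
      int32_t candidate = static_cast<int32_t>(input);  // truncates toward zero
      // Lossless only if converting back reproduces the input exactly; this
      // rejects fractional inputs. -0.0 slips through as 0, which is why the
      // kBailoutOnMinusZero case is checked separately above.
      if (static_cast<double>(candidate) != input) return false;
      *out = candidate;
      return true;
    }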
2762 | 2766 |
2763 | 2767 |
2764 void LCodeGen::DoDrop(LDrop* instr) { | 2768 void LCodeGen::DoDrop(LDrop* instr) { |
2765 __ Drop(instr->count()); | 2769 __ Drop(instr->count()); |
2766 } | 2770 } |
(...skipping 1042 matching lines...)
3809 double_temp1, double_temp2, | 3813 double_temp1, double_temp2, |
3810 temp1, temp2, temp3); | 3814 temp1, temp2, temp3); |
3811 } | 3815 } |
3812 | 3816 |
3813 | 3817 |
3814 void LCodeGen::DoMathFloor(LMathFloor* instr) { | 3818 void LCodeGen::DoMathFloor(LMathFloor* instr) { |
3815 // TODO(jbramley): If we could provide a double result, we could use frintm | 3819 // TODO(jbramley): If we could provide a double result, we could use frintm |
3816 // and produce a valid double result in a single instruction. | 3820 // and produce a valid double result in a single instruction. |
3817 DoubleRegister input = ToDoubleRegister(instr->value()); | 3821 DoubleRegister input = ToDoubleRegister(instr->value()); |
3818 Register result = ToRegister(instr->result()); | 3822 Register result = ToRegister(instr->result()); |
3819 Label deopt; | |
3820 Label done; | |
3821 | 3823 |
3822 if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) { | 3824 if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) { |
3823 __ JumpIfMinusZero(input, &deopt); | 3825 DeoptimizeIfMinusZero(input, instr->environment()); |
3824 } | 3826 } |
3825 | 3827 |
3826 __ Fcvtms(result, input); | 3828 __ Fcvtms(result, input); |
3827 | 3829 |
3828 // Check that the result fits into a 32-bit integer. | 3830 // Check that the result fits into a 32-bit integer. |
3829 // - The result did not overflow. | 3831 // - The result did not overflow. |
3830 __ Cmp(result, Operand(result, SXTW)); | 3832 __ Cmp(result, Operand(result, SXTW)); |
3831 // - The input was not NaN. | 3833 // - The input was not NaN. |
3832 __ Fccmp(input, input, NoFlag, eq); | 3834 __ Fccmp(input, input, NoFlag, eq); |
3833 __ B(&done, eq); | 3835 DeoptimizeIf(ne, instr->environment()); |
3834 | |
3835 __ Bind(&deopt); | |
3836 Deoptimize(instr->environment()); | |
3837 | |
3838 __ Bind(&done); | |
3839 } | 3836 } |
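The two deopt conditions in DoMathFloor are now folded into one conditional deopt: Cmp(result, Operand(result, SXTW)) sets eq exactly when the 64-bit Fcvtms result equals its own low 32 bits sign-extended back to 64 bits (i.e. the value fits in int32), and Fccmp(input, input, NoFlag, eq) keeps eq only if the input is additionally not NaN, so the single DeoptimizeIf(ne, ...) covers both cases. A standalone sketch of the same pair of conditions (illustrative, not the V8 helper):

    #include <cmath>
    #include <cstdint>

    // Accepts the input only if it is not NaN and floor(input) fits in int32,
    // mirroring the Fccmp NaN check and the Cmp/SXTW range check above.
    static bool FloorToInt32(double input, int32_t* out) {
      if (std::isnan(input)) return false;               // the Fccmp(input, input) case
      double f = std::floor(input);                      // Fcvtms rounds toward -infinity
      if (f < INT32_MIN || f > INT32_MAX) return false;  // does not fit in 32 bits
      *out = static_cast<int32_t>(f);
      return true;
    }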
3840 | 3837 |
3841 | 3838 |
3842 void LCodeGen::DoFlooringDivByPowerOf2I(LFlooringDivByPowerOf2I* instr) { | 3839 void LCodeGen::DoFlooringDivByPowerOf2I(LFlooringDivByPowerOf2I* instr) { |
3843 Register dividend = ToRegister32(instr->dividend()); | 3840 Register dividend = ToRegister32(instr->dividend()); |
3844 Register result = ToRegister32(instr->result()); | 3841 Register result = ToRegister32(instr->result()); |
3845 int32_t divisor = instr->divisor(); | 3842 int32_t divisor = instr->divisor(); |
3846 | 3843 |
3847 // If the divisor is positive, things are easy: There can be no deopts and we | 3844 // If the divisor is positive, things are easy: There can be no deopts and we |
3848 // can simply do an arithmetic right shift. | 3845 // can simply do an arithmetic right shift. |
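As a quick sanity check on the comment above: for a positive power-of-two divisor, an arithmetic right shift already rounds toward negative infinity, so it computes the flooring division exactly for every dividend and no deopt is needed on this path. A tiny standalone check (illustrative; the shift amount and test values are arbitrary):

    #include <cassert>
    #include <cmath>
    #include <cstdint>

    int main() {
      const int shift = 3;  // divisor = 8
      for (int32_t x : {27, -27, 0, -1, -8}) {
        // Arithmetic shift on signed values (what ARM64's asr does; guaranteed
        // for signed integers since C++20 and universal in practice).
        int32_t shifted = x >> shift;
        int32_t floored = static_cast<int32_t>(std::floor(x / 8.0));
        assert(shifted == floored);  // e.g. -27 >> 3 == -4 == floor(-3.375)
      }
      return 0;
    }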
(...skipping 670 matching lines...)
4519 Label done, load_smi; | 4516 Label done, load_smi; |
4520 | 4517 |
4521 // Work out what untag mode we're working with. | 4518 // Work out what untag mode we're working with. |
4522 HValue* value = instr->hydrogen()->value(); | 4519 HValue* value = instr->hydrogen()->value(); |
4523 NumberUntagDMode mode = value->representation().IsSmi() | 4520 NumberUntagDMode mode = value->representation().IsSmi() |
4524 ? NUMBER_CANDIDATE_IS_SMI : NUMBER_CANDIDATE_IS_ANY_TAGGED; | 4521 ? NUMBER_CANDIDATE_IS_SMI : NUMBER_CANDIDATE_IS_ANY_TAGGED; |
4525 | 4522 |
4526 if (mode == NUMBER_CANDIDATE_IS_ANY_TAGGED) { | 4523 if (mode == NUMBER_CANDIDATE_IS_ANY_TAGGED) { |
4527 __ JumpIfSmi(input, &load_smi); | 4524 __ JumpIfSmi(input, &load_smi); |
4528 | 4525 |
4529 Label convert_undefined, deopt; | 4526 Label convert_undefined; |
4530 | 4527 |
4531 // Heap number map check. | 4528 // Heap number map check. |
4532 Label* not_heap_number = can_convert_undefined_to_nan ? &convert_undefined | |
4533 : &deopt; | |
4534 __ Ldr(scratch, FieldMemOperand(input, HeapObject::kMapOffset)); | 4529 __ Ldr(scratch, FieldMemOperand(input, HeapObject::kMapOffset)); |
4535 __ JumpIfNotRoot(scratch, Heap::kHeapNumberMapRootIndex, not_heap_number); | 4530 if (can_convert_undefined_to_nan) { |
| 4531 __ JumpIfNotRoot(scratch, Heap::kHeapNumberMapRootIndex, |
| 4532 &convert_undefined); |
| 4533 } else { |
| 4534 DeoptimizeIfNotRoot(scratch, Heap::kHeapNumberMapRootIndex, |
| 4535 instr->environment()); |
| 4536 } |
4536 | 4537 |
4537 // Load heap number. | 4538 // Load heap number. |
4538 __ Ldr(result, FieldMemOperand(input, HeapNumber::kValueOffset)); | 4539 __ Ldr(result, FieldMemOperand(input, HeapNumber::kValueOffset)); |
4539 if (instr->hydrogen()->deoptimize_on_minus_zero()) { | 4540 if (instr->hydrogen()->deoptimize_on_minus_zero()) { |
4540 __ JumpIfMinusZero(result, &deopt); | 4541 DeoptimizeIfMinusZero(result, instr->environment()); |
4541 } | 4542 } |
4542 __ B(&done); | 4543 __ B(&done); |
4543 | 4544 |
4544 if (can_convert_undefined_to_nan) { | 4545 if (can_convert_undefined_to_nan) { |
4545 __ Bind(&convert_undefined); | 4546 __ Bind(&convert_undefined); |
4546 __ JumpIfNotRoot(input, Heap::kUndefinedValueRootIndex, &deopt); | 4547 DeoptimizeIfNotRoot(input, Heap::kUndefinedValueRootIndex, |
| 4548 instr->environment()); |
4547 | 4549 |
4548 __ LoadRoot(scratch, Heap::kNanValueRootIndex); | 4550 __ LoadRoot(scratch, Heap::kNanValueRootIndex); |
4549 __ Ldr(result, FieldMemOperand(scratch, HeapNumber::kValueOffset)); | 4551 __ Ldr(result, FieldMemOperand(scratch, HeapNumber::kValueOffset)); |
4550 __ B(&done); | 4552 __ B(&done); |
4551 } | 4553 } |
4552 | 4554 |
4553 __ Bind(&deopt); | |
4554 Deoptimize(instr->environment()); | |
4555 } else { | 4555 } else { |
4556 ASSERT(mode == NUMBER_CANDIDATE_IS_SMI); | 4556 ASSERT(mode == NUMBER_CANDIDATE_IS_SMI); |
4557 // Fall through to load_smi. | 4557 // Fall through to load_smi. |
4558 } | 4558 } |
4559 | 4559 |
4560 // Smi to double register conversion. | 4560 // Smi to double register conversion. |
4561 __ Bind(&load_smi); | 4561 __ Bind(&load_smi); |
4562 __ SmiUntagToDouble(result, input); | 4562 __ SmiUntagToDouble(result, input); |
4563 | 4563 |
4564 __ Bind(&done); | 4564 __ Bind(&done); |
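Taken together, the branches above form a small decision tree over the tagged input: smis convert directly, heap numbers load their payload (with an optional minus-zero bailout), and undefined becomes NaN only when the instruction allows it; anything else deoptimizes. A hypothetical standalone model of that control flow (the Tagged struct and TagKind enum are made up for illustration and are not V8's object layout; std::nullopt stands in for a deopt):

    #include <cmath>
    #include <cstdint>
    #include <optional>

    enum class TagKind { kSmi, kHeapNumber, kUndefined, kOther };

    struct Tagged {        // hypothetical stand-in for a tagged value
      TagKind kind;
      int32_t smi_value;   // valid when kind == kSmi
      double heap_number;  // valid when kind == kHeapNumber
    };

    std::optional<double> UntagToDouble(const Tagged& v,
                                        bool can_convert_undefined_to_nan,
                                        bool deopt_on_minus_zero) {
      switch (v.kind) {
        case TagKind::kSmi:
          return static_cast<double>(v.smi_value);
        case TagKind::kHeapNumber:
          if (deopt_on_minus_zero && v.heap_number == 0.0 &&
              std::signbit(v.heap_number)) {
            return std::nullopt;         // DeoptimizeIfMinusZero
          }
          return v.heap_number;
        case TagKind::kUndefined:
          if (can_convert_undefined_to_nan) return std::nan("");  // NaN value
          return std::nullopt;           // map check deopts
        default:
          return std::nullopt;           // not a heap number: deopt
      }
    }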
(...skipping 922 matching lines...)
5487 __ B(eq, &done); | 5487 __ B(eq, &done); |
5488 | 5488 |
5489 // Output contains zero; undefined is converted to zero for truncating | 5489 // Output contains zero; undefined is converted to zero for truncating |
5490 // conversions. | 5490 // conversions. |
5491 DeoptimizeIfNotRoot(input, Heap::kUndefinedValueRootIndex, | 5491 DeoptimizeIfNotRoot(input, Heap::kUndefinedValueRootIndex, |
5492 instr->environment()); | 5492 instr->environment()); |
5493 } else { | 5493 } else { |
5494 Register output = ToRegister32(instr->result()); | 5494 Register output = ToRegister32(instr->result()); |
5495 | 5495 |
5496 DoubleRegister dbl_scratch2 = ToDoubleRegister(temp2); | 5496 DoubleRegister dbl_scratch2 = ToDoubleRegister(temp2); |
5497 Label converted; | |
5498 | 5497 |
5499 // Deoptimize if it's not a heap number. | 5498 // Deoptimize if it's not a heap number. |
5500 DeoptimizeIfNotRoot(scratch1, Heap::kHeapNumberMapRootIndex, | 5499 DeoptimizeIfNotRoot(scratch1, Heap::kHeapNumberMapRootIndex, |
5501 instr->environment()); | 5500 instr->environment()); |
5502 | 5501 |
5503 // A heap number: load value and convert to int32 using non-truncating | 5502 // A heap number: load value and convert to int32 using non-truncating |
5504 // function. If the result is out of range, branch to deopt. | 5503 // function. Deoptimize if the result is out of range. |
5505 __ Ldr(dbl_scratch1, FieldMemOperand(input, HeapNumber::kValueOffset)); | 5504 __ Ldr(dbl_scratch1, FieldMemOperand(input, HeapNumber::kValueOffset)); |
5506 __ TryConvertDoubleToInt32(output, dbl_scratch1, dbl_scratch2, &converted); | 5505 __ TryConvertDoubleToInt32(output, dbl_scratch1, dbl_scratch2); |
5507 Deoptimize(instr->environment()); | 5506 DeoptimizeIf(ne, instr->environment()); |
5508 | |
5509 __ Bind(&converted); | |
5510 | 5507 |
5511 if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) { | 5508 if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) { |
5512 __ Cmp(output, 0); | 5509 __ Cmp(output, 0); |
5513 __ B(ne, &done); | 5510 __ B(ne, &done); |
5514 __ Fmov(scratch1, dbl_scratch1); | 5511 __ Fmov(scratch1, dbl_scratch1); |
5515 DeoptimizeIfNegative(scratch1, instr->environment()); | 5512 DeoptimizeIfNegative(scratch1, instr->environment()); |
5516 } | 5513 } |
5517 } | 5514 } |
5518 __ Bind(&done); | 5515 __ Bind(&done); |
5519 } | 5516 } |
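The minus-zero handling at the end of this path leans on the raw IEEE-754 bits: once the exact conversion has produced 0, the original double can only have been 0.0 or -0.0, and the Fmov into an integer register followed by DeoptimizeIfNegative tests the sign bit of the moved bit pattern. A standalone sketch of that final test (illustrative, not V8 code):

    #include <cstdint>
    #include <cstring>

    // Assumes the caller has already established that `input` converted to the
    // int32 value 0 exactly; the sign bit then distinguishes -0.0 from 0.0.
    static bool IsMinusZeroGivenZeroResult(double input) {
      uint64_t bits;
      std::memcpy(&bits, &input, sizeof(bits));  // the Fmov(scratch1, dbl_scratch1) step
      return (bits >> 63) != 0;                  // sign bit set -> it was -0.0
    }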
(...skipping 319 matching lines...)
5839 __ Bind(&out_of_object); | 5836 __ Bind(&out_of_object); |
5840 __ Ldr(result, FieldMemOperand(object, JSObject::kPropertiesOffset)); | 5837 __ Ldr(result, FieldMemOperand(object, JSObject::kPropertiesOffset)); |
5841 // Index is equal to negated (out of object property index plus 1). | 5838 // Index is equal to negated (out of object property index plus 1). |
5842 __ Sub(result, result, Operand::UntagSmiAndScale(index, kPointerSizeLog2)); | 5839 __ Sub(result, result, Operand::UntagSmiAndScale(index, kPointerSizeLog2)); |
5843 __ Ldr(result, FieldMemOperand(result, | 5840 __ Ldr(result, FieldMemOperand(result, |
5844 FixedArray::kHeaderSize - kPointerSize)); | 5841 FixedArray::kHeaderSize - kPointerSize)); |
5845 __ Bind(&done); | 5842 __ Bind(&done); |
5846 } | 5843 } |
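The address arithmetic in the out-of-object case follows from the encoding in the comment: the smi-untagged index is -(out-of-object index + 1), so subtracting the scaled (negative) index and then loading at offset FixedArray::kHeaderSize - kPointerSize lands on the right element of the properties array. A small worked example (the base address and header size below are assumptions chosen purely for illustration):

    #include <cassert>
    #include <cstdint>

    int main() {
      const intptr_t kPointerSize = 8;
      const intptr_t kHeaderSize = 2 * kPointerSize;  // assumed FixedArray header size
      const intptr_t properties = 0x1000;             // hypothetical untagged base address
      const intptr_t index = -3;                      // encodes out-of-object slot 2

      // Sub(result, result, UntagSmiAndScale(index, ...)) with a negative index
      // adds, and the load offset then steps back one slot for the "+ 1".
      intptr_t result = properties - index * kPointerSize;
      intptr_t address = result + kHeaderSize - kPointerSize;

      intptr_t out_of_object_index = -index - 1;      // here: 2
      assert(address == properties + kHeaderSize + out_of_object_index * kPointerSize);
      return 0;
    }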
5847 | 5844 |
5848 } } // namespace v8::internal | 5845 } } // namespace v8::internal |