| OLD | NEW |
| 1 // Copyright 2012 the V8 project authors. All rights reserved. | 1 // Copyright 2012 the V8 project authors. All rights reserved. |
| 2 // Redistribution and use in source and binary forms, with or without | 2 // Redistribution and use in source and binary forms, with or without |
| 3 // modification, are permitted provided that the following conditions are | 3 // modification, are permitted provided that the following conditions are |
| 4 // met: | 4 // met: |
| 5 // | 5 // |
| 6 // * Redistributions of source code must retain the above copyright | 6 // * Redistributions of source code must retain the above copyright |
| 7 // notice, this list of conditions and the following disclaimer. | 7 // notice, this list of conditions and the following disclaimer. |
| 8 // * Redistributions in binary form must reproduce the above | 8 // * Redistributions in binary form must reproduce the above |
| 9 // copyright notice, this list of conditions and the following | 9 // copyright notice, this list of conditions and the following |
| 10 // disclaimer in the documentation and/or other materials provided | 10 // disclaimer in the documentation and/or other materials provided |
| (...skipping 99 matching lines...) |
| 110 if (buffer == NULL) return &exp; | 110 if (buffer == NULL) return &exp; |
| 111 ExternalReference::InitializeMathExpData(); | 111 ExternalReference::InitializeMathExpData(); |
| 112 | 112 |
| 113 MacroAssembler masm(NULL, buffer, static_cast<int>(actual_size)); | 113 MacroAssembler masm(NULL, buffer, static_cast<int>(actual_size)); |
| 114 // esp[1 * kPointerSize]: raw double input | 114 // esp[1 * kPointerSize]: raw double input |
| 115 // esp[0 * kPointerSize]: return address | 115 // esp[0 * kPointerSize]: return address |
| 116 { | 116 { |
| 117 CpuFeatureScope use_sse2(&masm, SSE2); | 117 CpuFeatureScope use_sse2(&masm, SSE2); |
| 118 XMMRegister input = xmm1; | 118 XMMRegister input = xmm1; |
| 119 XMMRegister result = xmm2; | 119 XMMRegister result = xmm2; |
| 120 __ movdbl(input, Operand(esp, 1 * kPointerSize)); | 120 __ movsd(input, Operand(esp, 1 * kPointerSize)); |
| 121 __ push(eax); | 121 __ push(eax); |
| 122 __ push(ebx); | 122 __ push(ebx); |
| 123 | 123 |
| 124 MathExpGenerator::EmitMathExp(&masm, input, result, xmm0, eax, ebx); | 124 MathExpGenerator::EmitMathExp(&masm, input, result, xmm0, eax, ebx); |
| 125 | 125 |
| 126 __ pop(ebx); | 126 __ pop(ebx); |
| 127 __ pop(eax); | 127 __ pop(eax); |
| 128 __ movdbl(Operand(esp, 1 * kPointerSize), result); | 128 __ movsd(Operand(esp, 1 * kPointerSize), result); |
| 129 __ fld_d(Operand(esp, 1 * kPointerSize)); | 129 __ fld_d(Operand(esp, 1 * kPointerSize)); |
| 130 __ Ret(); | 130 __ Ret(); |
| 131 } | 131 } |
| 132 | 132 |
| 133 CodeDesc desc; | 133 CodeDesc desc; |
| 134 masm.GetCode(&desc); | 134 masm.GetCode(&desc); |
| 135 ASSERT(!RelocInfo::RequiresRelocation(desc)); | 135 ASSERT(!RelocInfo::RequiresRelocation(desc)); |
| 136 | 136 |
| 137 CPU::FlushICache(buffer, actual_size); | 137 CPU::FlushICache(buffer, actual_size); |
| 138 OS::ProtectCode(buffer, actual_size); | 138 OS::ProtectCode(buffer, actual_size); |
| 139 return FUNCTION_CAST<UnaryMathFunction>(buffer); | 139 return FUNCTION_CAST<UnaryMathFunction>(buffer); |
| 140 } | 140 } |
| 141 | 141 |
| 142 | 142 |
| 143 UnaryMathFunction CreateSqrtFunction() { | 143 UnaryMathFunction CreateSqrtFunction() { |
| 144 size_t actual_size; | 144 size_t actual_size; |
| 145 // Allocate buffer in executable space. | 145 // Allocate buffer in executable space. |
| 146 byte* buffer = static_cast<byte*>(OS::Allocate(1 * KB, | 146 byte* buffer = static_cast<byte*>(OS::Allocate(1 * KB, |
| 147 &actual_size, | 147 &actual_size, |
| 148 true)); | 148 true)); |
| 149 // If SSE2 is not available, we can use libc's implementation to ensure | 149 // If SSE2 is not available, we can use libc's implementation to ensure |
| 150 // consistency, since fullcodegen's code calls into the runtime in that case. | 150 // consistency, since fullcodegen's code calls into the runtime in that case. |
| 151 if (buffer == NULL || !CpuFeatures::IsSupported(SSE2)) return &sqrt; | 151 if (buffer == NULL || !CpuFeatures::IsSupported(SSE2)) return &sqrt; |
| 152 MacroAssembler masm(NULL, buffer, static_cast<int>(actual_size)); | 152 MacroAssembler masm(NULL, buffer, static_cast<int>(actual_size)); |
| 153 // esp[1 * kPointerSize]: raw double input | 153 // esp[1 * kPointerSize]: raw double input |
| 154 // esp[0 * kPointerSize]: return address | 154 // esp[0 * kPointerSize]: return address |
| 155 // Move double input into registers. | 155 // Move double input into registers. |
| 156 { | 156 { |
| 157 CpuFeatureScope use_sse2(&masm, SSE2); | 157 CpuFeatureScope use_sse2(&masm, SSE2); |
| 158 __ movdbl(xmm0, Operand(esp, 1 * kPointerSize)); | 158 __ movsd(xmm0, Operand(esp, 1 * kPointerSize)); |
| 159 __ sqrtsd(xmm0, xmm0); | 159 __ sqrtsd(xmm0, xmm0); |
| 160 __ movdbl(Operand(esp, 1 * kPointerSize), xmm0); | 160 __ movsd(Operand(esp, 1 * kPointerSize), xmm0); |
| 161 // Load result into floating point register as return value. | 161 // Load result into floating point register as return value. |
| 162 __ fld_d(Operand(esp, 1 * kPointerSize)); | 162 __ fld_d(Operand(esp, 1 * kPointerSize)); |
| 163 __ Ret(); | 163 __ Ret(); |
| 164 } | 164 } |
| 165 | 165 |
| 166 CodeDesc desc; | 166 CodeDesc desc; |
| 167 masm.GetCode(&desc); | 167 masm.GetCode(&desc); |
| 168 ASSERT(!RelocInfo::RequiresRelocation(desc)); | 168 ASSERT(!RelocInfo::RequiresRelocation(desc)); |
| 169 | 169 |
| 170 CPU::FlushICache(buffer, actual_size); | 170 CPU::FlushICache(buffer, actual_size); |
| (...skipping 284 matching lines...) |
| 455 __ j(below_equal, &small_size); | 455 __ j(below_equal, &small_size); |
| 456 __ jmp(&medium_size); | 456 __ jmp(&medium_size); |
| 457 } | 457 } |
| 458 { | 458 { |
| 459 // Special handlers for 9 <= copy_size < 64. No assumptions about | 459 // Special handlers for 9 <= copy_size < 64. No assumptions about |
| 460 // alignment or move distance, so all reads must be unaligned and | 460 // alignment or move distance, so all reads must be unaligned and |
| 461 // must happen before any writes. | 461 // must happen before any writes. |
| 462 Label medium_handlers, f9_16, f17_32, f33_48, f49_63; | 462 Label medium_handlers, f9_16, f17_32, f33_48, f49_63; |
| 463 | 463 |
| 464 __ bind(&f9_16); | 464 __ bind(&f9_16); |
| 465 __ movdbl(xmm0, Operand(src, 0)); | 465 __ movsd(xmm0, Operand(src, 0)); |
| 466 __ movdbl(xmm1, Operand(src, count, times_1, -8)); | 466 __ movsd(xmm1, Operand(src, count, times_1, -8)); |
| 467 __ movdbl(Operand(dst, 0), xmm0); | 467 __ movsd(Operand(dst, 0), xmm0); |
| 468 __ movdbl(Operand(dst, count, times_1, -8), xmm1); | 468 __ movsd(Operand(dst, count, times_1, -8), xmm1); |
| 469 MemMoveEmitPopAndReturn(&masm); | 469 MemMoveEmitPopAndReturn(&masm); |
| 470 | 470 |
| 471 __ bind(&f17_32); | 471 __ bind(&f17_32); |
| 472 __ movdqu(xmm0, Operand(src, 0)); | 472 __ movdqu(xmm0, Operand(src, 0)); |
| 473 __ movdqu(xmm1, Operand(src, count, times_1, -0x10)); | 473 __ movdqu(xmm1, Operand(src, count, times_1, -0x10)); |
| 474 __ movdqu(Operand(dst, 0x00), xmm0); | 474 __ movdqu(Operand(dst, 0x00), xmm0); |
| 475 __ movdqu(Operand(dst, count, times_1, -0x10), xmm1); | 475 __ movdqu(Operand(dst, count, times_1, -0x10), xmm1); |
| 476 MemMoveEmitPopAndReturn(&masm); | 476 MemMoveEmitPopAndReturn(&masm); |
| 477 | 477 |
| 478 __ bind(&f33_48); | 478 __ bind(&f33_48); |
| (...skipping 255 matching lines...) |
| 734 OMIT_SMI_CHECK); | 734 OMIT_SMI_CHECK); |
| 735 | 735 |
| 736 __ mov(edi, FieldOperand(esi, FixedArray::kLengthOffset)); | 736 __ mov(edi, FieldOperand(esi, FixedArray::kLengthOffset)); |
| 737 | 737 |
| 738 // Prepare for conversion loop. | 738 // Prepare for conversion loop. |
| 739 ExternalReference canonical_the_hole_nan_reference = | 739 ExternalReference canonical_the_hole_nan_reference = |
| 740 ExternalReference::address_of_the_hole_nan(); | 740 ExternalReference::address_of_the_hole_nan(); |
| 741 XMMRegister the_hole_nan = xmm1; | 741 XMMRegister the_hole_nan = xmm1; |
| 742 if (CpuFeatures::IsSupported(SSE2)) { | 742 if (CpuFeatures::IsSupported(SSE2)) { |
| 743 CpuFeatureScope use_sse2(masm, SSE2); | 743 CpuFeatureScope use_sse2(masm, SSE2); |
| 744 __ movdbl(the_hole_nan, | 744 __ movsd(the_hole_nan, |
| 745 Operand::StaticVariable(canonical_the_hole_nan_reference)); | 745 Operand::StaticVariable(canonical_the_hole_nan_reference)); |
| 746 } | 746 } |
| 747 __ jmp(&entry); | 747 __ jmp(&entry); |
| 748 | 748 |
| 749 // Call into runtime if GC is required. | 749 // Call into runtime if GC is required. |
| 750 __ bind(&gc_required); | 750 __ bind(&gc_required); |
| 751 // Restore registers before jumping into runtime. | 751 // Restore registers before jumping into runtime. |
| 752 __ mov(esi, Operand(ebp, StandardFrameConstants::kContextOffset)); | 752 __ mov(esi, Operand(ebp, StandardFrameConstants::kContextOffset)); |
| 753 __ pop(ebx); | 753 __ pop(ebx); |
| 754 __ pop(eax); | 754 __ pop(eax); |
| 755 __ jmp(fail); | 755 __ jmp(fail); |
| 756 | 756 |
| 757 // Convert and copy elements | 757 // Convert and copy elements |
| 758 // esi: source FixedArray | 758 // esi: source FixedArray |
| 759 __ bind(&loop); | 759 __ bind(&loop); |
| 760 __ mov(ebx, FieldOperand(esi, edi, times_2, FixedArray::kHeaderSize)); | 760 __ mov(ebx, FieldOperand(esi, edi, times_2, FixedArray::kHeaderSize)); |
| 761 // ebx: current element from source | 761 // ebx: current element from source |
| 762 // edi: index of current element | 762 // edi: index of current element |
| 763 __ JumpIfNotSmi(ebx, &convert_hole); | 763 __ JumpIfNotSmi(ebx, &convert_hole); |
| 764 | 764 |
| 765 // Normal smi, convert it to double and store. | 765 // Normal smi, convert it to double and store. |
| 766 __ SmiUntag(ebx); | 766 __ SmiUntag(ebx); |
| 767 if (CpuFeatures::IsSupported(SSE2)) { | 767 if (CpuFeatures::IsSupported(SSE2)) { |
| 768 CpuFeatureScope fscope(masm, SSE2); | 768 CpuFeatureScope fscope(masm, SSE2); |
| 769 __ Cvtsi2sd(xmm0, ebx); | 769 __ Cvtsi2sd(xmm0, ebx); |
| 770 __ movdbl(FieldOperand(eax, edi, times_4, FixedDoubleArray::kHeaderSize), | 770 __ movsd(FieldOperand(eax, edi, times_4, FixedDoubleArray::kHeaderSize), |
| 771 xmm0); | 771 xmm0); |
| 772 } else { | 772 } else { |
| 773 __ push(ebx); | 773 __ push(ebx); |
| 774 __ fild_s(Operand(esp, 0)); | 774 __ fild_s(Operand(esp, 0)); |
| 775 __ pop(ebx); | 775 __ pop(ebx); |
| 776 __ fstp_d(FieldOperand(eax, edi, times_4, FixedDoubleArray::kHeaderSize)); | 776 __ fstp_d(FieldOperand(eax, edi, times_4, FixedDoubleArray::kHeaderSize)); |
| 777 } | 777 } |
| 778 __ jmp(&entry); | 778 __ jmp(&entry); |
| 779 | 779 |
| 780 // Found hole, store hole_nan_as_double instead. | 780 // Found hole, store hole_nan_as_double instead. |
| 781 __ bind(&convert_hole); | 781 __ bind(&convert_hole); |
| 782 | 782 |
| 783 if (FLAG_debug_code) { | 783 if (FLAG_debug_code) { |
| 784 __ cmp(ebx, masm->isolate()->factory()->the_hole_value()); | 784 __ cmp(ebx, masm->isolate()->factory()->the_hole_value()); |
| 785 __ Assert(equal, kObjectFoundInSmiOnlyArray); | 785 __ Assert(equal, kObjectFoundInSmiOnlyArray); |
| 786 } | 786 } |
| 787 | 787 |
| 788 if (CpuFeatures::IsSupported(SSE2)) { | 788 if (CpuFeatures::IsSupported(SSE2)) { |
| 789 CpuFeatureScope use_sse2(masm, SSE2); | 789 CpuFeatureScope use_sse2(masm, SSE2); |
| 790 __ movdbl(FieldOperand(eax, edi, times_4, FixedDoubleArray::kHeaderSize), | 790 __ movsd(FieldOperand(eax, edi, times_4, FixedDoubleArray::kHeaderSize), |
| 791 the_hole_nan); | 791 the_hole_nan); |
| 792 } else { | 792 } else { |
| 793 __ fld_d(Operand::StaticVariable(canonical_the_hole_nan_reference)); | 793 __ fld_d(Operand::StaticVariable(canonical_the_hole_nan_reference)); |
| 794 __ fstp_d(FieldOperand(eax, edi, times_4, FixedDoubleArray::kHeaderSize)); | 794 __ fstp_d(FieldOperand(eax, edi, times_4, FixedDoubleArray::kHeaderSize)); |
| 795 } | 795 } |
| 796 | 796 |
| 797 __ bind(&entry); | 797 __ bind(&entry); |
| 798 __ sub(edi, Immediate(Smi::FromInt(1))); | 798 __ sub(edi, Immediate(Smi::FromInt(1))); |
| 799 __ j(not_sign, &loop); | 799 __ j(not_sign, &loop); |
| 800 | 800 |
| (...skipping 88 matching lines...) |
| 889 // ebx: index of current element (smi-tagged) | 889 // ebx: index of current element (smi-tagged) |
| 890 uint32_t offset = FixedDoubleArray::kHeaderSize + sizeof(kHoleNanLower32); | 890 uint32_t offset = FixedDoubleArray::kHeaderSize + sizeof(kHoleNanLower32); |
| 891 __ cmp(FieldOperand(edi, ebx, times_4, offset), Immediate(kHoleNanUpper32)); | 891 __ cmp(FieldOperand(edi, ebx, times_4, offset), Immediate(kHoleNanUpper32)); |
| 892 __ j(equal, &convert_hole); | 892 __ j(equal, &convert_hole); |
| 893 | 893 |
| 894 // Non-hole double, copy value into a heap number. | 894 // Non-hole double, copy value into a heap number. |
| 895 __ AllocateHeapNumber(edx, esi, no_reg, &gc_required); | 895 __ AllocateHeapNumber(edx, esi, no_reg, &gc_required); |
| 896 // edx: new heap number | 896 // edx: new heap number |
| 897 if (CpuFeatures::IsSupported(SSE2)) { | 897 if (CpuFeatures::IsSupported(SSE2)) { |
| 898 CpuFeatureScope fscope(masm, SSE2); | 898 CpuFeatureScope fscope(masm, SSE2); |
| 899 __ movdbl(xmm0, | 899 __ movsd(xmm0, |
| 900 FieldOperand(edi, ebx, times_4, FixedDoubleArray::kHeaderSize)); | 900 FieldOperand(edi, ebx, times_4, FixedDoubleArray::kHeaderSize)); |
| 901 __ movdbl(FieldOperand(edx, HeapNumber::kValueOffset), xmm0); | 901 __ movsd(FieldOperand(edx, HeapNumber::kValueOffset), xmm0); |
| 902 } else { | 902 } else { |
| 903 __ mov(esi, FieldOperand(edi, ebx, times_4, FixedDoubleArray::kHeaderSize)); | 903 __ mov(esi, FieldOperand(edi, ebx, times_4, FixedDoubleArray::kHeaderSize)); |
| 904 __ mov(FieldOperand(edx, HeapNumber::kValueOffset), esi); | 904 __ mov(FieldOperand(edx, HeapNumber::kValueOffset), esi); |
| 905 __ mov(esi, FieldOperand(edi, ebx, times_4, offset)); | 905 __ mov(esi, FieldOperand(edi, ebx, times_4, offset)); |
| 906 __ mov(FieldOperand(edx, HeapNumber::kValueOffset + kPointerSize), esi); | 906 __ mov(FieldOperand(edx, HeapNumber::kValueOffset + kPointerSize), esi); |
| 907 } | 907 } |
| 908 __ mov(FieldOperand(eax, ebx, times_2, FixedArray::kHeaderSize), edx); | 908 __ mov(FieldOperand(eax, ebx, times_2, FixedArray::kHeaderSize), edx); |
| 909 __ mov(esi, ebx); | 909 __ mov(esi, ebx); |
| 910 __ RecordWriteArray(eax, | 910 __ RecordWriteArray(eax, |
| 911 edx, | 911 edx, |
| (...skipping 159 matching lines...) |
| 1071 Register temp1, | 1071 Register temp1, |
| 1072 Register temp2) { | 1072 Register temp2) { |
| 1073 ASSERT(!input.is(double_scratch)); | 1073 ASSERT(!input.is(double_scratch)); |
| 1074 ASSERT(!input.is(result)); | 1074 ASSERT(!input.is(result)); |
| 1075 ASSERT(!result.is(double_scratch)); | 1075 ASSERT(!result.is(double_scratch)); |
| 1076 ASSERT(!temp1.is(temp2)); | 1076 ASSERT(!temp1.is(temp2)); |
| 1077 ASSERT(ExternalReference::math_exp_constants(0).address() != NULL); | 1077 ASSERT(ExternalReference::math_exp_constants(0).address() != NULL); |
| 1078 | 1078 |
| 1079 Label done; | 1079 Label done; |
| 1080 | 1080 |
| 1081 __ movdbl(double_scratch, ExpConstant(0)); | 1081 __ movsd(double_scratch, ExpConstant(0)); |
| 1082 __ xorpd(result, result); | 1082 __ xorpd(result, result); |
| 1083 __ ucomisd(double_scratch, input); | 1083 __ ucomisd(double_scratch, input); |
| 1084 __ j(above_equal, &done); | 1084 __ j(above_equal, &done); |
| 1085 __ ucomisd(input, ExpConstant(1)); | 1085 __ ucomisd(input, ExpConstant(1)); |
| 1086 __ movdbl(result, ExpConstant(2)); | 1086 __ movsd(result, ExpConstant(2)); |
| 1087 __ j(above_equal, &done); | 1087 __ j(above_equal, &done); |
| 1088 __ movdbl(double_scratch, ExpConstant(3)); | 1088 __ movsd(double_scratch, ExpConstant(3)); |
| 1089 __ movdbl(result, ExpConstant(4)); | 1089 __ movsd(result, ExpConstant(4)); |
| 1090 __ mulsd(double_scratch, input); | 1090 __ mulsd(double_scratch, input); |
| 1091 __ addsd(double_scratch, result); | 1091 __ addsd(double_scratch, result); |
| 1092 __ movd(temp2, double_scratch); | 1092 __ movd(temp2, double_scratch); |
| 1093 __ subsd(double_scratch, result); | 1093 __ subsd(double_scratch, result); |
| 1094 __ movdbl(result, ExpConstant(6)); | 1094 __ movsd(result, ExpConstant(6)); |
| 1095 __ mulsd(double_scratch, ExpConstant(5)); | 1095 __ mulsd(double_scratch, ExpConstant(5)); |
| 1096 __ subsd(double_scratch, input); | 1096 __ subsd(double_scratch, input); |
| 1097 __ subsd(result, double_scratch); | 1097 __ subsd(result, double_scratch); |
| 1098 __ movsd(input, double_scratch); | 1098 __ movsd(input, double_scratch); |
| 1099 __ mulsd(input, double_scratch); | 1099 __ mulsd(input, double_scratch); |
| 1100 __ mulsd(result, input); | 1100 __ mulsd(result, input); |
| 1101 __ mov(temp1, temp2); | 1101 __ mov(temp1, temp2); |
| 1102 __ mulsd(result, ExpConstant(7)); | 1102 __ mulsd(result, ExpConstant(7)); |
| 1103 __ subsd(result, double_scratch); | 1103 __ subsd(result, double_scratch); |
| 1104 __ add(temp1, Immediate(0x1ff800)); | 1104 __ add(temp1, Immediate(0x1ff800)); |
| 1105 __ addsd(result, ExpConstant(8)); | 1105 __ addsd(result, ExpConstant(8)); |
| 1106 __ and_(temp2, Immediate(0x7ff)); | 1106 __ and_(temp2, Immediate(0x7ff)); |
| 1107 __ shr(temp1, 11); | 1107 __ shr(temp1, 11); |
| 1108 __ shl(temp1, 20); | 1108 __ shl(temp1, 20); |
| 1109 __ movd(input, temp1); | 1109 __ movd(input, temp1); |
| 1110 __ pshufd(input, input, static_cast<uint8_t>(0xe1)); // Order: 11 10 00 01 | 1110 __ pshufd(input, input, static_cast<uint8_t>(0xe1)); // Order: 11 10 00 01 |
| 1111 __ movdbl(double_scratch, Operand::StaticArray( | 1111 __ movsd(double_scratch, Operand::StaticArray( |
| 1112 temp2, times_8, ExternalReference::math_exp_log_table())); | 1112 temp2, times_8, ExternalReference::math_exp_log_table())); |
| 1113 __ por(input, double_scratch); | 1113 __ por(input, double_scratch); |
| 1114 __ mulsd(result, input); | 1114 __ mulsd(result, input); |
| 1115 __ bind(&done); | 1115 __ bind(&done); |
| 1116 } | 1116 } |
| 1117 | 1117 |
| 1118 #undef __ | 1118 #undef __ |
| 1119 | 1119 |
| 1120 static const int kNoCodeAgeSequenceLength = 5; | 1120 static const int kNoCodeAgeSequenceLength = 5; |
| 1121 | 1121 |
| (...skipping 53 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... |
| 1175 Code* stub = GetCodeAgeStub(isolate, age, parity); | 1175 Code* stub = GetCodeAgeStub(isolate, age, parity); |
| 1176 CodePatcher patcher(sequence, young_length); | 1176 CodePatcher patcher(sequence, young_length); |
| 1177 patcher.masm()->call(stub->instruction_start(), RelocInfo::NONE32); | 1177 patcher.masm()->call(stub->instruction_start(), RelocInfo::NONE32); |
| 1178 } | 1178 } |
| 1179 } | 1179 } |
| 1180 | 1180 |
| 1181 | 1181 |
| 1182 } } // namespace v8::internal | 1182 } } // namespace v8::internal |
| 1183 | 1183 |
| 1184 #endif // V8_TARGET_ARCH_IA32 | 1184 #endif // V8_TARGET_ARCH_IA32 |