Chromium Code Reviews

Side by Side Diff: src/ia32/code-stubs-ia32.cc

Issue 23654026: Use xorps to break the unnecessary dependence carried by cvtsi2sd, which only partially writes its destination register (Closed) Base URL: git://github.com/v8/v8.git@master
Patch Set: rebase to master and address comments (Created 7 years, 3 months ago)
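On ia32, cvtsi2sd writes only the low 64 bits of its destination XMM register and leaves the upper bits untouched, so the instruction carries a false dependence on whatever last wrote that register. This patch routes the affected call sites through a Cvtsi2sd macro-assembler helper that first clears the destination with xorps, a zeroing idiom the CPU recognizes as dependency-breaking. The helper itself is not part of this file; a minimal sketch of what it presumably looks like (the signature and its placement in src/ia32/macro-assembler-ia32.cc are assumptions, not taken from this diff):

    void MacroAssembler::Cvtsi2sd(XMMRegister dst, Register src) {
      // xorps dst, dst zeroes the whole register and breaks the dependence,
      // so the following cvtsi2sd no longer waits on the previous producer
      // of dst even though it only writes the low 64 bits.
      xorps(dst, dst);
      cvtsi2sd(dst, src);
    }

Every hunk below is the same mechanical change: a raw cvtsi2sd emission becomes a call to the Cvtsi2sd helper, with the surrounding code untouched.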
OLD | NEW
1 // Copyright 2012 the V8 project authors. All rights reserved. 1 // Copyright 2012 the V8 project authors. All rights reserved.
2 // Redistribution and use in source and binary forms, with or without 2 // Redistribution and use in source and binary forms, with or without
3 // modification, are permitted provided that the following conditions are 3 // modification, are permitted provided that the following conditions are
4 // met: 4 // met:
5 // 5 //
6 // * Redistributions of source code must retain the above copyright 6 // * Redistributions of source code must retain the above copyright
7 // notice, this list of conditions and the following disclaimer. 7 // notice, this list of conditions and the following disclaimer.
8 // * Redistributions in binary form must reproduce the above 8 // * Redistributions in binary form must reproduce the above
9 // copyright notice, this list of conditions and the following 9 // copyright notice, this list of conditions and the following
10 // disclaimer in the documentation and/or other materials provided 10 // disclaimer in the documentation and/or other materials provided
(...skipping 966 matching lines...)
977 // are about to return. 977 // are about to return.
978 if (op == Token::SHR) { 978 if (op == Token::SHR) {
979 __ mov(Operand(esp, 1 * kPointerSize), left); 979 __ mov(Operand(esp, 1 * kPointerSize), left);
980 __ mov(Operand(esp, 2 * kPointerSize), Immediate(0)); 980 __ mov(Operand(esp, 2 * kPointerSize), Immediate(0));
981 __ fild_d(Operand(esp, 1 * kPointerSize)); 981 __ fild_d(Operand(esp, 1 * kPointerSize));
982 __ fstp_d(FieldOperand(eax, HeapNumber::kValueOffset)); 982 __ fstp_d(FieldOperand(eax, HeapNumber::kValueOffset));
983 } else { 983 } else {
984 ASSERT_EQ(Token::SHL, op); 984 ASSERT_EQ(Token::SHL, op);
985 if (CpuFeatures::IsSupported(SSE2)) { 985 if (CpuFeatures::IsSupported(SSE2)) {
986 CpuFeatureScope use_sse2(masm, SSE2); 986 CpuFeatureScope use_sse2(masm, SSE2);
987 __ cvtsi2sd(xmm0, left); 987 __ Cvtsi2sd(xmm0, left);
988 __ movdbl(FieldOperand(eax, HeapNumber::kValueOffset), xmm0); 988 __ movdbl(FieldOperand(eax, HeapNumber::kValueOffset), xmm0);
989 } else { 989 } else {
990 __ mov(Operand(esp, 1 * kPointerSize), left); 990 __ mov(Operand(esp, 1 * kPointerSize), left);
991 __ fild_s(Operand(esp, 1 * kPointerSize)); 991 __ fild_s(Operand(esp, 1 * kPointerSize));
992 __ fstp_d(FieldOperand(eax, HeapNumber::kValueOffset)); 992 __ fstp_d(FieldOperand(eax, HeapNumber::kValueOffset));
993 } 993 }
994 } 994 }
995 __ ret(2 * kPointerSize); 995 __ ret(2 * kPointerSize);
996 break; 996 break;
997 } 997 }
(...skipping 365 matching lines...)
1363 // Fall through! 1363 // Fall through!
1364 case NO_OVERWRITE: 1364 case NO_OVERWRITE:
1365 __ AllocateHeapNumber(eax, ecx, edx, &call_runtime); 1365 __ AllocateHeapNumber(eax, ecx, edx, &call_runtime);
1366 __ bind(&skip_allocation); 1366 __ bind(&skip_allocation);
1367 break; 1367 break;
1368 default: UNREACHABLE(); 1368 default: UNREACHABLE();
1369 } 1369 }
1370 // Store the result in the HeapNumber and return. 1370 // Store the result in the HeapNumber and return.
1371 if (CpuFeatures::IsSupported(SSE2)) { 1371 if (CpuFeatures::IsSupported(SSE2)) {
1372 CpuFeatureScope use_sse2(masm, SSE2); 1372 CpuFeatureScope use_sse2(masm, SSE2);
1373 __ cvtsi2sd(xmm0, ebx); 1373 __ Cvtsi2sd(xmm0, ebx);
1374 __ movdbl(FieldOperand(eax, HeapNumber::kValueOffset), xmm0); 1374 __ movdbl(FieldOperand(eax, HeapNumber::kValueOffset), xmm0);
1375 } else { 1375 } else {
1376 __ mov(Operand(esp, 1 * kPointerSize), ebx); 1376 __ mov(Operand(esp, 1 * kPointerSize), ebx);
1377 __ fild_s(Operand(esp, 1 * kPointerSize)); 1377 __ fild_s(Operand(esp, 1 * kPointerSize));
1378 __ fstp_d(FieldOperand(eax, HeapNumber::kValueOffset)); 1378 __ fstp_d(FieldOperand(eax, HeapNumber::kValueOffset));
1379 } 1379 }
1380 __ ret(2 * kPointerSize); // Drop two pushed arguments from the stack. 1380 __ ret(2 * kPointerSize); // Drop two pushed arguments from the stack.
1381 } 1381 }
1382 1382
1383 __ bind(&not_floats); 1383 __ bind(&not_floats);
(...skipping 203 matching lines...)
1587 // Fall through! 1587 // Fall through!
1588 case NO_OVERWRITE: 1588 case NO_OVERWRITE:
1589 __ AllocateHeapNumber(eax, ecx, edx, &call_runtime); 1589 __ AllocateHeapNumber(eax, ecx, edx, &call_runtime);
1590 __ bind(&skip_allocation); 1590 __ bind(&skip_allocation);
1591 break; 1591 break;
1592 default: UNREACHABLE(); 1592 default: UNREACHABLE();
1593 } 1593 }
1594 // Store the result in the HeapNumber and return. 1594 // Store the result in the HeapNumber and return.
1595 if (CpuFeatures::IsSupported(SSE2)) { 1595 if (CpuFeatures::IsSupported(SSE2)) {
1596 CpuFeatureScope use_sse2(masm, SSE2); 1596 CpuFeatureScope use_sse2(masm, SSE2);
1597 __ cvtsi2sd(xmm0, ebx); 1597 __ Cvtsi2sd(xmm0, ebx);
1598 __ movdbl(FieldOperand(eax, HeapNumber::kValueOffset), xmm0); 1598 __ movdbl(FieldOperand(eax, HeapNumber::kValueOffset), xmm0);
1599 } else { 1599 } else {
1600 __ mov(Operand(esp, 1 * kPointerSize), ebx); 1600 __ mov(Operand(esp, 1 * kPointerSize), ebx);
1601 __ fild_s(Operand(esp, 1 * kPointerSize)); 1601 __ fild_s(Operand(esp, 1 * kPointerSize));
1602 __ fstp_d(FieldOperand(eax, HeapNumber::kValueOffset)); 1602 __ fstp_d(FieldOperand(eax, HeapNumber::kValueOffset));
1603 } 1603 }
1604 __ ret(2 * kPointerSize); // Drop two pushed arguments from the stack. 1604 __ ret(2 * kPointerSize); // Drop two pushed arguments from the stack.
1605 } 1605 }
1606 1606
1607 __ bind(&not_floats); 1607 __ bind(&not_floats);
(...skipping 167 matching lines...)
1775 // Fall through! 1775 // Fall through!
1776 case NO_OVERWRITE: 1776 case NO_OVERWRITE:
1777 __ AllocateHeapNumber(eax, ecx, edx, &call_runtime); 1777 __ AllocateHeapNumber(eax, ecx, edx, &call_runtime);
1778 __ bind(&skip_allocation); 1778 __ bind(&skip_allocation);
1779 break; 1779 break;
1780 default: UNREACHABLE(); 1780 default: UNREACHABLE();
1781 } 1781 }
1782 // Store the result in the HeapNumber and return. 1782 // Store the result in the HeapNumber and return.
1783 if (CpuFeatures::IsSupported(SSE2)) { 1783 if (CpuFeatures::IsSupported(SSE2)) {
1784 CpuFeatureScope use_sse2(masm, SSE2); 1784 CpuFeatureScope use_sse2(masm, SSE2);
1785 __ cvtsi2sd(xmm0, ebx); 1785 __ Cvtsi2sd(xmm0, ebx);
1786 __ movdbl(FieldOperand(eax, HeapNumber::kValueOffset), xmm0); 1786 __ movdbl(FieldOperand(eax, HeapNumber::kValueOffset), xmm0);
1787 } else { 1787 } else {
1788 __ mov(Operand(esp, 1 * kPointerSize), ebx); 1788 __ mov(Operand(esp, 1 * kPointerSize), ebx);
1789 __ fild_s(Operand(esp, 1 * kPointerSize)); 1789 __ fild_s(Operand(esp, 1 * kPointerSize));
1790 __ fstp_d(FieldOperand(eax, HeapNumber::kValueOffset)); 1790 __ fstp_d(FieldOperand(eax, HeapNumber::kValueOffset));
1791 } 1791 }
1792 __ ret(2 * kPointerSize); 1792 __ ret(2 * kPointerSize);
1793 } 1793 }
1794 break; 1794 break;
1795 } 1795 }
(...skipping 526 matching lines...)
2322 __ j(not_equal, not_numbers); // Argument in edx is not a number. 2322 __ j(not_equal, not_numbers); // Argument in edx is not a number.
2323 __ movdbl(xmm0, FieldOperand(edx, HeapNumber::kValueOffset)); 2323 __ movdbl(xmm0, FieldOperand(edx, HeapNumber::kValueOffset));
2324 __ bind(&load_eax); 2324 __ bind(&load_eax);
2325 // Load operand in eax into xmm1, or branch to not_numbers. 2325 // Load operand in eax into xmm1, or branch to not_numbers.
2326 __ JumpIfSmi(eax, &load_smi_eax, Label::kNear); 2326 __ JumpIfSmi(eax, &load_smi_eax, Label::kNear);
2327 __ cmp(FieldOperand(eax, HeapObject::kMapOffset), factory->heap_number_map()); 2327 __ cmp(FieldOperand(eax, HeapObject::kMapOffset), factory->heap_number_map());
2328 __ j(equal, &load_float_eax, Label::kNear); 2328 __ j(equal, &load_float_eax, Label::kNear);
2329 __ jmp(not_numbers); // Argument in eax is not a number. 2329 __ jmp(not_numbers); // Argument in eax is not a number.
2330 __ bind(&load_smi_edx); 2330 __ bind(&load_smi_edx);
2331 __ SmiUntag(edx); // Untag smi before converting to float. 2331 __ SmiUntag(edx); // Untag smi before converting to float.
2332 __ cvtsi2sd(xmm0, edx); 2332 __ Cvtsi2sd(xmm0, edx);
2333 __ SmiTag(edx); // Retag smi for heap number overwriting test. 2333 __ SmiTag(edx); // Retag smi for heap number overwriting test.
2334 __ jmp(&load_eax); 2334 __ jmp(&load_eax);
2335 __ bind(&load_smi_eax); 2335 __ bind(&load_smi_eax);
2336 __ SmiUntag(eax); // Untag smi before converting to float. 2336 __ SmiUntag(eax); // Untag smi before converting to float.
2337 __ cvtsi2sd(xmm1, eax); 2337 __ Cvtsi2sd(xmm1, eax);
2338 __ SmiTag(eax); // Retag smi for heap number overwriting test. 2338 __ SmiTag(eax); // Retag smi for heap number overwriting test.
2339 __ jmp(&done, Label::kNear); 2339 __ jmp(&done, Label::kNear);
2340 __ bind(&load_float_eax); 2340 __ bind(&load_float_eax);
2341 __ movdbl(xmm1, FieldOperand(eax, HeapNumber::kValueOffset)); 2341 __ movdbl(xmm1, FieldOperand(eax, HeapNumber::kValueOffset));
2342 __ bind(&done); 2342 __ bind(&done);
2343 } 2343 }
2344 2344
2345 2345
2346 void FloatingPointHelper::LoadSSE2Smis(MacroAssembler* masm, 2346 void FloatingPointHelper::LoadSSE2Smis(MacroAssembler* masm,
2347 Register scratch) { 2347 Register scratch) {
2348 const Register left = edx; 2348 const Register left = edx;
2349 const Register right = eax; 2349 const Register right = eax;
2350 __ mov(scratch, left); 2350 __ mov(scratch, left);
2351 ASSERT(!scratch.is(right)); // We're about to clobber scratch. 2351 ASSERT(!scratch.is(right)); // We're about to clobber scratch.
2352 __ SmiUntag(scratch); 2352 __ SmiUntag(scratch);
2353 __ cvtsi2sd(xmm0, scratch); 2353 __ Cvtsi2sd(xmm0, scratch);
2354 2354
2355 __ mov(scratch, right); 2355 __ mov(scratch, right);
2356 __ SmiUntag(scratch); 2356 __ SmiUntag(scratch);
2357 __ cvtsi2sd(xmm1, scratch); 2357 __ Cvtsi2sd(xmm1, scratch);
2358 } 2358 }
2359 2359
2360 2360
2361 void FloatingPointHelper::CheckSSE2OperandIsInt32(MacroAssembler* masm, 2361 void FloatingPointHelper::CheckSSE2OperandIsInt32(MacroAssembler* masm,
2362 Label* non_int32, 2362 Label* non_int32,
2363 XMMRegister operand, 2363 XMMRegister operand,
2364 Register int32_result, 2364 Register int32_result,
2365 Register scratch, 2365 Register scratch,
2366 XMMRegister xmm_scratch) { 2366 XMMRegister xmm_scratch) {
2367 __ cvttsd2si(int32_result, Operand(operand)); 2367 __ cvttsd2si(int32_result, Operand(operand));
2368 __ cvtsi2sd(xmm_scratch, int32_result); 2368 __ Cvtsi2sd(xmm_scratch, int32_result);
2369 __ pcmpeqd(xmm_scratch, operand); 2369 __ pcmpeqd(xmm_scratch, operand);
2370 __ movmskps(scratch, xmm_scratch); 2370 __ movmskps(scratch, xmm_scratch);
2371 // Two least significant bits should be both set. 2371 // Two least significant bits should be both set.
2372 __ not_(scratch); 2372 __ not_(scratch);
2373 __ test(scratch, Immediate(3)); 2373 __ test(scratch, Immediate(3));
2374 __ j(not_zero, non_int32); 2374 __ j(not_zero, non_int32);
2375 } 2375 }
2376 2376
2377 2377
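CheckSSE2OperandIsInt32 above decides whether a double holds an exact int32 by truncating it with cvttsd2si, converting the result back with the new Cvtsi2sd helper, and comparing the two bit patterns with pcmpeqd/movmskps (both 32-bit halves of the low quadword must match). A scalar C++ sketch of the same check, for illustration only; the function and its names are not part of V8:

    #include <cstdint>
    #include <cstring>

    bool RepresentsInt32(double value, int32_t* out) {
      int32_t truncated = static_cast<int32_t>(value);     // cvttsd2si
      double round_trip = static_cast<double>(truncated);  // Cvtsi2sd
      // The stub compares raw bit patterns, so NaN, out-of-range inputs and
      // -0.0 all fail and take the non_int32 path. (Unlike cvttsd2si, the C++
      // cast is undefined for out-of-range inputs; this sketch assumes the
      // usual x86 behavior of producing INT32_MIN there.)
      if (std::memcmp(&round_trip, &value, sizeof(double)) != 0) return false;
      *out = truncated;
      return true;
    }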
2378 void FloatingPointHelper::LoadFloatOperands(MacroAssembler* masm, 2378 void FloatingPointHelper::LoadFloatOperands(MacroAssembler* masm,
(...skipping 84 matching lines...)
2463 const Register scratch = ecx; 2463 const Register scratch = ecx;
2464 const XMMRegister double_result = xmm3; 2464 const XMMRegister double_result = xmm3;
2465 const XMMRegister double_base = xmm2; 2465 const XMMRegister double_base = xmm2;
2466 const XMMRegister double_exponent = xmm1; 2466 const XMMRegister double_exponent = xmm1;
2467 const XMMRegister double_scratch = xmm4; 2467 const XMMRegister double_scratch = xmm4;
2468 2468
2469 Label call_runtime, done, exponent_not_smi, int_exponent; 2469 Label call_runtime, done, exponent_not_smi, int_exponent;
2470 2470
2471 // Save 1 in double_result - we need this several times later on. 2471 // Save 1 in double_result - we need this several times later on.
2472 __ mov(scratch, Immediate(1)); 2472 __ mov(scratch, Immediate(1));
2473 __ cvtsi2sd(double_result, scratch); 2473 __ Cvtsi2sd(double_result, scratch);
2474 2474
2475 if (exponent_type_ == ON_STACK) { 2475 if (exponent_type_ == ON_STACK) {
2476 Label base_is_smi, unpack_exponent; 2476 Label base_is_smi, unpack_exponent;
2477 // The exponent and base are supplied as arguments on the stack. 2477 // The exponent and base are supplied as arguments on the stack.
2478 // This can only happen if the stub is called from non-optimized code. 2478 // This can only happen if the stub is called from non-optimized code.
2479 // Load input parameters from stack. 2479 // Load input parameters from stack.
2480 __ mov(base, Operand(esp, 2 * kPointerSize)); 2480 __ mov(base, Operand(esp, 2 * kPointerSize));
2481 __ mov(exponent, Operand(esp, 1 * kPointerSize)); 2481 __ mov(exponent, Operand(esp, 1 * kPointerSize));
2482 2482
2483 __ JumpIfSmi(base, &base_is_smi, Label::kNear); 2483 __ JumpIfSmi(base, &base_is_smi, Label::kNear);
2484 __ cmp(FieldOperand(base, HeapObject::kMapOffset), 2484 __ cmp(FieldOperand(base, HeapObject::kMapOffset),
2485 factory->heap_number_map()); 2485 factory->heap_number_map());
2486 __ j(not_equal, &call_runtime); 2486 __ j(not_equal, &call_runtime);
2487 2487
2488 __ movdbl(double_base, FieldOperand(base, HeapNumber::kValueOffset)); 2488 __ movdbl(double_base, FieldOperand(base, HeapNumber::kValueOffset));
2489 __ jmp(&unpack_exponent, Label::kNear); 2489 __ jmp(&unpack_exponent, Label::kNear);
2490 2490
2491 __ bind(&base_is_smi); 2491 __ bind(&base_is_smi);
2492 __ SmiUntag(base); 2492 __ SmiUntag(base);
2493 __ cvtsi2sd(double_base, base); 2493 __ Cvtsi2sd(double_base, base);
2494 2494
2495 __ bind(&unpack_exponent); 2495 __ bind(&unpack_exponent);
2496 __ JumpIfNotSmi(exponent, &exponent_not_smi, Label::kNear); 2496 __ JumpIfNotSmi(exponent, &exponent_not_smi, Label::kNear);
2497 __ SmiUntag(exponent); 2497 __ SmiUntag(exponent);
2498 __ jmp(&int_exponent); 2498 __ jmp(&int_exponent);
2499 2499
2500 __ bind(&exponent_not_smi); 2500 __ bind(&exponent_not_smi);
2501 __ cmp(FieldOperand(exponent, HeapObject::kMapOffset), 2501 __ cmp(FieldOperand(exponent, HeapObject::kMapOffset),
2502 factory->heap_number_map()); 2502 factory->heap_number_map());
2503 __ j(not_equal, &call_runtime); 2503 __ j(not_equal, &call_runtime);
(...skipping 172 matching lines...)
2676 __ divsd(double_scratch2, double_result); 2676 __ divsd(double_scratch2, double_result);
2677 __ movsd(double_result, double_scratch2); 2677 __ movsd(double_result, double_scratch2);
2678 // Test whether result is zero. Bail out to check for subnormal result. 2678 // Test whether result is zero. Bail out to check for subnormal result.
2679 // Due to subnormals, x^-y == (1/x)^y does not hold in all cases. 2679 // Due to subnormals, x^-y == (1/x)^y does not hold in all cases.
2680 __ xorps(double_scratch2, double_scratch2); 2680 __ xorps(double_scratch2, double_scratch2);
2681 __ ucomisd(double_scratch2, double_result); // Result cannot be NaN. 2681 __ ucomisd(double_scratch2, double_result); // Result cannot be NaN.
2682 // double_exponent aliased as double_scratch2 has already been overwritten 2682 // double_exponent aliased as double_scratch2 has already been overwritten
2683 // and may not have contained the exponent value in the first place when the 2683 // and may not have contained the exponent value in the first place when the
2684 // exponent is a smi. We reset it with exponent value before bailing out. 2684 // exponent is a smi. We reset it with exponent value before bailing out.
2685 __ j(not_equal, &done); 2685 __ j(not_equal, &done);
2686 __ cvtsi2sd(double_exponent, exponent); 2686 __ Cvtsi2sd(double_exponent, exponent);
2687 2687
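The bailout above exists because, for a negative exponent, the stub computes the power of the positive exponent and then takes the reciprocal, and that reciprocal can underflow to exactly zero even when the mathematically correct result is a representable subnormal. A standalone illustration of the comment, not part of the patch:

    #include <cmath>
    #include <cstdio>

    int main() {
      double x = std::ldexp(1.0, 530);                  // x = 2^530
      double via_reciprocal = 1.0 / (x * x);            // x^2 overflows to +inf, so this is 0.0
      double via_inverse_base = (1.0 / x) * (1.0 / x);  // 2^-1060, a nonzero subnormal
      std::printf("%g vs %g\n", via_reciprocal, via_inverse_base);
      return 0;
    }

A zero result therefore bails out to the runtime, which can produce the subnormal answer.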
2688 // Returning or bailing out. 2688 // Returning or bailing out.
2689 Counters* counters = masm->isolate()->counters(); 2689 Counters* counters = masm->isolate()->counters();
2690 if (exponent_type_ == ON_STACK) { 2690 if (exponent_type_ == ON_STACK) {
2691 // The arguments are still on the stack. 2691 // The arguments are still on the stack.
2692 __ bind(&call_runtime); 2692 __ bind(&call_runtime);
2693 __ TailCallRuntime(Runtime::kMath_pow_cfunction, 2, 1); 2693 __ TailCallRuntime(Runtime::kMath_pow_cfunction, 2, 1);
2694 2694
2695 // The stub is called from non-optimized code, which expects the result 2695 // The stub is called from non-optimized code, which expects the result
2696 // as heap number in exponent. 2696 // as heap number in exponent.
(...skipping 3568 matching lines...)
6265 Label done, left, left_smi, right_smi; 6265 Label done, left, left_smi, right_smi;
6266 __ JumpIfSmi(eax, &right_smi, Label::kNear); 6266 __ JumpIfSmi(eax, &right_smi, Label::kNear);
6267 __ cmp(FieldOperand(eax, HeapObject::kMapOffset), 6267 __ cmp(FieldOperand(eax, HeapObject::kMapOffset),
6268 masm->isolate()->factory()->heap_number_map()); 6268 masm->isolate()->factory()->heap_number_map());
6269 __ j(not_equal, &maybe_undefined1, Label::kNear); 6269 __ j(not_equal, &maybe_undefined1, Label::kNear);
6270 __ movdbl(xmm1, FieldOperand(eax, HeapNumber::kValueOffset)); 6270 __ movdbl(xmm1, FieldOperand(eax, HeapNumber::kValueOffset));
6271 __ jmp(&left, Label::kNear); 6271 __ jmp(&left, Label::kNear);
6272 __ bind(&right_smi); 6272 __ bind(&right_smi);
6273 __ mov(ecx, eax); // Can't clobber eax because we can still jump away. 6273 __ mov(ecx, eax); // Can't clobber eax because we can still jump away.
6274 __ SmiUntag(ecx); 6274 __ SmiUntag(ecx);
6275 __ cvtsi2sd(xmm1, ecx); 6275 __ Cvtsi2sd(xmm1, ecx);
6276 6276
6277 __ bind(&left); 6277 __ bind(&left);
6278 __ JumpIfSmi(edx, &left_smi, Label::kNear); 6278 __ JumpIfSmi(edx, &left_smi, Label::kNear);
6279 __ cmp(FieldOperand(edx, HeapObject::kMapOffset), 6279 __ cmp(FieldOperand(edx, HeapObject::kMapOffset),
6280 masm->isolate()->factory()->heap_number_map()); 6280 masm->isolate()->factory()->heap_number_map());
6281 __ j(not_equal, &maybe_undefined2, Label::kNear); 6281 __ j(not_equal, &maybe_undefined2, Label::kNear);
6282 __ movdbl(xmm0, FieldOperand(edx, HeapNumber::kValueOffset)); 6282 __ movdbl(xmm0, FieldOperand(edx, HeapNumber::kValueOffset));
6283 __ jmp(&done); 6283 __ jmp(&done);
6284 __ bind(&left_smi); 6284 __ bind(&left_smi);
6285 __ mov(ecx, edx); // Can't clobber edx because we can still jump away. 6285 __ mov(ecx, edx); // Can't clobber edx because we can still jump away.
6286 __ SmiUntag(ecx); 6286 __ SmiUntag(ecx);
6287 __ cvtsi2sd(xmm0, ecx); 6287 __ Cvtsi2sd(xmm0, ecx);
6288 6288
6289 __ bind(&done); 6289 __ bind(&done);
6290 // Compare operands. 6290 // Compare operands.
6291 __ ucomisd(xmm0, xmm1); 6291 __ ucomisd(xmm0, xmm1);
6292 6292
6293 // Don't base result on EFLAGS when a NaN is involved. 6293 // Don't base result on EFLAGS when a NaN is involved.
6294 __ j(parity_even, &unordered, Label::kNear); 6294 __ j(parity_even, &unordered, Label::kNear);
6295 6295
6296 // Return a result of -1, 0, or 1, based on EFLAGS. 6296 // Return a result of -1, 0, or 1, based on EFLAGS.
6297 // Performing mov, because xor would destroy the flag register. 6297 // Performing mov, because xor would destroy the flag register.
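ucomisd reports an unordered comparison (either operand is NaN) by setting the parity flag, which is why the stub checks parity_even before deriving -1, 0, or 1 from the other flags. A small reminder of the underlying semantics, separate from the stub code:

    #include <cmath>
    #include <cstdio>

    int main() {
      double nan = std::nan("");
      // Every ordered comparison involving NaN is false, so a result cannot
      // be read off the usual flag tests and the unordered path is taken.
      std::printf("%d %d %d\n", nan < 1.0, nan > 1.0, nan == nan);  // prints: 0 0 0
      return 0;
    }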
(...skipping 1230 matching lines...)
7528 __ bind(&fast_elements_case); 7528 __ bind(&fast_elements_case);
7529 GenerateCase(masm, FAST_ELEMENTS); 7529 GenerateCase(masm, FAST_ELEMENTS);
7530 } 7530 }
7531 7531
7532 7532
7533 #undef __ 7533 #undef __
7534 7534
7535 } } // namespace v8::internal 7535 } } // namespace v8::internal
7536 7536
7537 #endif // V8_TARGET_ARCH_IA32 7537 #endif // V8_TARGET_ARCH_IA32
