OLD | NEW |
---|---|
1 // Copyright 2013 the V8 project authors. All rights reserved. | 1 // Copyright 2013 the V8 project authors. All rights reserved. |
2 // Redistribution and use in source and binary forms, with or without | 2 // Redistribution and use in source and binary forms, with or without |
3 // modification, are permitted provided that the following conditions are | 3 // modification, are permitted provided that the following conditions are |
4 // met: | 4 // met: |
5 // | 5 // |
6 // * Redistributions of source code must retain the above copyright | 6 // * Redistributions of source code must retain the above copyright |
7 // notice, this list of conditions and the following disclaimer. | 7 // notice, this list of conditions and the following disclaimer. |
8 // * Redistributions in binary form must reproduce the above | 8 // * Redistributions in binary form must reproduce the above |
9 // copyright notice, this list of conditions and the following | 9 // copyright notice, this list of conditions and the following |
10 // disclaimer in the documentation and/or other materials provided | 10 // disclaimer in the documentation and/or other materials provided |
(...skipping 2455 matching lines...) | |
2466 const ParameterCount& actual, | 2466 const ParameterCount& actual, |
2467 InvokeFlag flag, | 2467 InvokeFlag flag, |
2468 const CallWrapper& call_wrapper) { | 2468 const CallWrapper& call_wrapper) { |
2469 // Contract with called JS functions requires that function is passed in x1. | 2469 // Contract with called JS functions requires that function is passed in x1. |
2470 // (See FullCodeGenerator::Generate().) | 2470 // (See FullCodeGenerator::Generate().) |
2471 __ LoadObject(x1, function); | 2471 __ LoadObject(x1, function); |
2472 InvokeFunction(x1, expected, actual, flag, call_wrapper); | 2472 InvokeFunction(x1, expected, actual, flag, call_wrapper); |
2473 } | 2473 } |
2474 | 2474 |
2475 | 2475 |
2476 void MacroAssembler::ECMA262ToInt32(Register result, | 2476 void MacroAssembler::TryInlineTruncateDoubleToI(Register result, |
2477 DoubleRegister input, | 2477 DoubleRegister double_input, |
2478 Register scratch1, | 2478 Label* done) { |
2479 Register scratch2, | |
2480 ECMA262ToInt32Result format) { | |
2481 ASSERT(!AreAliased(result, scratch1, scratch2)); | |
2482 ASSERT(result.Is64Bits() && scratch1.Is64Bits() && scratch2.Is64Bits()); | |
2483 STATIC_ASSERT(kSmiTag == 0); | 2479 STATIC_ASSERT(kSmiTag == 0); |
2484 STATIC_ASSERT(kSmiValueSize == 32); | 2480 STATIC_ASSERT(kSmiValueSize == 32); |
2485 | 2481 |
2486 Label done, tag, manual_conversion; | 2482 // Try to convert with a FPU convert instruction. It's trivial to compute |
2487 | 2483 // the modulo operation on an integer register so we convert to a 64-bit |
2488 // 1. Try to convert with a FPU convert instruction. It's trivial to compute | 2484 // integer, then find the 32-bit result from that. |
2489 // the modulo operation on an integer register so we convert to a 64-bit | |
2490 // integer, then find the 32-bit result from that. | |
2491 // | 2485 // |
2492 // Fcvtzs will saturate to INT64_MIN (0x800...00) or INT64_MAX (0x7ff...ff) | 2486 // Fcvtzs will saturate to INT64_MIN (0x800...00) or INT64_MAX (0x7ff...ff) |
2493 // when the double is out of range. NaNs and infinities will be converted to 0 | 2487 // when the double is out of range. NaNs and infinities will be converted to 0 |
2494 // (as ECMA-262 requires). | 2488 // (as ECMA-262 requires). |
2495 Fcvtzs(result, input); | 2489 Fcvtzs(result, double_input); |
2496 | 2490 |
2497 // The values INT64_MIN (0x800...00) or INT64_MAX (0x7ff...ff) are not | 2491 // The values INT64_MIN (0x800...00) or INT64_MAX (0x7ff...ff) are not |
2498 // representable using a double, so if the result is one of those then we know | 2492 // representable using a double, so if the result is one of those then we know |
2499 // that saturation occurred, and we need to manually handle the conversion. | 2493 // that saturation occurred, and we need to manually handle the conversion. |
2500 // | 2494 // |
2501 // It is easy to detect INT64_MIN and INT64_MAX because adding or subtracting | 2495 // It is easy to detect INT64_MIN and INT64_MAX because adding or subtracting |
2502 // 1 will cause signed overflow. | 2496 // 1 will cause signed overflow. |
2503 Cmp(result, 1); | 2497 Cmp(result, 1); |
2504 Ccmp(result, -1, VFlag, vc); | 2498 Ccmp(result, -1, VFlag, vc); |
2505 B(vc, &tag); | |
2506 | 2499 |
2507 // 2. Manually convert the input to an int32. | 2500 B(vc, done); |
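For reference, the Cmp/Ccmp/B sequence above can be read as the following C++ sketch (the helper name is hypothetical, not V8 code): Cmp(result, 1) computes result - 1, which overflows only for INT64_MIN; Ccmp(result, -1, VFlag, vc) then computes result + 1, which overflows only for INT64_MAX (or forces the V flag if the first compare already overflowed); the branch on vc therefore takes the fast path exactly when neither saturation value was produced.

#include <cstdint>
#include <limits>

// Hypothetical model of the overflow trick above: the inline fast path is
// usable only if the saturated Fcvtzs result is neither INT64_MIN nor
// INT64_MAX, since those are the only values for which result - 1 or
// result + 1 causes signed overflow.
bool InlineTruncationSucceeded(int64_t fcvtzs_result) {
  bool min_saturated = fcvtzs_result == std::numeric_limits<int64_t>::min();
  bool max_saturated = fcvtzs_result == std::numeric_limits<int64_t>::max();
  return !min_saturated && !max_saturated;  // Corresponds to B(vc, done).
}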
2508 Fmov(result, input); | |
2509 | |
2510 // Extract the exponent. | |
2511 Register exponent = scratch1; | |
2512 Ubfx(exponent, result, HeapNumber::kMantissaBits, HeapNumber::kExponentBits); | |
2513 | |
2514 // If the exponent is >= 84 (kMantissaBits + 32), the result is always 0 since | |
2515 // the mantissa gets shifted completely out of the int32_t result. | |
2516 Cmp(exponent, HeapNumber::kExponentBias + HeapNumber::kMantissaBits + 32); | |
2517 CzeroX(result, ge); | |
2518 B(ge, &done); | |
2519 | |
2520 // The Fcvtzs sequence handles all cases except where the conversion causes | |
2521 // signed overflow in the int64_t target. Since we've already handled | |
2522 // exponents >= 84, we can guarantee that 63 <= exponent < 84. | |
2523 | |
2524 if (emit_debug_code()) { | |
2525 Cmp(exponent, HeapNumber::kExponentBias + 63); | |
2526 // Exponents less than this should have been handled by the Fcvt case. | |
2527 Check(ge, kUnexpectedValue); | |
2528 } | |
2529 | |
2530 // Isolate the mantissa bits, and set the implicit '1'. | |
2531 Register mantissa = scratch2; | |
2532 Ubfx(mantissa, result, 0, HeapNumber::kMantissaBits); | |
2533 Orr(mantissa, mantissa, 1UL << HeapNumber::kMantissaBits); | |
2534 | |
2535 // Negate the mantissa if necessary. | |
2536 Tst(result, kXSignMask); | |
2537 Cneg(mantissa, mantissa, ne); | |
2538 | |
2539 // Shift the mantissa bits into the correct place. We know that we have to shift | |
2540 // it left here, because exponent >= 63 >= kMantissaBits. | |
2541 Sub(exponent, exponent, | |
2542 HeapNumber::kExponentBias + HeapNumber::kMantissaBits); | |
2543 Lsl(result, mantissa, exponent); | |
2544 | |
2545 Bind(&tag); | |
2546 switch (format) { | |
2547 case INT32_IN_W: | |
2548 // There is nothing to do; the upper 32 bits are undefined. | |
2549 if (emit_debug_code()) { | |
2550 __ Mov(scratch1, 0x55555555); | |
2551 __ Bfi(result, scratch1, 32, 32); | |
2552 } | |
2553 break; | |
2554 case INT32_IN_X: | |
2555 Sxtw(result, result); | |
2556 break; | |
2557 case SMI: | |
2558 SmiTag(result); | |
2559 break; | |
2560 } | |
2561 | |
2562 Bind(&done); | |
2563 } | 2501 } |
2564 | 2502 |
2565 | 2503 |
2566 void MacroAssembler::HeapNumberECMA262ToInt32(Register result, | 2504 void MacroAssembler::TruncateDoubleToI(Register result, |
2567 Register heap_number, | 2505 DoubleRegister double_input) { |
2568 Register scratch1, | 2506 Label done; |
2569 Register scratch2, | 2507 ASSERT(jssp.Is(StackPointer())); |
2570 DoubleRegister double_scratch, | |
2571 ECMA262ToInt32Result format) { | |
2572 if (emit_debug_code()) { | |
2573 // Verify we indeed have a HeapNumber. | |
2574 Label ok; | |
2575 JumpIfHeapNumber(heap_number, &ok); | |
2576 Abort(kExpectedHeapNumber); | |
2577 Bind(&ok); | |
2578 } | |
2579 | 2508 |
2580 Ldr(double_scratch, FieldMemOperand(heap_number, HeapNumber::kValueOffset)); | 2509 TryInlineTruncateDoubleToI(result, double_input, &done); |
2581 ECMA262ToInt32(result, double_scratch, scratch1, scratch2, format); | 2510 |
2511 // If we fell through then the inline version didn't succeed - call the stub instead. | |
2512 Push(lr); | |
2513 Push(double_input); // Put input on stack. | |
2514 | |
2515 DoubleToIStub stub(jssp, result, 0, true, true); | |
jbramley
2014/02/13 14:20:14
Please use comments to label those bools (or make
| |
2516 CallStub(&stub); | |
2517 | |
2518 Drop(1, kDoubleSize); // Drop the double input on the stack. | |
2519 Pop(lr); | |
2520 | |
2521 Bind(&done); | |
2522 | |
2523 // TODO(rmcilroy): Remove this Sxtw once the following bug is fixed: | |
2524 // https://code.google.com/p/v8/issues/detail?id=3149 | |
2525 Sxtw(result, result); | |
jbramley
2014/02/13 14:20:14
Use Sxtw(result, result.W());
This enables a more
rmcilroy
2014/02/14 12:45:07
Done.
| |
2582 } | 2526 } |
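In ECMA-262 terms, TruncateDoubleToI computes ToInt32 of its input with truncation toward zero; a standalone C++ model of that semantics (a sketch of the intended result, not the code path actually executed) is:

#include <cmath>
#include <cstdint>

// Model of ECMA-262 ToInt32: truncate toward zero, reduce modulo 2^32, then
// reinterpret the low 32 bits as a signed value. NaN and infinity map to 0,
// as the comments above require.
int32_t ToInt32Model(double input) {
  if (std::isnan(input) || std::isinf(input)) return 0;
  double truncated = std::trunc(input);                 // Round toward zero.
  double modulo = std::fmod(truncated, 4294967296.0);   // 2^32, keeps the sign.
  if (modulo < 0) modulo += 4294967296.0;               // Map into [0, 2^32).
  return static_cast<int32_t>(static_cast<uint32_t>(modulo));
}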
2583 | 2527 |
2584 | 2528 |
2529 void MacroAssembler::TruncateHeapNumberToI(Register result, | |
2530 Register object) { | |
2531 Label done; | |
2532 ASSERT(!result.is(object)); | |
2533 ASSERT(jssp.Is(StackPointer())); | |
2534 | |
2535 Ldr(fp_scratch, FieldMemOperand(object, HeapNumber::kValueOffset)); | |
2536 TryInlineTruncateDoubleToI(result, fp_scratch, &done); | |
2537 | |
2538 // If we fell through then the inline version didn't succeed - call the stub instead. | |
2539 Push(lr); | |
2540 DoubleToIStub stub(object, | |
2541 result, | |
2542 HeapNumber::kValueOffset - kHeapObjectTag, | |
2543 true, | |
2544 true); | |
2545 CallStub(&stub); | |
2546 Pop(lr); | |
2547 | |
2548 bind(&done); | |
jbramley
2014/02/13 14:20:14
Bind(&done);
rmcilroy
2014/02/14 12:45:07
Done.
| |
2549 | |
2550 // TODO(rmcilroy): Remove this Sxtw once the following bug is fixed: | |
2551 // https://code.google.com/p/v8/issues/detail?id=3149 | |
2552 Sxtw(result, result); | |
2553 } | |
2554 | |
2555 | |
2585 void MacroAssembler::Prologue(PrologueFrameMode frame_mode) { | 2556 void MacroAssembler::Prologue(PrologueFrameMode frame_mode) { |
2586 if (frame_mode == BUILD_STUB_FRAME) { | 2557 if (frame_mode == BUILD_STUB_FRAME) { |
2587 ASSERT(StackPointer().Is(jssp)); | 2558 ASSERT(StackPointer().Is(jssp)); |
2588 // TODO(jbramley): Does x1 contain a JSFunction here, or does it already | 2559 // TODO(jbramley): Does x1 contain a JSFunction here, or does it already |
2589 // have the special STUB smi? | 2560 // have the special STUB smi? |
2590 __ Mov(Tmp0(), Operand(Smi::FromInt(StackFrame::STUB))); | 2561 __ Mov(Tmp0(), Operand(Smi::FromInt(StackFrame::STUB))); |
2591 // Compiled stubs don't age, and so they don't need the predictable code | 2562 // Compiled stubs don't age, and so they don't need the predictable code |
2592 // ageing sequence. | 2563 // ageing sequence. |
2593 __ Push(lr, fp, cp, Tmp0()); | 2564 __ Push(lr, fp, cp, Tmp0()); |
2594 __ Add(fp, jssp, StandardFrameConstants::kFixedFrameSizeFromFp); | 2565 __ Add(fp, jssp, StandardFrameConstants::kFixedFrameSizeFromFp); |
(...skipping 2202 matching lines...) | |
4797 } | 4768 } |
4798 } | 4769 } |
4799 | 4770 |
4800 | 4771 |
4801 #undef __ | 4772 #undef __ |
4802 | 4773 |
4803 | 4774 |
4804 } } // namespace v8::internal | 4775 } } // namespace v8::internal |
4805 | 4776 |
4806 #endif // V8_TARGET_ARCH_A64 | 4777 #endif // V8_TARGET_ARCH_A64 |