Chromium Code Reviews

Diff: src/a64/macro-assembler-a64.cc

Issue 160423002: A64 support for DoubleToIStub (truncating). (Closed) Base URL: https://v8.googlecode.com/svn/branches/bleeding_edge
Patch Set: Add test cases | Created 6 years, 10 months ago
 // Copyright 2013 the V8 project authors. All rights reserved.
 // Redistribution and use in source and binary forms, with or without
 // modification, are permitted provided that the following conditions are
 // met:
 //
 //     * Redistributions of source code must retain the above copyright
 //       notice, this list of conditions and the following disclaimer.
 //     * Redistributions in binary form must reproduce the above
 //       copyright notice, this list of conditions and the following
 //       disclaimer in the documentation and/or other materials provided
(...skipping 2455 matching lines...)
                                     const ParameterCount& actual,
                                     InvokeFlag flag,
                                     const CallWrapper& call_wrapper) {
   // Contract with called JS functions requires that function is passed in x1.
   // (See FullCodeGenerator::Generate().)
   __ LoadObject(x1, function);
   InvokeFunction(x1, expected, actual, flag, call_wrapper);
 }


-void MacroAssembler::ECMA262ToInt32(Register result,
-                                    DoubleRegister input,
-                                    Register scratch1,
-                                    Register scratch2,
-                                    ECMA262ToInt32Result format) {
-  ASSERT(!AreAliased(result, scratch1, scratch2));
-  ASSERT(result.Is64Bits() && scratch1.Is64Bits() && scratch2.Is64Bits());
+void MacroAssembler::TryInlineTruncateDoubleToI(Register result,
+                                                DoubleRegister double_input,
+                                                Label* done) {
   STATIC_ASSERT(kSmiTag == 0);
   STATIC_ASSERT(kSmiValueSize == 32);

-  Label done, tag, manual_conversion;
-
-  // 1. Try to convert with a FPU convert instruction. It's trivial to compute
+  // Try to convert with a FPU convert instruction. It's trivial to compute
   // the modulo operation on an integer register so we convert to a 64-bit
   // integer, then find the 32-bit result from that.
   //
   // Fcvtzs will saturate to INT64_MIN (0x800...00) or INT64_MAX (0x7ff...ff)
   // when the double is out of range. NaNs and infinities will be converted to 0
   // (as ECMA-262 requires).
-  Fcvtzs(result, input);
+  Fcvtzs(result, double_input);

   // The values INT64_MIN (0x800...00) or INT64_MAX (0x7ff...ff) are not
   // representable using a double, so if the result is one of those then we know
   // that saturation occurred, and we need to manually handle the conversion.
   //
   // It is easy to detect INT64_MIN and INT64_MAX because adding or subtracting
   // 1 will cause signed overflow.
   Cmp(result, 1);
   Ccmp(result, -1, VFlag, vc);
-  B(vc, &tag);

-  // 2. Manually convert the input to an int32.
-  Fmov(result, input);
-
-  // Extract the exponent.
-  Register exponent = scratch1;
-  Ubfx(exponent, result, HeapNumber::kMantissaBits, HeapNumber::kExponentBits);
-
-  // If the exponent is >= 84 (kMantissaBits + 32), the result is always 0 since
-  // the mantissa gets shifted completely out of the int32_t result.
-  Cmp(exponent, HeapNumber::kExponentBias + HeapNumber::kMantissaBits + 32);
-  CzeroX(result, ge);
-  B(ge, &done);
-
-  // The Fcvtzs sequence handles all cases except where the conversion causes
-  // signed overflow in the int64_t target. Since we've already handled
-  // exponents >= 84, we can guarantee that 63 <= exponent < 84.
-
-  if (emit_debug_code()) {
-    Cmp(exponent, HeapNumber::kExponentBias + 63);
-    // Exponents less than this should have been handled by the Fcvt case.
-    Check(ge, kUnexpectedValue);
-  }
-
-  // Isolate the mantissa bits, and set the implicit '1'.
-  Register mantissa = scratch2;
-  Ubfx(mantissa, result, 0, HeapNumber::kMantissaBits);
-  Orr(mantissa, mantissa, 1UL << HeapNumber::kMantissaBits);
-
-  // Negate the mantissa if necessary.
-  Tst(result, kXSignMask);
-  Cneg(mantissa, mantissa, ne);
-
-  // Shift the mantissa bits in the correct place. We know that we have to shift
-  // it left here, because exponent >= 63 >= kMantissaBits.
-  Sub(exponent, exponent,
-      HeapNumber::kExponentBias + HeapNumber::kMantissaBits);
-  Lsl(result, mantissa, exponent);
-
-  Bind(&tag);
-  switch (format) {
-    case INT32_IN_W:
-      // There is nothing to do; the upper 32 bits are undefined.
-      if (emit_debug_code()) {
-        __ Mov(scratch1, 0x55555555);
-        __ Bfi(result, scratch1, 32, 32);
-      }
-      break;
-    case INT32_IN_X:
-      Sxtw(result, result);
-      break;
-    case SMI:
-      SmiTag(result);
-      break;
-  }
-
-  Bind(&done);
+  B(vc, done);
 }
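
As a reading aid, here is a rough portable C++ sketch of the property the new TryInlineTruncateDoubleToI relies on. It is an illustration only, not part of the patch: fcvtzs_sim and TryInlineTruncate are invented names, and fcvtzs_sim merely models the Fcvtzs behaviour described in the comments above (truncate towards zero, saturate out-of-range values, convert NaN to 0).

    #include <cmath>
    #include <cstdint>
    #include <limits>

    // Rough model of A64 Fcvtzs (double -> int64): truncate towards zero,
    // saturate when the value is out of range, convert NaN to 0.
    int64_t fcvtzs_sim(double d) {
      if (std::isnan(d)) return 0;
      if (d >= 9223372036854775808.0) {   // >= 2^63
        return std::numeric_limits<int64_t>::max();
      }
      if (d < -9223372036854775808.0) {   // < -2^63
        return std::numeric_limits<int64_t>::min();
      }
      return static_cast<int64_t>(d);
    }

    // Mirrors the inline fast path: succeed unless the conversion saturated.
    // A result of INT64_MAX or INT64_MIN is treated as saturation (see the
    // comment in the diff above); that is what the Cmp/Ccmp overflow test
    // detects, since adding or subtracting 1 from those values overflows.
    bool TryInlineTruncate(double input, int32_t* result) {
      int64_t converted = fcvtzs_sim(input);
      if (converted == std::numeric_limits<int64_t>::max() ||
          converted == std::numeric_limits<int64_t>::min()) {
        return false;  // Caller falls back to DoubleToIStub.
      }
      // Keep the low 32 bits, i.e. the value modulo 2^32.
      *result = static_cast<int32_t>(static_cast<uint32_t>(converted));
      return true;
    }
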


-void MacroAssembler::HeapNumberECMA262ToInt32(Register result,
-                                              Register heap_number,
-                                              Register scratch1,
-                                              Register scratch2,
-                                              DoubleRegister double_scratch,
-                                              ECMA262ToInt32Result format) {
-  if (emit_debug_code()) {
-    // Verify we indeed have a HeapNumber.
-    Label ok;
-    JumpIfHeapNumber(heap_number, &ok);
-    Abort(kExpectedHeapNumber);
-    Bind(&ok);
-  }
+void MacroAssembler::TruncateDoubleToI(Register result,
+                                       DoubleRegister double_input) {
+  Label done;
+  ASSERT(jssp.Is(StackPointer()));

-  Ldr(double_scratch, FieldMemOperand(heap_number, HeapNumber::kValueOffset));
-  ECMA262ToInt32(result, double_scratch, scratch1, scratch2, format);
+  TryInlineTruncateDoubleToI(result, double_input, &done);
+
+  // If we fell through then inline version didn't succeed - call stub instead.
+  Push(lr);
+  Push(double_input);  // Put input on stack.
+
+  DoubleToIStub stub(jssp,
+                     result,
+                     0,
+                     true,   // is_truncating
+                     true);  // skip_fastpath
+  CallStub(&stub);  // DoubleToIStub preserves any registers it needs to clobber
+
+  Drop(1, kDoubleSize);  // Drop the double input on the stack.
+  Pop(lr);
+
+  Bind(&done);
+
+  // TODO(rmcilroy): Remove this Sxtw once the following bug is fixed:
+  // https://code.google.com/p/v8/issues/detail?id=3149
+  Sxtw(result, result.W());
 }
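
For readers who want to see what the fallback actually computes: the exponent/mantissa conversion that the removed manual path performed, and that DoubleToIStub takes over once the Fcvtzs fast path saturates, can be sketched in portable C++ as below. Illustration only; ManualDoubleToInt32 is an invented name and this is not the stub's actual code. Like the removed code, it assumes the fast path has already dealt with inputs whose exponent is below 63.

    #include <cassert>
    #include <cstdint>
    #include <cstring>

    int32_t ManualDoubleToInt32(double input) {
      uint64_t bits;
      std::memcpy(&bits, &input, sizeof(bits));  // Like Fmov(result, input).

      const int kMantissaBits = 52;
      const int kExponentBits = 11;
      const int kExponentBias = 1023;

      int exponent = static_cast<int>(
          (bits >> kMantissaBits) & ((1 << kExponentBits) - 1));

      // If the exponent is >= 84 (kMantissaBits + 32), the mantissa is shifted
      // completely out of the int32 result, so the answer is 0 (this also
      // covers infinities).
      if (exponent >= kExponentBias + kMantissaBits + 32) return 0;

      // The fast path already handled everything below this range.
      assert(exponent >= kExponentBias + 63);

      // Isolate the mantissa bits and set the implicit '1'.
      uint64_t mantissa = (bits & ((uint64_t{1} << kMantissaBits) - 1)) |
                          (uint64_t{1} << kMantissaBits);

      // Negate if the sign bit is set.
      int64_t value = static_cast<int64_t>(mantissa);
      if (bits >> 63) value = -value;

      // Shift the mantissa into place; in this range the shift is always to
      // the left (11 <= shift < 32), then keep the low 32 bits (modulo 2^32).
      int shift = exponent - kExponentBias - kMantissaBits;
      uint64_t shifted = static_cast<uint64_t>(value) << shift;
      return static_cast<int32_t>(static_cast<uint32_t>(shifted));
    }
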


+void MacroAssembler::TruncateHeapNumberToI(Register result,
+                                           Register object) {
+  Label done;
+  ASSERT(!result.is(object));
+  ASSERT(jssp.Is(StackPointer()));
+
+  Ldr(fp_scratch, FieldMemOperand(object, HeapNumber::kValueOffset));
+  TryInlineTruncateDoubleToI(result, fp_scratch, &done);
+
+  // If we fell through then inline version didn't succeed - call stub instead.
+  Push(lr);
+  DoubleToIStub stub(object,
+                     result,
+                     HeapNumber::kValueOffset - kHeapObjectTag,
+                     true,   // is_truncating
+                     true);  // skip_fastpath
+  CallStub(&stub);  // DoubleToIStub preserves any registers it needs to clobber
+  Pop(lr);
+
+  Bind(&done);
+
+  // TODO(rmcilroy): Remove this Sxtw once the following bug is fixed:
+  // https://code.google.com/p/v8/issues/detail?id=3149
+  Sxtw(result, result.W());
+}
+
+
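
Finally, a minimal hypothetical call-site sketch (not part of the patch; GenerateTruncation is an invented name and the register choices are arbitrary) showing how a code generator might use the two new entry points. It assumes jssp is the active stack pointer, as both helpers ASSERT, and that the usual V8 includes and the v8::internal namespace are in scope.

    void GenerateTruncation(MacroAssembler* masm) {
      // Value already unboxed into a double register: one call covers the
      // Fcvtzs fast path and the DoubleToIStub fallback on saturation. The
      // ECMA-262 truncated value ends up sign-extended in x0.
      masm->TruncateDoubleToI(x0, d0);

      // Value still boxed as a HeapNumber in x1: the helper loads the payload
      // itself (via fp_scratch) before truncating. The result register must
      // not alias the object register.
      masm->TruncateHeapNumberToI(x2, x1);
    }
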
 void MacroAssembler::Prologue(PrologueFrameMode frame_mode) {
   if (frame_mode == BUILD_STUB_FRAME) {
     ASSERT(StackPointer().Is(jssp));
     // TODO(jbramley): Does x1 contain a JSFunction here, or does it already
     // have the special STUB smi?
     __ Mov(Tmp0(), Operand(Smi::FromInt(StackFrame::STUB)));
     // Compiled stubs don't age, and so they don't need the predictable code
     // ageing sequence.
     __ Push(lr, fp, cp, Tmp0());
     __ Add(fp, jssp, StandardFrameConstants::kFixedFrameSizeFromFp);
(...skipping 2202 matching lines...)
   }
 }


 #undef __


 } }  // namespace v8::internal

 #endif  // V8_TARGET_ARCH_A64