
Unified Diff: src/arm/code-stubs-arm.cc

Issue 6250126: ARM: Add support for and, or and xor to the type recording binary op stub. (Closed) Base URL: http://v8.googlecode.com/svn/branches/bleeding_edge/
Patch Set: '' Created 9 years, 10 months ago
 // Copyright 2011 the V8 project authors. All rights reserved.
 // Redistribution and use in source and binary forms, with or without
 // modification, are permitted provided that the following conditions are
 // met:
 //
 //     * Redistributions of source code must retain the above copyright
 //       notice, this list of conditions and the following disclaimer.
 //     * Redistributions in binary form must reproduce the above
 //       copyright notice, this list of conditions and the following
 //       disclaimer in the documentation and/or other materials provided
(...skipping 378 matching lines...)
   // floating point registers VFP3 must be supported. If core registers are
   // requested when VFP3 is supported d6 and d7 will still be scratched. If
   // either r0 or r1 is not a number (not smi and not heap number object) the
   // not_number label is jumped to with r0 and r1 intact.
   static void LoadOperands(MacroAssembler* masm,
                            FloatingPointHelper::Destination destination,
                            Register heap_number_map,
                            Register scratch1,
                            Register scratch2,
                            Label* not_number);
+
+  // Loads the number from object into dst as a 32-bit integer if possible. If
+  // the object is not a 32-bit integer control continues at the label
+  // not_int32. If VFP is supported double_scratch is used but not scratch2.
+  static void LoadNumberAsInteger(MacroAssembler* masm,
+                                  Register object,
+                                  Register dst,
+                                  Register heap_number_map,
+                                  Register scratch1,
+                                  Register scratch2,
+                                  DwVfpRegister double_scratch,
+                                  Label* not_int32);
+
  private:
   static void LoadNumber(MacroAssembler* masm,
                          FloatingPointHelper::Destination destination,
                          Register object,
                          DwVfpRegister dst,
                          Register dst1,
                          Register dst2,
                          Register heap_number_map,
                          Register scratch1,
                          Register scratch2,
(...skipping 45 matching lines...)
   LoadNumber(masm, destination,
              r0, d7, r2, r3, heap_number_map, scratch1, scratch2, slow);
 
   // Load left operand (r1) to d6 or r0/r1.
   LoadNumber(masm, destination,
              r1, d6, r0, r1, heap_number_map, scratch1, scratch2, slow);
 }
 
 
 void FloatingPointHelper::LoadNumber(MacroAssembler* masm,
                                      Destination destination,
                                      Register object,
                                      DwVfpRegister dst,
                                      Register dst1,
                                      Register dst2,
                                      Register heap_number_map,
                                      Register scratch1,
                                      Register scratch2,
                                      Label* not_number) {
+  if (FLAG_debug_code) {
+    __ AbortIfNotRootValue(heap_number_map,
+                           Heap::kHeapNumberMapRootIndex,
+                           "HeapNumberMap register clobbered.");
+  }
+
   Label is_smi, done;
 
   __ JumpIfSmi(object, &is_smi);
   __ JumpIfNotHeapNumber(object, heap_number_map, scratch1, not_number);
 
   // Handle loading a double from a heap number.
   if (CpuFeatures::IsSupported(VFP3) && destination == kVFPRegisters) {
     CpuFeatures::Scope scope(VFP3);
     // Load the double from tagged HeapNumber to double register.
     __ sub(scratch1, object, Operand(kHeapObjectTag));
(...skipping 24 matching lines...)
     ConvertToDoubleStub stub(dst2, dst1, scratch1, scratch2);
     __ push(lr);
     __ Call(stub.GetCode(), RelocInfo::CODE_TARGET);
     __ pop(lr);
   }
 
   __ bind(&done);
 }
 
 
+void FloatingPointHelper::LoadNumberAsInteger(MacroAssembler* masm,
+                                              Register object,
+                                              Register dst,
+                                              Register heap_number_map,
+                                              Register scratch1,
+                                              Register scratch2,
+                                              DwVfpRegister double_scratch,
+                                              Label* not_int32) {
+  if (FLAG_debug_code) {
+    __ AbortIfNotRootValue(heap_number_map,
+                           Heap::kHeapNumberMapRootIndex,
+                           "HeapNumberMap register clobbered.");
+  }
+  Label is_smi, done;
+  __ JumpIfSmi(object, &is_smi);
+  __ ldr(scratch1, FieldMemOperand(object, HeapNumber::kMapOffset));
+  __ cmp(scratch1, heap_number_map);
+  __ b(ne, not_int32);
+  __ ConvertToInt32(
+      object, dst, scratch1, scratch2, double_scratch, not_int32);
+  __ jmp(&done);
+  __ bind(&is_smi);
+  __ SmiUntag(dst, object);
+  __ bind(&done);
+}
+
+
+
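The new helper follows the standard smi/heap-number dispatch used throughout this file: test the tag bit, untag in the smi case, otherwise check the map word and convert the boxed double. As a rough illustration only — hypothetical names and a simplified 32-bit heap layout, not V8's actual C++ API — the emitted control flow corresponds to:

    #include <cmath>
    #include <cstdint>
    #include <cstring>

    // Returns false where the stub would branch to not_int32.
    bool LoadNumberAsInteger(uintptr_t object, const void* heap_number_map,
                             int32_t* dst) {
      if ((object & 1) == 0) {                     // Tag bit clear: a smi.
        *dst = static_cast<int32_t>(object) >> 1;  // SmiUntag (arithmetic shift).
        return true;
      }
      // Heap object pointers carry kHeapObjectTag == 1; untag, then compare
      // the map word against the HeapNumber map.
      const char* heap_object = reinterpret_cast<const char*>(object - 1);
      const void* map;
      std::memcpy(&map, heap_object, sizeof map);
      if (map != heap_number_map) return false;
      double value;  // HeapNumber payload follows the map word.
      std::memcpy(&value, heap_object + sizeof map, sizeof value);
      // ConvertToInt32 gives up unless the double is exactly an int32.
      if (std::trunc(value) != value ||
          value < -2147483648.0 || value > 2147483647.0) {
        return false;
      }
      *dst = static_cast<int32_t>(value);
      return true;
    }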
 // See comment for class.
 void WriteInt32ToHeapNumberStub::Generate(MacroAssembler* masm) {
   Label max_negative_int;
   // the_int_ has the answer which is a signed int32 but not a Smi.
   // We test for the special value that has a different exponent. This test
   // has the neat side effect of setting the flags according to the sign.
   STATIC_ASSERT(HeapNumber::kSignMask == 0x80000000u);
   __ cmp(the_int_, Operand(0x80000000u));
   __ b(eq, &max_negative_int);
   // Set up the correct exponent in scratch_. All non-Smi int32s have the same.
(...skipping 1142 matching lines...)
   Label done_checking_rhs, done_checking_lhs;
 
   Register heap_number_map = r6;
   __ LoadRoot(heap_number_map, Heap::kHeapNumberMapRootIndex);
 
   __ tst(lhs, Operand(kSmiTagMask));
   __ b(eq, &lhs_is_smi);  // It's a Smi so don't check it's a heap number.
   __ ldr(r4, FieldMemOperand(lhs, HeapNumber::kMapOffset));
   __ cmp(r4, heap_number_map);
   __ b(ne, &slow);
-  __ ConvertToInt32(lhs, r3, r5, r4, &slow);
+  __ ConvertToInt32(lhs, r3, r5, r4, d0, &slow);
   __ jmp(&done_checking_lhs);
   __ bind(&lhs_is_smi);
   __ mov(r3, Operand(lhs, ASR, 1));
   __ bind(&done_checking_lhs);
 
   __ tst(rhs, Operand(kSmiTagMask));
   __ b(eq, &rhs_is_smi);  // It's a Smi so don't check it's a heap number.
   __ ldr(r4, FieldMemOperand(rhs, HeapNumber::kMapOffset));
   __ cmp(r4, heap_number_map);
   __ b(ne, &slow);
-  __ ConvertToInt32(rhs, r2, r5, r4, &slow);
+  __ ConvertToInt32(rhs, r2, r5, r4, d0, &slow);
   __ jmp(&done_checking_rhs);
   __ bind(&rhs_is_smi);
   __ mov(r2, Operand(rhs, ASR, 1));
   __ bind(&done_checking_rhs);
 
   ASSERT(((lhs.is(r0) && rhs.is(r1)) || (lhs.is(r1) && rhs.is(r0))));
 
   // r0 and r1: Original operands (Smi or heap numbers).
   // r2 and r3: Signed int32 operands.
   switch (op_) {
(...skipping 821 matching lines...)
       __ tst(scratch1, Operand(0x80000000u | kSmiTagMask));
       __ b(ne, &not_smi_result);
 
       // Check for power of two on the right hand side.
       __ JumpIfNotPowerOfTwoOrZero(right, scratch1, &not_smi_result);
 
       // Perform modulus by masking.
       __ and_(right, left, Operand(scratch1));
       __ Ret();
       break;
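The masking step relies on the identity x % m == x & (m - 1) for any power of two m and non-negative x; negative left operands were already sent to not_smi_result by the sign test above, and the mask works on the still-tagged smis because tagging is a plain left shift. A minimal sketch of the identity (hypothetical helper, assuming scratch1 holds the mask right - 1):

    #include <cassert>
    #include <cstdint>

    int32_t ModPowerOfTwo(int32_t left, int32_t right) {
      assert(left >= 0);
      assert(right > 0 && (right & (right - 1)) == 0);  // Power of two.
      return left & (right - 1);  // Same as left % right for left >= 0.
    }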
+    case Token::BIT_OR:
+      __ orr(right, left, Operand(right));
+      __ Ret();
+      break;
+    case Token::BIT_AND:
+      __ and_(right, left, Operand(right));
+      __ Ret();
+      break;
+    case Token::BIT_XOR:
+      __ eor(right, left, Operand(right));
+      __ Ret();
+      break;
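Note that these three new cases run directly on the tagged operands, with no untag/retag and no overflow check. That is sound because a smi is the integer shifted left one bit with a zero tag (kSmiTag == 0), and the shift distributes over the bitwise operations. A small self-check of that identity, assuming two's-complement 32-bit smis:

    #include <cassert>
    #include <cstdint>

    // SmiTag(x) == x << 1, done in unsigned arithmetic to avoid UB.
    int32_t SmiTag(int32_t x) {
      return static_cast<int32_t>(static_cast<uint32_t>(x) << 1);
    }

    void CheckBitwiseOnTaggedSmis(int32_t a, int32_t b) {
      assert((SmiTag(a) | SmiTag(b)) == SmiTag(a | b));
      assert((SmiTag(a) & SmiTag(b)) == SmiTag(a & b));
      assert((SmiTag(a) ^ SmiTag(b)) == SmiTag(a ^ b));
    }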
     default:
       UNREACHABLE();
   }
   __ bind(&not_smi_result);
 }
 
 
 void TypeRecordingBinaryOpStub::GenerateFPOperation(MacroAssembler* masm,
                                                     bool smi_operands,
                                                     Label* not_numbers,
                                                     Label* gc_required) {
   Register left = r1;
   Register right = r0;
   Register scratch1 = r7;
   Register scratch2 = r9;
 
-  // Load left and right operands into d6 and d7 or r0/r1 and r2/r3 depending
-  // on whether VFP3 is available.
-  FloatingPointHelper::Destination destination =
-      CpuFeatures::IsSupported(VFP3) && op_ != Token::MOD ?
-      FloatingPointHelper::kVFPRegisters :
-      FloatingPointHelper::kCoreRegisters;
+  ASSERT(smi_operands || (not_numbers != NULL));
+  if (smi_operands && FLAG_debug_code) {
+    __ AbortIfNotSmi(left);
+    __ AbortIfNotSmi(right);
+  }
 
   Register heap_number_map = r6;
   __ LoadRoot(heap_number_map, Heap::kHeapNumberMapRootIndex);
 
-  // Allocate new heap number for result.
-  Register result = r5;
-  __ AllocateHeapNumber(
-      result, scratch1, scratch2, heap_number_map, gc_required);
-
-  // Load the operands.
-  if (smi_operands) {
-    if (FLAG_debug_code) {
-      __ AbortIfNotSmi(left);
-      __ AbortIfNotSmi(right);
-    }
-    FloatingPointHelper::LoadSmis(masm, destination, scratch1, scratch2);
-  } else {
-    FloatingPointHelper::LoadOperands(masm,
-                                      destination,
-                                      heap_number_map,
-                                      scratch1,
-                                      scratch2,
-                                      not_numbers);
-  }
-
-  // Calculate the result.
-  if (destination == FloatingPointHelper::kVFPRegisters) {
-    // Using VFP registers:
-    // d6: Left value
-    // d7: Right value
-    CpuFeatures::Scope scope(VFP3);
-    switch (op_) {
-      case Token::ADD:
-        __ vadd(d5, d6, d7);
-        break;
-      case Token::SUB:
-        __ vsub(d5, d6, d7);
-        break;
-      case Token::MUL:
-        __ vmul(d5, d6, d7);
-        break;
-      case Token::DIV:
-        __ vdiv(d5, d6, d7);
-        break;
-      default:
-        UNREACHABLE();
-    }
-
-    __ sub(r0, result, Operand(kHeapObjectTag));
-    __ vstr(d5, r0, HeapNumber::kValueOffset);
-    __ add(r0, r0, Operand(kHeapObjectTag));
-    __ Ret();
-  } else {
-    // Using core registers:
-    // r0: Left value (least significant part of mantissa).
-    // r1: Left value (sign, exponent, top of mantissa).
-    // r2: Right value (least significant part of mantissa).
-    // r3: Right value (sign, exponent, top of mantissa).
-
-    __ push(lr);  // For later.
-    __ PrepareCallCFunction(4, scratch1);  // Two doubles are 4 arguments.
-    // Call C routine that may not cause GC or other trouble. r5 is callee
-    // save.
-    __ CallCFunction(ExternalReference::double_fp_operation(op_), 4);
-    // Store answer in the overwritable heap number.
-#if !defined(USE_ARM_EABI)
-    // Double returned in fp coprocessor register 0 and 1, encoded as
-    // register cr8. Offsets must be divisible by 4 for coprocessor so we
-    // need to subtract the tag from r5.
-    __ sub(scratch1, result, Operand(kHeapObjectTag));
-    __ stc(p1, cr8, MemOperand(scratch1, HeapNumber::kValueOffset));
-#else
-    // Double returned in registers 0 and 1.
-    __ Strd(r0, r1, FieldMemOperand(result, HeapNumber::kValueOffset));
-#endif
-    __ mov(r0, Operand(result));
-    // And we are done.
-    __ pop(pc);
+  switch (op_) {
+    case Token::ADD:
+    case Token::SUB:
+    case Token::MUL:
+    case Token::DIV:
+    case Token::MOD: {
+      // Load left and right operands into d6 and d7 or r0/r1 and r2/r3
+      // depending on whether VFP3 is available or not.
+      FloatingPointHelper::Destination destination =
+          CpuFeatures::IsSupported(VFP3) && op_ != Token::MOD ?
+          FloatingPointHelper::kVFPRegisters :
+          FloatingPointHelper::kCoreRegisters;
+
+      // Allocate new heap number for result.
+      Register result = r5;
+      __ AllocateHeapNumber(
+          result, scratch1, scratch2, heap_number_map, gc_required);
+
+      // Load the operands.
+      if (smi_operands) {
+        FloatingPointHelper::LoadSmis(masm, destination, scratch1, scratch2);
+      } else {
+        FloatingPointHelper::LoadOperands(masm,
+                                          destination,
+                                          heap_number_map,
+                                          scratch1,
+                                          scratch2,
+                                          not_numbers);
+      }
+
+      // Calculate the result.
+      if (destination == FloatingPointHelper::kVFPRegisters) {
+        // Using VFP registers:
+        // d6: Left value
+        // d7: Right value
+        CpuFeatures::Scope scope(VFP3);
+        switch (op_) {
+          case Token::ADD:
+            __ vadd(d5, d6, d7);
+            break;
+          case Token::SUB:
+            __ vsub(d5, d6, d7);
+            break;
+          case Token::MUL:
+            __ vmul(d5, d6, d7);
+            break;
+          case Token::DIV:
+            __ vdiv(d5, d6, d7);
+            break;
+          default:
+            UNREACHABLE();
+        }
+
+        __ sub(r0, result, Operand(kHeapObjectTag));
+        __ vstr(d5, r0, HeapNumber::kValueOffset);
+        __ add(r0, r0, Operand(kHeapObjectTag));
+        __ Ret();
+      } else {
+        // Using core registers:
+        // r0: Left value (least significant part of mantissa).
+        // r1: Left value (sign, exponent, top of mantissa).
+        // r2: Right value (least significant part of mantissa).
+        // r3: Right value (sign, exponent, top of mantissa).
+
+        // Push the current return address before the C call. Return will be
+        // through pop(pc) below.
+        __ push(lr);
+        __ PrepareCallCFunction(4, scratch1);  // Two doubles are 4 arguments.
+        // Call C routine that may not cause GC or other trouble. r5 is callee
+        // save.
+        __ CallCFunction(ExternalReference::double_fp_operation(op_), 4);
+        // Store answer in the overwritable heap number.
+#if !defined(USE_ARM_EABI)
+        // Double returned in fp coprocessor register 0 and 1, encoded as
+        // register cr8. Offsets must be divisible by 4 for coprocessor so we
+        // need to subtract the tag from r5.
+        __ sub(scratch1, result, Operand(kHeapObjectTag));
+        __ stc(p1, cr8, MemOperand(scratch1, HeapNumber::kValueOffset));
+#else
+        // Double returned in registers 0 and 1.
+        __ Strd(r0, r1, FieldMemOperand(result, HeapNumber::kValueOffset));
+#endif
+        // Place result in r0 and return to the pushed return address.
+        __ mov(r0, Operand(result));
+        __ pop(pc);
+      }
+      break;
+    }
+    case Token::BIT_OR:
+    case Token::BIT_XOR:
+    case Token::BIT_AND: {
+      if (smi_operands) {
+        __ SmiUntag(r3, left);
+        __ SmiUntag(r2, right);
+      } else {
+        // Convert operands to 32-bit integers. Right in r2 and left in r3.
+        FloatingPointHelper::LoadNumberAsInteger(masm,
+                                                 left,
+                                                 r3,
+                                                 heap_number_map,
+                                                 scratch1,
+                                                 scratch2,
+                                                 d0,
+                                                 not_numbers);
+        FloatingPointHelper::LoadNumberAsInteger(masm,
+                                                 right,
+                                                 r2,
+                                                 heap_number_map,
+                                                 scratch1,
+                                                 scratch2,
+                                                 d0,
+                                                 not_numbers);
+      }
+      switch (op_) {
+        case Token::BIT_OR:
+          __ orr(r2, r3, Operand(r2));
+          break;
+        case Token::BIT_XOR:
+          __ eor(r2, r3, Operand(r2));
+          break;
+        case Token::BIT_AND:
+          __ and_(r2, r3, Operand(r2));
+          break;
+        default:
+          UNREACHABLE();
+      }
+
+      Label result_not_a_smi;
+      // Check that the *signed* result fits in a smi.
+      __ add(r3, r2, Operand(0x40000000), SetCC);
+      __ b(mi, &result_not_a_smi);
+      __ SmiTag(r0, r2);
+      __ Ret();
+
+      // Allocate new heap number for result.
+      __ bind(&result_not_a_smi);
+      __ AllocateHeapNumber(
+          r5, scratch1, scratch2, heap_number_map, gc_required);
+
+      // r2: Answer as signed int32.
+      // r5: Heap number to write answer into.
+
+      // Nothing can go wrong now, so move the heap number to r0, which is the
+      // result.
+      __ mov(r0, Operand(r5));
+
+      if (CpuFeatures::IsSupported(VFP3)) {
+        // Convert the int32 in r2 to the heap number in r0. r3 is corrupted.
+        CpuFeatures::Scope scope(VFP3);
+        __ vmov(s0, r2);
+        __ vcvt_f64_s32(d0, s0);
+        __ sub(r3, r0, Operand(kHeapObjectTag));
+        __ vstr(d0, r3, HeapNumber::kValueOffset);
+        __ Ret();
+      } else {
+        // Tail call that writes the int32 in r2 to the heap number in r0,
+        // using r3 as scratch. r0 is preserved and returned.
+        WriteInt32ToHeapNumberStub stub(r2, r0, r3);
+        __ TailCallStub(&stub);
+      }
+      break;
+    }
+    default:
+      UNREACHABLE();
   }
 }
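The fits-in-a-smi test above ("add r3, r2, Operand(0x40000000), SetCC" followed by "b(mi, ...)") is worth spelling out: a 32-bit smi payload holds 31 signed bits, i.e. values in [-2^30, 2^30). Adding 2^30 maps exactly that range onto [0, 2^31), so the addition's sign flag is set precisely when the value does not fit. The same computation as a sketch (hypothetical helper name):

    #include <cstdint>

    bool FitsInSmi(int32_t value) {
      // Unsigned add, then reinterpret: mirrors "add ..., SetCC" + "b(mi, ...)".
      int32_t shifted =
          static_cast<int32_t>(static_cast<uint32_t>(value) + 0x40000000u);
      return shifted >= 0;  // Negative (mi) means the value is out of smi range.
    }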
 
 
 // Generate the smi code. If the operation on smis is successful this return
 // is generated. If the result is not a smi and heap number allocation is not
 // requested the code falls through. If number allocation is requested but a
 // heap number cannot be allocated the code jumps to the label gc_required.
 void TypeRecordingBinaryOpStub::GenerateSmiCode(MacroAssembler* masm,
     Label* gc_required,
     SmiCodeGenerateHeapNumberResults allow_heapnumber_results) {
   Label not_smis;
 
   ASSERT(op_ == Token::ADD ||
          op_ == Token::SUB ||
          op_ == Token::MUL ||
          op_ == Token::DIV ||
-         op_ == Token::MOD);
+         op_ == Token::MOD ||
+         op_ == Token::BIT_OR ||
+         op_ == Token::BIT_AND ||
+         op_ == Token::BIT_XOR);
 
   Register left = r1;
   Register right = r0;
   Register scratch1 = r7;
   Register scratch2 = r9;
 
   // Perform combined smi check on both operands.
   __ orr(scratch1, left, Operand(right));
   STATIC_ASSERT(kSmiTag == 0);
   __ tst(scratch1, Operand(kSmiTagMask));
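The combined check folds two tag tests into one: since kSmiTag == 0, OR-ing the two tagged words leaves the tag bit clear iff both operands have a clear tag bit. Expressed as a one-line predicate:

    #include <cstdint>

    // kSmiTagMask == 1 on 32-bit V8; both words are smis iff the OR has tag 0.
    bool BothAreSmis(uint32_t left, uint32_t right) {
      return ((left | right) & 1u) == 0;
    }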
(...skipping 11 matching lines...)
 }
 
 
 void TypeRecordingBinaryOpStub::GenerateSmiStub(MacroAssembler* masm) {
   Label not_smis, call_runtime;
 
   ASSERT(op_ == Token::ADD ||
          op_ == Token::SUB ||
          op_ == Token::MUL ||
          op_ == Token::DIV ||
-         op_ == Token::MOD);
+         op_ == Token::MOD ||
+         op_ == Token::BIT_OR ||
+         op_ == Token::BIT_AND ||
+         op_ == Token::BIT_XOR);
 
   if (result_type_ == TRBinaryOpIC::UNINITIALIZED ||
       result_type_ == TRBinaryOpIC::SMI) {
     // Only allow smi results.
     GenerateSmiCode(masm, NULL, NO_HEAPNUMBER_RESULTS);
   } else {
     // Allow heap number result and don't make a transition if a heap number
     // cannot be allocated.
     GenerateSmiCode(masm, &call_runtime, ALLOW_HEAPNUMBER_RESULTS);
   }
(...skipping 15 matching lines...)
   GenerateAddStrings(masm);
   GenerateTypeTransition(masm);
 }
 
 
 void TypeRecordingBinaryOpStub::GenerateInt32Stub(MacroAssembler* masm) {
   ASSERT(op_ == Token::ADD ||
          op_ == Token::SUB ||
          op_ == Token::MUL ||
          op_ == Token::DIV ||
-         op_ == Token::MOD);
+         op_ == Token::MOD ||
+         op_ == Token::BIT_OR ||
+         op_ == Token::BIT_AND ||
+         op_ == Token::BIT_XOR);
 
   ASSERT(operands_type_ == TRBinaryOpIC::INT32);
 
   GenerateTypeTransition(masm);
 }
 
 
 void TypeRecordingBinaryOpStub::GenerateHeapNumberStub(MacroAssembler* masm) {
   ASSERT(op_ == Token::ADD ||
          op_ == Token::SUB ||
          op_ == Token::MUL ||
          op_ == Token::DIV ||
-         op_ == Token::MOD);
+         op_ == Token::MOD ||
+         op_ == Token::BIT_OR ||
+         op_ == Token::BIT_AND ||
+         op_ == Token::BIT_XOR);
 
   Label not_numbers, call_runtime;
   ASSERT(operands_type_ == TRBinaryOpIC::HEAP_NUMBER);
 
   GenerateFPOperation(masm, false, &not_numbers, &call_runtime);
 
   __ bind(&not_numbers);
   GenerateTypeTransition(masm);
 
   __ bind(&call_runtime);
   GenerateCallRuntime(masm);
 }
 
 
 void TypeRecordingBinaryOpStub::GenerateGeneric(MacroAssembler* masm) {
   ASSERT(op_ == Token::ADD ||
          op_ == Token::SUB ||
          op_ == Token::MUL ||
          op_ == Token::DIV ||
-         op_ == Token::MOD);
+         op_ == Token::MOD ||
+         op_ == Token::BIT_OR ||
+         op_ == Token::BIT_AND ||
+         op_ == Token::BIT_XOR);
 
   Label call_runtime;
 
   GenerateSmiCode(masm, &call_runtime, ALLOW_HEAPNUMBER_RESULTS);
 
   // If all else fails, use the runtime system to get the correct
   // result.
   __ bind(&call_runtime);
 
   // Try to add strings before calling runtime.
(...skipping 44 matching lines...)
       break;
     case Token::MUL:
       __ InvokeBuiltin(Builtins::MUL, JUMP_JS);
       break;
     case Token::DIV:
       __ InvokeBuiltin(Builtins::DIV, JUMP_JS);
       break;
     case Token::MOD:
       __ InvokeBuiltin(Builtins::MOD, JUMP_JS);
       break;
+    case Token::BIT_OR:
+      __ InvokeBuiltin(Builtins::BIT_OR, JUMP_JS);
+      break;
+    case Token::BIT_AND:
+      __ InvokeBuiltin(Builtins::BIT_AND, JUMP_JS);
+      break;
+    case Token::BIT_XOR:
+      __ InvokeBuiltin(Builtins::BIT_XOR, JUMP_JS);
+      break;
     default:
       UNREACHABLE();
   }
 }
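These builtins carry the full JavaScript semantics that the fast paths above avoid: per ECMA-262, each operand of |, &, and ^ is coerced with ToInt32 and the operation runs on the resulting 32-bit integers. A simplified sketch of ToInt32 for number inputs (it ignores the object-to-number conversion the real builtins also perform, and assumes two's-complement wrapping on the final cast):

    #include <cmath>
    #include <cstdint>

    int32_t ToInt32(double d) {
      if (!std::isfinite(d)) return 0;                    // NaN, +/-Inf -> 0.
      double m = std::fmod(std::trunc(d), 4294967296.0);  // Reduce mod 2^32.
      if (m < 0) m += 4294967296.0;
      return static_cast<int32_t>(static_cast<uint32_t>(m));
    }

    double JsBitOr(double left, double right) {
      return ToInt32(left) | ToInt32(right);
    }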
 
 
 void TypeRecordingBinaryOpStub::GenerateHeapResultAllocation(
     MacroAssembler* masm,
     Register result,
     Register heap_number_map,
(...skipping 205 matching lines...)
     __ Assert(ne, "Unexpected smi operand.");
   }
 
   // Check if the operand is a heap number.
   __ ldr(r1, FieldMemOperand(r0, HeapObject::kMapOffset));
   __ AssertRegisterIsRoot(heap_number_map, Heap::kHeapNumberMapRootIndex);
   __ cmp(r1, heap_number_map);
   __ b(ne, &slow);
 
   // Convert the heap number in r0 to an untagged integer in r1.
-  __ ConvertToInt32(r0, r1, r2, r3, &slow);
+  __ ConvertToInt32(r0, r1, r2, r3, d0, &slow);
 
   // Do the bitwise operation (move negated) and check if the result
   // fits in a smi.
   Label try_float;
   __ mvn(r1, Operand(r1));
   __ add(r2, r1, Operand(0x40000000), SetCC);
   __ b(mi, &try_float);
   __ mov(r0, Operand(r1, LSL, kSmiTagSize));
   __ b(&done);
 
(...skipping 2748 matching lines...)
   __ SmiTag(r0, scratch1);
   __ Ret();
 }
 
 
 #undef __
 
 } }  // namespace v8::internal
 
 #endif  // V8_TARGET_ARCH_ARM
