Chromium Code Reviews

Side by Side Diff: src/arm/code-stubs-arm.cc

Issue 6250126: ARM: Add support for and, or and xor to the type recording binary op stub. (Closed)
Base URL: http://v8.googlecode.com/svn/branches/bleeding_edge/
Patch Set: '' Created 9 years, 10 months ago
 // Copyright 2011 the V8 project authors. All rights reserved.
 // Redistribution and use in source and binary forms, with or without
 // modification, are permitted provided that the following conditions are
 // met:
 //
 //     * Redistributions of source code must retain the above copyright
 //       notice, this list of conditions and the following disclaimer.
 //     * Redistributions in binary form must reproduce the above
 //       copyright notice, this list of conditions and the following
 //       disclaimer in the documentation and/or other materials provided
(...skipping 378 matching lines...)
   // floating point registers VFP3 must be supported. If core registers are
   // requested when VFP3 is supported d6 and d7 will still be scratched. If
   // either r0 or r1 is not a number (not smi and not heap number object) the
   // not_number label is jumped to with r0 and r1 intact.
   static void LoadOperands(MacroAssembler* masm,
                            FloatingPointHelper::Destination destination,
                            Register heap_number_map,
                            Register scratch1,
                            Register scratch2,
                            Label* not_number);
+
+  // Loads the number from object into dst as a 32-bit integer if possible. If
+  // the object is not a 32-bit integer control continues at the label
+  // not_int32. If VFP is supported double_scratch is used but not scratch2.
+  static void LoadNumberAsInteger(MacroAssembler* masm,
+                                  Register object,
+                                  Register dst,
+                                  Register heap_number_map,
+                                  Register scratch1,
+                                  Register scratch2,
+                                  DwVfpRegister double_scratch,
+                                  Label* not_int32);
+
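An aside on the contract being declared here: the smi fast path just untags, while the heap-number path only succeeds when the stored double is exactly a signed 32-bit integer. Below is a minimal standalone C++ model of that dispatch, assuming V8's 32-bit smi layout (payload shifted left by one, tag bit 0 clear); the function name and the std::nullopt signalling are illustrative, not V8 API.

    #include <cmath>
    #include <cstdint>
    #include <optional>

    // Hypothetical model, not V8 code. `tagged` is either a smi or stands in
    // for a heap number whose double payload is `heap_value`. Returning
    // std::nullopt models the jump to the not_int32 label.
    std::optional<int32_t> LoadNumberAsIntegerModel(uint32_t tagged,
                                                    double heap_value) {
      const uint32_t kSmiTagMask = 1;
      if ((tagged & kSmiTagMask) == 0) {
        // SmiUntag: arithmetic shift right by one recovers the signed value.
        return static_cast<int32_t>(tagged) >> 1;
      }
      // Heap-number path: accept only doubles that are exactly an int32,
      // mirroring "control continues at the label not_int32" otherwise.
      if (std::trunc(heap_value) != heap_value ||
          heap_value < -2147483648.0 || heap_value > 2147483647.0) {
        return std::nullopt;
      }
      return static_cast<int32_t>(heap_value);
    }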
  private:
   static void LoadNumber(MacroAssembler* masm,
                          FloatingPointHelper::Destination destination,
                          Register object,
                          DwVfpRegister dst,
                          Register dst1,
                          Register dst2,
                          Register heap_number_map,
                          Register scratch1,
                          Register scratch2,
(...skipping 45 matching lines...)
   LoadNumber(masm, destination,
              r0, d7, r2, r3, heap_number_map, scratch1, scratch2, slow);
 
   // Load left operand (r1) to d7 or r0/r1.
   LoadNumber(masm, destination,
              r1, d6, r0, r1, heap_number_map, scratch1, scratch2, slow);
 }
 
 
 void FloatingPointHelper::LoadNumber(MacroAssembler* masm,
                                      Destination destination,
                                      Register object,
                                      DwVfpRegister dst,
                                      Register dst1,
                                      Register dst2,
                                      Register heap_number_map,
                                      Register scratch1,
                                      Register scratch2,
                                      Label* not_number) {
+  if (FLAG_debug_code) {
+    __ AbortIfNotRootValue(heap_number_map,
+                           Heap::kHeapNumberMapRootIndex,
+                           "HeapNumberMap register clobbered.");
+  }
+
   Label is_smi, done;
 
   __ JumpIfSmi(object, &is_smi);
   __ JumpIfNotHeapNumber(object, heap_number_map, scratch1, not_number);
 
   // Handle loading a double from a heap number.
   if (CpuFeatures::IsSupported(VFP3) && destination == kVFPRegisters) {
     CpuFeatures::Scope scope(VFP3);
     // Load the double from tagged HeapNumber to double register.
     __ sub(scratch1, object, Operand(kHeapObjectTag));
(...skipping 24 matching lines...)
     ConvertToDoubleStub stub(dst2, dst1, scratch1, scratch2);
     __ push(lr);
     __ Call(stub.GetCode(), RelocInfo::CODE_TARGET);
     __ pop(lr);
   }
 
   __ bind(&done);
 }
 
 
+void FloatingPointHelper::LoadNumberAsInteger(MacroAssembler* masm,
+                                              Register object,
+                                              Register dst,
+                                              Register heap_number_map,
+                                              Register scratch1,
+                                              Register scratch2,
+                                              DwVfpRegister double_scratch,
+                                              Label* not_number) {
Karl Klose 2011/02/04 10:19:23: not_number -> not_int32?
Søren Thygesen Gjesse 2011/02/04 10:40:51: Done.
+  if (FLAG_debug_code) {
+    __ AbortIfNotRootValue(heap_number_map,
+                           Heap::kHeapNumberMapRootIndex,
+                           "HeapNumberMap register clobbered.");
+  }
+  Label is_smi, done;
+  __ JumpIfSmi(object, &is_smi);
+  __ ldr(scratch1, FieldMemOperand(object, HeapNumber::kMapOffset));
+  __ cmp(scratch1, heap_number_map);
+  __ b(ne, not_number);
+  __ ConvertToInt32(
+      object, dst, scratch1, scratch2, double_scratch, not_number);
+  __ jmp(&done);
+  __ bind(&is_smi);
+  __ SmiUntag(dst, object);
+  __ bind(&done);
+}
+
+
+
 // See comment for class.
 void WriteInt32ToHeapNumberStub::Generate(MacroAssembler* masm) {
   Label max_negative_int;
   // the_int_ has the answer which is a signed int32 but not a Smi.
   // We test for the special value that has a different exponent. This test
   // has the neat side effect of setting the flags according to the sign.
   STATIC_ASSERT(HeapNumber::kSignMask == 0x80000000u);
   __ cmp(the_int_, Operand(0x80000000u));
   __ b(eq, &max_negative_int);
   // Set up the correct exponent in scratch_. All non-Smi int32s have the same.
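Why 0x80000000 is the single special value here: smis already cover [-2^30, 2^30), so any int32 reaching this stub has magnitude in [2^30, 2^31], and every such value except -2^31 normalizes to the same binary exponent. A standalone check of that claim (plain C++, not V8 code), using frexp's convention x = m * 2^e with m in [0.5, 1):

    #include <cmath>
    #include <cstdint>
    #include <cstdio>

    int main() {
      int exp = 0;
      std::frexp(static_cast<double>(INT32_C(1) << 30), &exp);
      std::printf("2^30     -> exponent %d\n", exp);   // 31
      std::frexp(static_cast<double>(INT32_MAX), &exp);
      std::printf("2^31 - 1 -> exponent %d\n", exp);   // 31: same exponent
      std::frexp(std::fabs(static_cast<double>(INT32_MIN)), &exp);
      std::printf("2^31     -> exponent %d\n", exp);   // 32: the odd one out
      return 0;
    }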
(...skipping 1142 matching lines...)
   Label done_checking_rhs, done_checking_lhs;
 
   Register heap_number_map = r6;
   __ LoadRoot(heap_number_map, Heap::kHeapNumberMapRootIndex);
 
   __ tst(lhs, Operand(kSmiTagMask));
   __ b(eq, &lhs_is_smi);  // It's a Smi so don't check it's a heap number.
   __ ldr(r4, FieldMemOperand(lhs, HeapNumber::kMapOffset));
   __ cmp(r4, heap_number_map);
   __ b(ne, &slow);
-  __ ConvertToInt32(lhs, r3, r5, r4, &slow);
+  __ ConvertToInt32(lhs, r3, r5, r4, d0, &slow);
   __ jmp(&done_checking_lhs);
   __ bind(&lhs_is_smi);
   __ mov(r3, Operand(lhs, ASR, 1));
   __ bind(&done_checking_lhs);
 
   __ tst(rhs, Operand(kSmiTagMask));
   __ b(eq, &rhs_is_smi);  // It's a Smi so don't check it's a heap number.
   __ ldr(r4, FieldMemOperand(rhs, HeapNumber::kMapOffset));
   __ cmp(r4, heap_number_map);
   __ b(ne, &slow);
-  __ ConvertToInt32(rhs, r2, r5, r4, &slow);
+  __ ConvertToInt32(rhs, r2, r5, r4, d0, &slow);
   __ jmp(&done_checking_rhs);
   __ bind(&rhs_is_smi);
   __ mov(r2, Operand(rhs, ASR, 1));
   __ bind(&done_checking_rhs);
 
   ASSERT(((lhs.is(r0) && rhs.is(r1)) || (lhs.is(r1) && rhs.is(r0))));
 
   // r0 and r1: Original operands (Smi or heap numbers).
   // r2 and r3: Signed int32 operands.
   switch (op_) {
(...skipping 821 matching lines...)
       __ tst(scratch1, Operand(0x80000000u | kSmiTagMask));
       __ b(ne, &not_smi_result);
 
       // Check for power of two on the right hand side.
       __ JumpIfNotPowerOfTwoOrZero(right, scratch1, &not_smi_result);
 
       // Perform modulus by masking.
       __ and_(right, left, Operand(scratch1));
       __ Ret();
       break;
+    case Token::BIT_OR:
+      __ orr(right, left, Operand(right));
+      __ Ret();
+      break;
+    case Token::BIT_AND:
+      __ and_(right, left, Operand(right));
+      __ Ret();
+      break;
+    case Token::BIT_XOR:
+      __ eor(right, left, Operand(right));
+      __ Ret();
+      break;
     default:
       UNREACHABLE();
   }
   __ bind(&not_smi_result);
 }
 
 
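The power-of-two modulus path above rests on a classic identity: for a non-negative left operand and right == 2^k, left % right equals left & (right - 1); reading the surrounding code, scratch1 presumably holds right - 1 after JumpIfNotPowerOfTwoOrZero. A minimal sketch of the identity itself (plain C++, not the stub):

    #include <cstdint>
    #include <cstdio>

    int main() {
      const uint32_t left = 1234567;
      const uint32_t right = 64;        // a power of two, as the stub requires
      const uint32_t mask = right - 1;  // the role scratch1 plays above
      // Both print 7: masking keeps the low k bits, which is exactly the
      // remainder modulo 2^k for a non-negative left operand.
      std::printf("%u\n", left % right);
      std::printf("%u\n", left & mask);
      return 0;
    }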
 void TypeRecordingBinaryOpStub::GenerateFPOperation(MacroAssembler* masm,
                                                     bool smi_operands,
                                                     Label* not_numbers,
                                                     Label* gc_required) {
   Register left = r1;
   Register right = r0;
   Register scratch1 = r7;
   Register scratch2 = r9;
 
-  // Load left and right operands into d6 and d7 or r0/r1 and r2/r3 depending
-  // on whether VFP3 is available.
-  FloatingPointHelper::Destination destination =
-      CpuFeatures::IsSupported(VFP3) && op_ != Token::MOD ?
-      FloatingPointHelper::kVFPRegisters :
-      FloatingPointHelper::kCoreRegisters;
+  ASSERT(smi_operands || (not_numbers != NULL));
+  if (smi_operands && FLAG_debug_code) {
+    __ AbortIfNotSmi(left);
+    __ AbortIfNotSmi(right);
+  }
 
   Register heap_number_map = r6;
   __ LoadRoot(heap_number_map, Heap::kHeapNumberMapRootIndex);
 
-  // Allocate new heap number for result.
-  Register result = r5;
-  __ AllocateHeapNumber(
-      result, scratch1, scratch2, heap_number_map, gc_required);
-
-  // Load the operands.
-  if (smi_operands) {
-    if (FLAG_debug_code) {
-      __ AbortIfNotSmi(left);
-      __ AbortIfNotSmi(right);
-    }
-    FloatingPointHelper::LoadSmis(masm, destination, scratch1, scratch2);
-  } else {
-    FloatingPointHelper::LoadOperands(masm,
-                                      destination,
-                                      heap_number_map,
-                                      scratch1,
-                                      scratch2,
-                                      not_numbers);
-  }
-
-  // Calculate the result.
-  if (destination == FloatingPointHelper::kVFPRegisters) {
-    // Using VFP registers:
-    // d6: Left value
-    // d7: Right value
-    CpuFeatures::Scope scope(VFP3);
-    switch (op_) {
-      case Token::ADD:
-        __ vadd(d5, d6, d7);
-        break;
-      case Token::SUB:
-        __ vsub(d5, d6, d7);
-        break;
-      case Token::MUL:
-        __ vmul(d5, d6, d7);
-        break;
-      case Token::DIV:
-        __ vdiv(d5, d6, d7);
-        break;
-      default:
-        UNREACHABLE();
-    }
-
-    __ sub(r0, result, Operand(kHeapObjectTag));
-    __ vstr(d5, r0, HeapNumber::kValueOffset);
-    __ add(r0, r0, Operand(kHeapObjectTag));
-    __ Ret();
-  } else {
-    // Using core registers:
-    // r0: Left value (least significant part of mantissa).
-    // r1: Left value (sign, exponent, top of mantissa).
-    // r2: Right value (least significant part of mantissa).
-    // r3: Right value (sign, exponent, top of mantissa).
-
-    __ push(lr);  // For later.
-    __ PrepareCallCFunction(4, scratch1);  // Two doubles are 4 arguments.
-    // Call C routine that may not cause GC or other trouble. r5 is callee
-    // save.
-    __ CallCFunction(ExternalReference::double_fp_operation(op_), 4);
-    // Store answer in the overwritable heap number.
-#if !defined(USE_ARM_EABI)
-    // Double returned in fp coprocessor register 0 and 1, encoded as
-    // register cr8. Offsets must be divisible by 4 for coprocessor so we
-    // need to subtract the tag from r5.
-    __ sub(scratch1, result, Operand(kHeapObjectTag));
-    __ stc(p1, cr8, MemOperand(scratch1, HeapNumber::kValueOffset));
-#else
-    // Double returned in registers 0 and 1.
-    __ Strd(r0, r1, FieldMemOperand(result, HeapNumber::kValueOffset));
-#endif
-    __ mov(r0, Operand(result));
-    // And we are done.
-    __ pop(pc);
-  }
-}
+  switch (op_) {
+    case Token::ADD:
+    case Token::SUB:
+    case Token::MUL:
+    case Token::DIV:
+    case Token::MOD: {
+      // Load left and right operands into d6 and d7 or r0/r1 and r2/r3
+      // depending on whether VFP3 is available or not.
+      FloatingPointHelper::Destination destination =
+          CpuFeatures::IsSupported(VFP3) && op_ != Token::MOD ?
+          FloatingPointHelper::kVFPRegisters :
+          FloatingPointHelper::kCoreRegisters;
+
+      // Allocate new heap number for result.
+      Register result = r5;
+      __ AllocateHeapNumber(
+          result, scratch1, scratch2, heap_number_map, gc_required);
+
+      // Load the operands.
+      if (smi_operands) {
+        FloatingPointHelper::LoadSmis(masm, destination, scratch1, scratch2);
+      } else {
+        FloatingPointHelper::LoadOperands(masm,
+                                          destination,
+                                          heap_number_map,
+                                          scratch1,
+                                          scratch2,
+                                          not_numbers);
+      }
+
+      // Calculate the result.
+      if (destination == FloatingPointHelper::kVFPRegisters) {
+        // Using VFP registers:
+        // d6: Left value
+        // d7: Right value
+        CpuFeatures::Scope scope(VFP3);
+        switch (op_) {
+          case Token::ADD:
+            __ vadd(d5, d6, d7);
+            break;
+          case Token::SUB:
+            __ vsub(d5, d6, d7);
+            break;
+          case Token::MUL:
+            __ vmul(d5, d6, d7);
+            break;
+          case Token::DIV:
+            __ vdiv(d5, d6, d7);
+            break;
+          default:
+            UNREACHABLE();
+        }
+
+        __ sub(r0, result, Operand(kHeapObjectTag));
+        __ vstr(d5, r0, HeapNumber::kValueOffset);
+        __ add(r0, r0, Operand(kHeapObjectTag));
+        __ Ret();
+      } else {
+        // Using core registers:
+        // r0: Left value (least significant part of mantissa).
+        // r1: Left value (sign, exponent, top of mantissa).
+        // r2: Right value (least significant part of mantissa).
+        // r3: Right value (sign, exponent, top of mantissa).
+
+        __ push(lr);  // For later.
Karl Klose 2011/02/04 10:19:23: Where is it used later?
Søren Thygesen Gjesse 2011/02/04 10:40:51: There is a pop(pc) below to return. I have updated
+        __ PrepareCallCFunction(4, scratch1);  // Two doubles are 4 arguments.
+        // Call C routine that may not cause GC or other trouble. r5 is callee
+        // save.
+        __ CallCFunction(ExternalReference::double_fp_operation(op_), 4);
+        // Store answer in the overwritable heap number.
+#if !defined(USE_ARM_EABI)
+        // Double returned in fp coprocessor register 0 and 1, encoded as
+        // register cr8. Offsets must be divisible by 4 for coprocessor so we
+        // need to subtract the tag from r5.
+        __ sub(scratch1, result, Operand(kHeapObjectTag));
+        __ stc(p1, cr8, MemOperand(scratch1, HeapNumber::kValueOffset));
+#else
+        // Double returned in registers 0 and 1.
+        __ Strd(r0, r1, FieldMemOperand(result, HeapNumber::kValueOffset));
+#endif
+        __ mov(r0, Operand(result));
+        // And we are done.
+        __ pop(pc);
+      }
+      break;
+    }
+    case Token::BIT_OR:
+    case Token::BIT_XOR:
+    case Token::BIT_AND: {
+      if (smi_operands) {
+        __ SmiUntag(r3, left);
+        __ SmiUntag(r2, right);
+      } else {
+        // Convert operands to 32-bit integers. Right in r2 and left in r3.
+        FloatingPointHelper::LoadNumberAsInteger(masm,
+                                                 left,
+                                                 r3,
+                                                 heap_number_map,
+                                                 scratch1,
+                                                 scratch2,
+                                                 d0,
+                                                 not_numbers);
+        FloatingPointHelper::LoadNumberAsInteger(masm,
+                                                 right,
+                                                 r2,
+                                                 heap_number_map,
+                                                 scratch1,
+                                                 scratch2,
+                                                 d0,
+                                                 not_numbers);
+      }
+      switch (op_) {
+        case Token::BIT_OR:
+          __ orr(r2, r3, Operand(r2));
+          break;
+        case Token::BIT_XOR:
+          __ eor(r2, r3, Operand(r2));
+          break;
+        case Token::BIT_AND:
+          __ and_(r2, r3, Operand(r2));
+          break;
+        default:
+          UNREACHABLE();
+      }
+
+      Label result_not_a_smi;
+      // Check that the *signed* result fits in a smi.
+      __ add(r3, r2, Operand(0x40000000), SetCC);
+      __ b(mi, &result_not_a_smi);
+      __ SmiTag(r0, r2);
+      __ Ret();
+
+      // Allocate new heap number for result.
+      __ bind(&result_not_a_smi);
+      __ AllocateHeapNumber(
+          r5, scratch1, scratch2, heap_number_map, gc_required);
+
+      // r2: Answer as signed int32.
+      // r5: Heap number to write answer into.
+
+      // Nothing can go wrong now, so move the heap number to r0, which is the
+      // result.
+      __ mov(r0, Operand(r5));
+
+      if (CpuFeatures::IsSupported(VFP3)) {
+        // Convert the int32 in r2 to the heap number in r0. r3 is corrupted.
+        CpuFeatures::Scope scope(VFP3);
+        __ vmov(s0, r2);
+        __ vcvt_f64_s32(d0, s0);
+        __ sub(r3, r0, Operand(kHeapObjectTag));
+        __ vstr(d0, r3, HeapNumber::kValueOffset);
+        __ Ret();
+      } else {
+        // Tail call that writes the int32 in r2 to the heap number in r0,
+        // using r3 as scratch. r0 is preserved and returned.
+        WriteInt32ToHeapNumberStub stub(r2, r0, r3);
+        __ TailCallStub(&stub);
+      }
+      break;
+    }
+    default:
+      UNREACHABLE();
+  }
+}
 
 
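The add(r3, r2, Operand(0x40000000), SetCC) / b(mi, ...) pair in the new bit-op case deserves a note: a 32-bit smi holds values in [-2^30, 2^30), and adding 0x40000000 maps exactly that interval onto [0, 2^31), so the sign (N) flag of the sum is set precisely when the value does not fit. A hedged standalone model (plain C++; unsigned arithmetic stands in for the ARM flags):

    #include <cstdint>
    #include <cstdio>

    // Bit 31 of (v + 0x40000000) plays the role of the N flag set by
    // `add ..., SetCC` and tested by `b(mi, ...)`.
    bool FitsInSmi(int32_t v) {
      uint32_t sum = static_cast<uint32_t>(v) + 0x40000000u;  // no signed UB
      return (sum & 0x80000000u) == 0;
    }

    int main() {
      std::printf("%d\n", FitsInSmi(0));                       // 1
      std::printf("%d\n", FitsInSmi((INT32_C(1) << 30) - 1));  // 1: largest smi
      std::printf("%d\n", FitsInSmi(INT32_C(1) << 30));        // 0: one too big
      std::printf("%d\n", FitsInSmi(-(INT32_C(1) << 30)));     // 1: smallest smi
      std::printf("%d\n", FitsInSmi(INT32_MIN));               // 0
      return 0;
    }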
 // Generate the smi code. If the operation on smis is successful this return
 // is generated. If the result is not a smi and heap number allocation is not
 // requested the code falls through. If number allocation is requested but a
 // heap number cannot be allocated the code jumps to the label gc_required.
 void TypeRecordingBinaryOpStub::GenerateSmiCode(MacroAssembler* masm,
     Label* gc_required,
     SmiCodeGenerateHeapNumberResults allow_heapnumber_results) {
   Label not_smis;
 
   ASSERT(op_ == Token::ADD ||
          op_ == Token::SUB ||
          op_ == Token::MUL ||
          op_ == Token::DIV ||
-         op_ == Token::MOD);
+         op_ == Token::MOD ||
+         op_ == Token::BIT_OR ||
+         op_ == Token::BIT_AND ||
+         op_ == Token::BIT_XOR);
 
   Register left = r1;
   Register right = r0;
   Register scratch1 = r7;
   Register scratch2 = r9;
 
   // Perform combined smi check on both operands.
   __ orr(scratch1, left, Operand(right));
   STATIC_ASSERT(kSmiTag == 0);
   __ tst(scratch1, Operand(kSmiTagMask));
(...skipping 11 matching lines...)
 }
 
 
 void TypeRecordingBinaryOpStub::GenerateSmiStub(MacroAssembler* masm) {
   Label not_smis, call_runtime;
 
   ASSERT(op_ == Token::ADD ||
          op_ == Token::SUB ||
          op_ == Token::MUL ||
          op_ == Token::DIV ||
-         op_ == Token::MOD);
+         op_ == Token::MOD ||
+         op_ == Token::BIT_OR ||
+         op_ == Token::BIT_AND ||
+         op_ == Token::BIT_XOR);
 
   if (result_type_ == TRBinaryOpIC::UNINITIALIZED ||
       result_type_ == TRBinaryOpIC::SMI) {
     // Only allow smi results.
     GenerateSmiCode(masm, NULL, NO_HEAPNUMBER_RESULTS);
   } else {
     // Allow heap number result and don't make a transition if a heap number
     // cannot be allocated.
     GenerateSmiCode(masm, &call_runtime, ALLOW_HEAPNUMBER_RESULTS);
   }
(...skipping 15 matching lines...)
   GenerateAddStrings(masm);
   GenerateTypeTransition(masm);
 }
 
 
 void TypeRecordingBinaryOpStub::GenerateInt32Stub(MacroAssembler* masm) {
   ASSERT(op_ == Token::ADD ||
          op_ == Token::SUB ||
          op_ == Token::MUL ||
          op_ == Token::DIV ||
-         op_ == Token::MOD);
+         op_ == Token::MOD ||
+         op_ == Token::BIT_OR ||
+         op_ == Token::BIT_AND ||
+         op_ == Token::BIT_XOR);
 
   ASSERT(operands_type_ == TRBinaryOpIC::INT32);
 
   GenerateTypeTransition(masm);
 }
 
 
 void TypeRecordingBinaryOpStub::GenerateHeapNumberStub(MacroAssembler* masm) {
   ASSERT(op_ == Token::ADD ||
          op_ == Token::SUB ||
          op_ == Token::MUL ||
          op_ == Token::DIV ||
-         op_ == Token::MOD);
+         op_ == Token::MOD ||
+         op_ == Token::BIT_OR ||
+         op_ == Token::BIT_AND ||
+         op_ == Token::BIT_XOR);
 
   Label not_numbers, call_runtime;
   ASSERT(operands_type_ == TRBinaryOpIC::HEAP_NUMBER);
 
   GenerateFPOperation(masm, false, &not_numbers, &call_runtime);
 
   __ bind(&not_numbers);
   GenerateTypeTransition(masm);
 
   __ bind(&call_runtime);
   GenerateCallRuntime(masm);
 }
 
 
 void TypeRecordingBinaryOpStub::GenerateGeneric(MacroAssembler* masm) {
   ASSERT(op_ == Token::ADD ||
          op_ == Token::SUB ||
          op_ == Token::MUL ||
          op_ == Token::DIV ||
-         op_ == Token::MOD);
+         op_ == Token::MOD ||
+         op_ == Token::BIT_OR ||
+         op_ == Token::BIT_AND ||
+         op_ == Token::BIT_XOR);
 
   Label call_runtime;
 
   GenerateSmiCode(masm, &call_runtime, ALLOW_HEAPNUMBER_RESULTS);
 
   // If all else fails, use the runtime system to get the correct
   // result.
   __ bind(&call_runtime);
 
   // Try to add strings before calling runtime.
(...skipping 44 matching lines...)
       break;
     case Token::MUL:
       __ InvokeBuiltin(Builtins::MUL, JUMP_JS);
       break;
     case Token::DIV:
       __ InvokeBuiltin(Builtins::DIV, JUMP_JS);
       break;
     case Token::MOD:
       __ InvokeBuiltin(Builtins::MOD, JUMP_JS);
       break;
+    case Token::BIT_OR:
+      __ InvokeBuiltin(Builtins::BIT_OR, JUMP_JS);
+      break;
+    case Token::BIT_AND:
+      __ InvokeBuiltin(Builtins::BIT_AND, JUMP_JS);
+      break;
+    case Token::BIT_XOR:
+      __ InvokeBuiltin(Builtins::BIT_XOR, JUMP_JS);
+      break;
     default:
       UNREACHABLE();
   }
 }
 
 
 void TypeRecordingBinaryOpStub::GenerateHeapResultAllocation(
     MacroAssembler* masm,
     Register result,
     Register heap_number_map,
(...skipping 205 matching lines...)
     __ Assert(ne, "Unexpected smi operand.");
   }
 
   // Check if the operand is a heap number.
   __ ldr(r1, FieldMemOperand(r0, HeapObject::kMapOffset));
   __ AssertRegisterIsRoot(heap_number_map, Heap::kHeapNumberMapRootIndex);
   __ cmp(r1, heap_number_map);
   __ b(ne, &slow);
 
   // Convert the heap number in r0 to an untagged integer in r1.
-  __ ConvertToInt32(r0, r1, r2, r3, &slow);
+  __ ConvertToInt32(r0, r1, r2, r3, d0, &slow);
 
   // Do the bitwise operation (move negated) and check if the result
   // fits in a smi.
   Label try_float;
   __ mvn(r1, Operand(r1));
   __ add(r2, r1, Operand(0x40000000), SetCC);
   __ b(mi, &try_float);
   __ mov(r0, Operand(r1, LSL, kSmiTagSize));
   __ b(&done);
 
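On the mvn above: in two's complement the bitwise complement satisfies ~v == -v - 1, which is exactly what JavaScript's unary ~ must produce for an int32; the stub then applies the same 0x40000000 smi-range trick to the complemented value. A one-line sanity check (plain C++, not the stub):

    #include <cstdint>
    #include <cstdio>

    int main() {
      const int32_t v = 12345;
      // mvn computes the complement; in two's complement ~v == -v - 1.
      std::printf("%d %d\n", ~v, -v - 1);  // both print -12346
      return 0;
    }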
(...skipping 2748 matching lines...)
   __ SmiTag(r0, scratch1);
   __ Ret();
 }
 
 
 #undef __
 
 } }  // namespace v8::internal
 
 #endif  // V8_TARGET_ARCH_ARM