Chromium Code Reviews

Side by Side Diff: src/codegen-arm.cc

Issue 67163: Avoid a call to the runtime system when doing binary fp ops on ARM... (Closed) Base URL: http://v8.googlecode.com/svn/branches/bleeding_edge/
Patch Set: '' Created 11 years, 8 months ago
1 // Copyright 2006-2009 the V8 project authors. All rights reserved. 1 // Copyright 2006-2009 the V8 project authors. All rights reserved.
2 // Redistribution and use in source and binary forms, with or without 2 // Redistribution and use in source and binary forms, with or without
3 // modification, are permitted provided that the following conditions are 3 // modification, are permitted provided that the following conditions are
4 // met: 4 // met:
5 // 5 //
6 // * Redistributions of source code must retain the above copyright 6 // * Redistributions of source code must retain the above copyright
7 // notice, this list of conditions and the following disclaimer. 7 // notice, this list of conditions and the following disclaimer.
8 // * Redistributions in binary form must reproduce the above 8 // * Redistributions in binary form must reproduce the above
9 // copyright notice, this list of conditions and the following 9 // copyright notice, this list of conditions and the following
10 // disclaimer in the documentation and/or other materials provided 10 // disclaimer in the documentation and/or other materials provided
(...skipping 20 matching lines...)
31 #include "codegen-inl.h" 31 #include "codegen-inl.h"
32 #include "debug.h" 32 #include "debug.h"
33 #include "parser.h" 33 #include "parser.h"
34 #include "register-allocator-inl.h" 34 #include "register-allocator-inl.h"
35 #include "runtime.h" 35 #include "runtime.h"
36 #include "scopes.h" 36 #include "scopes.h"
37 37
38 38
39 namespace v8 { namespace internal { 39 namespace v8 { namespace internal {
40 40
41 #define __ masm_-> 41 #define __ DEFINE_MASM(masm_)
42
42 43
43 // ------------------------------------------------------------------------- 44 // -------------------------------------------------------------------------
44 // CodeGenState implementation. 45 // CodeGenState implementation.
45 46
46 CodeGenState::CodeGenState(CodeGenerator* owner) 47 CodeGenState::CodeGenState(CodeGenerator* owner)
47 : owner_(owner), 48 : owner_(owner),
48 typeof_state_(NOT_INSIDE_TYPEOF), 49 typeof_state_(NOT_INSIDE_TYPEOF),
49 true_target_(NULL), 50 true_target_(NULL),
50 false_target_(NULL), 51 false_target_(NULL),
51 previous_(NULL) { 52 previous_(NULL) {
(...skipping 618 matching lines...)
670 671
671 private: 672 private:
672 Major MajorKey() { return SetProperty; } 673 Major MajorKey() { return SetProperty; }
673 int MinorKey() { return 0; } 674 int MinorKey() { return 0; }
674 void Generate(MacroAssembler* masm); 675 void Generate(MacroAssembler* masm);
675 }; 676 };
676 677
677 678
678 class GenericBinaryOpStub : public CodeStub { 679 class GenericBinaryOpStub : public CodeStub {
679 public: 680 public:
680 explicit GenericBinaryOpStub(Token::Value op) : op_(op) { } 681 GenericBinaryOpStub(Token::Value op,
682 OverwriteMode mode)
683 : op_(op), mode_(mode) { }
681 684
682 private: 685 private:
683 Token::Value op_; 686 Token::Value op_;
687 OverwriteMode mode_;
688
689 // Minor key encoding in 16 bits.
690 class ModeBits: public BitField<OverwriteMode, 0, 2> {};
691 class OpBits: public BitField<Token::Value, 2, 14> {};
684 692
685 Major MajorKey() { return GenericBinaryOp; } 693 Major MajorKey() { return GenericBinaryOp; }
686 int MinorKey() { return static_cast<int>(op_); } 694 int MinorKey() {
695 // Encode the parameters in a unique 16 bit value.
696 return OpBits::encode(op_)
697 | ModeBits::encode(mode_);
698 }
699
687 void Generate(MacroAssembler* masm); 700 void Generate(MacroAssembler* masm);
688 701
689 const char* GetName() { 702 const char* GetName() {
690 switch (op_) { 703 switch (op_) {
691 case Token::ADD: return "GenericBinaryOpStub_ADD"; 704 case Token::ADD: return "GenericBinaryOpStub_ADD";
692 case Token::SUB: return "GenericBinaryOpStub_SUB"; 705 case Token::SUB: return "GenericBinaryOpStub_SUB";
693 case Token::MUL: return "GenericBinaryOpStub_MUL"; 706 case Token::MUL: return "GenericBinaryOpStub_MUL";
694 case Token::DIV: return "GenericBinaryOpStub_DIV"; 707 case Token::DIV: return "GenericBinaryOpStub_DIV";
695 case Token::BIT_OR: return "GenericBinaryOpStub_BIT_OR"; 708 case Token::BIT_OR: return "GenericBinaryOpStub_BIT_OR";
696 case Token::BIT_AND: return "GenericBinaryOpStub_BIT_AND"; 709 case Token::BIT_AND: return "GenericBinaryOpStub_BIT_AND";
697 case Token::BIT_XOR: return "GenericBinaryOpStub_BIT_XOR"; 710 case Token::BIT_XOR: return "GenericBinaryOpStub_BIT_XOR";
698 case Token::SAR: return "GenericBinaryOpStub_SAR"; 711 case Token::SAR: return "GenericBinaryOpStub_SAR";
699 case Token::SHL: return "GenericBinaryOpStub_SHL"; 712 case Token::SHL: return "GenericBinaryOpStub_SHL";
700 case Token::SHR: return "GenericBinaryOpStub_SHR"; 713 case Token::SHR: return "GenericBinaryOpStub_SHR";
701 default: return "GenericBinaryOpStub"; 714 default: return "GenericBinaryOpStub";
702 } 715 }
703 } 716 }
704 717
705 #ifdef DEBUG 718 #ifdef DEBUG
706 void Print() { PrintF("GenericBinaryOpStub (%s)\n", Token::String(op_)); } 719 void Print() { PrintF("GenericBinaryOpStub (%s)\n", Token::String(op_)); }
707 #endif 720 #endif
708 }; 721 };
709 722
710 723
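[Review note] The new MinorKey packs both stub parameters into a single 16-bit code-cache key via the two BitField classes above. A minimal standalone sketch of the encode/decode mechanics (illustrative names, not V8's actual BitField template):

  #include <cassert>

  template <class T, int shift, int size>
  struct BitFieldSketch {
    // encode() shifts the value into its field; decode() masks it back out.
    static int encode(T value) { return static_cast<int>(value) << shift; }
    static T decode(int key) {
      return static_cast<T>((key >> shift) & ((1 << size) - 1));
    }
  };

  enum ModeSketch { NO_OVERWRITE, OVERWRITE_LEFT, OVERWRITE_RIGHT };

  int main() {
    typedef BitFieldSketch<ModeSketch, 0, 2> ModeBits;  // bits 0-1
    typedef BitFieldSketch<int, 2, 14> OpBits;          // bits 2-15
    int key = OpBits::encode(42) | ModeBits::encode(OVERWRITE_RIGHT);
    assert(OpBits::decode(key) == 42);
    assert(ModeBits::decode(key) == OVERWRITE_RIGHT);
    return 0;
  }

Because the two fields do not overlap, OR-ing their encodings yields a key that is unique per (op, mode) pair.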
711 void CodeGenerator::GenericBinaryOperation(Token::Value op) { 724 void CodeGenerator::GenericBinaryOperation(Token::Value op,
725 OverwriteMode overwrite_mode) {
712 VirtualFrame::SpilledScope spilled_scope(this); 726 VirtualFrame::SpilledScope spilled_scope(this);
713 // sp[0] : y 727 // sp[0] : y
714 // sp[1] : x 728 // sp[1] : x
715 // result : r0 729 // result : r0
716 730
717 // Stub is entered with a call: 'return address' is in lr. 731 // Stub is entered with a call: 'return address' is in lr.
718 switch (op) { 732 switch (op) {
719 case Token::ADD: // fall through. 733 case Token::ADD: // fall through.
720 case Token::SUB: // fall through. 734 case Token::SUB: // fall through.
721 case Token::MUL: 735 case Token::MUL:
722 case Token::BIT_OR: 736 case Token::BIT_OR:
723 case Token::BIT_AND: 737 case Token::BIT_AND:
724 case Token::BIT_XOR: 738 case Token::BIT_XOR:
725 case Token::SHL: 739 case Token::SHL:
726 case Token::SHR: 740 case Token::SHR:
727 case Token::SAR: { 741 case Token::SAR: {
728 frame_->EmitPop(r0); // r0 : y 742 frame_->EmitPop(r0); // r0 : y
729 frame_->EmitPop(r1); // r1 : x 743 frame_->EmitPop(r1); // r1 : x
730 GenericBinaryOpStub stub(op); 744 GenericBinaryOpStub stub(op, overwrite_mode);
731 frame_->CallStub(&stub, 0); 745 frame_->CallStub(&stub, 0);
732 break; 746 break;
733 } 747 }
734 748
735 case Token::DIV: { 749 case Token::DIV: {
736 Result arg_count = allocator_->Allocate(r0); 750 Result arg_count = allocator_->Allocate(r0);
737 ASSERT(arg_count.is_valid()); 751 ASSERT(arg_count.is_valid());
738 __ mov(arg_count.reg(), Operand(1)); 752 __ mov(arg_count.reg(), Operand(1));
739 frame_->InvokeBuiltin(Builtins::DIV, CALL_JS, &arg_count, 2); 753 frame_->InvokeBuiltin(Builtins::DIV, CALL_JS, &arg_count, 2);
740 break; 754 break;
(...skipping 19 matching lines...)
760 break; 774 break;
761 } 775 }
762 } 776 }
763 777
764 778
765 class DeferredInlineSmiOperation: public DeferredCode { 779 class DeferredInlineSmiOperation: public DeferredCode {
766 public: 780 public:
767 DeferredInlineSmiOperation(CodeGenerator* generator, 781 DeferredInlineSmiOperation(CodeGenerator* generator,
768 Token::Value op, 782 Token::Value op,
769 int value, 783 int value,
770 bool reversed) 784 bool reversed,
785 OverwriteMode overwrite_mode)
771 : DeferredCode(generator), 786 : DeferredCode(generator),
772 op_(op), 787 op_(op),
773 value_(value), 788 value_(value),
774 reversed_(reversed) { 789 reversed_(reversed),
790 overwrite_mode_(overwrite_mode) {
775 set_comment("[ DeferredInlinedSmiOperation"); 791 set_comment("[ DeferredInlinedSmiOperation");
776 } 792 }
777 793
778 virtual void Generate(); 794 virtual void Generate();
779 795
780 private: 796 private:
781 Token::Value op_; 797 Token::Value op_;
782 int value_; 798 int value_;
783 bool reversed_; 799 bool reversed_;
800 OverwriteMode overwrite_mode_;
784 }; 801 };
785 802
786 803
787 void DeferredInlineSmiOperation::Generate() { 804 void DeferredInlineSmiOperation::Generate() {
788 enter()->Bind(); 805 enter()->Bind();
789 VirtualFrame::SpilledScope spilled_scope(generator()); 806 VirtualFrame::SpilledScope spilled_scope(generator());
790 807
791 switch (op_) { 808 switch (op_) {
792 case Token::ADD: { 809 case Token::ADD: {
793 if (reversed_) { 810 if (reversed_) {
(...skipping 43 matching lines...)
837 } 854 }
838 break; 855 break;
839 } 856 }
840 857
841 default: 858 default:
842 // other cases should have been handled before this point. 859 // other cases should have been handled before this point.
843 UNREACHABLE(); 860 UNREACHABLE();
844 break; 861 break;
845 } 862 }
846 863
847 GenericBinaryOpStub igostub(op_); 864 GenericBinaryOpStub igostub(op_, overwrite_mode_);
848 Result arg0 = generator()->allocator()->Allocate(r1); 865 Result arg0 = generator()->allocator()->Allocate(r1);
849 ASSERT(arg0.is_valid()); 866 ASSERT(arg0.is_valid());
850 Result arg1 = generator()->allocator()->Allocate(r0); 867 Result arg1 = generator()->allocator()->Allocate(r0);
851 ASSERT(arg1.is_valid()); 868 ASSERT(arg1.is_valid());
852 generator()->frame()->CallStub(&igostub, &arg0, &arg1); 869 generator()->frame()->CallStub(&igostub, &arg0, &arg1);
853 exit_.Jump(); 870 exit_.Jump();
854 } 871 }
855 872
856 873
857 void CodeGenerator::SmiOperation(Token::Value op, 874 void CodeGenerator::SmiOperation(Token::Value op,
858 Handle<Object> value, 875 Handle<Object> value,
859 bool reversed) { 876 bool reversed,
877 OverwriteMode mode) {
860 VirtualFrame::SpilledScope spilled_scope(this); 878 VirtualFrame::SpilledScope spilled_scope(this);
861 // NOTE: This is an attempt to inline (a bit) more of the code for 879 // NOTE: This is an attempt to inline (a bit) more of the code for
862 // some possible smi operations (like + and -) when (at least) one 880 // some possible smi operations (like + and -) when (at least) one
863 // of the operands is a literal smi. With this optimization, the 881 // of the operands is a literal smi. With this optimization, the
864 // performance of the system is increased by ~15%, and the generated 882 // performance of the system is increased by ~15%, and the generated
865 // code size is increased by ~1% (measured on a combination of 883 // code size is increased by ~1% (measured on a combination of
866 // different benchmarks). 884 // different benchmarks).
867 885
868 // sp[0] : operand 886 // sp[0] : operand
869 887
870 int int_value = Smi::cast(*value)->value(); 888 int int_value = Smi::cast(*value)->value();
871 889
872 JumpTarget exit(this); 890 JumpTarget exit(this);
873 frame_->EmitPop(r0); 891 frame_->EmitPop(r0);
874 892
875 switch (op) { 893 switch (op) {
876 case Token::ADD: { 894 case Token::ADD: {
877 DeferredCode* deferred = 895 DeferredCode* deferred =
878 new DeferredInlineSmiOperation(this, op, int_value, reversed); 896 new DeferredInlineSmiOperation(this, op, int_value, reversed, mode);
879 897
880 __ add(r0, r0, Operand(value), SetCC); 898 __ add(r0, r0, Operand(value), SetCC);
881 deferred->enter()->Branch(vs); 899 deferred->enter()->Branch(vs);
882 __ tst(r0, Operand(kSmiTagMask)); 900 __ tst(r0, Operand(kSmiTagMask));
883 deferred->enter()->Branch(ne); 901 deferred->enter()->Branch(ne);
884 deferred->BindExit(); 902 deferred->BindExit();
885 break; 903 break;
886 } 904 }
887 905
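[Review note] The ADD fast path above adds the tagged words first and validates afterwards: deferred code is entered if the add set the overflow flag (vs) or if the result fails the smi tag test. Since the literal operand is already known to be a smi, a set tag bit in the sum can only come from a non-smi r0. A rough C++ model of that order of checks (using the GCC/Clang __builtin_add_overflow intrinsic; names are illustrative):

  #include <cstdint>

  const int32_t kSmiTagMaskSketch = 1;  // one tag bit, kSmiTag == 0

  // Returns true on the fast path; false means "jump to deferred code".
  bool OptimisticSmiAdd(int32_t r0_tagged, int32_t smi_constant, int32_t* out) {
    int32_t sum;
    if (__builtin_add_overflow(r0_tagged, smi_constant, &sum))
      return false;                      // b(vs): signed overflow
    if (sum & kSmiTagMaskSketch)
      return false;                      // tst/b(ne): r0 was not a smi
    *out = sum;
    return true;
  }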
888 case Token::SUB: { 906 case Token::SUB: {
889 DeferredCode* deferred = 907 DeferredCode* deferred =
890 new DeferredInlineSmiOperation(this, op, int_value, reversed); 908 new DeferredInlineSmiOperation(this, op, int_value, reversed, mode);
891 909
892 if (!reversed) { 910 if (!reversed) {
893 __ sub(r0, r0, Operand(value), SetCC); 911 __ sub(r0, r0, Operand(value), SetCC);
894 } else { 912 } else {
895 __ rsb(r0, r0, Operand(value), SetCC); 913 __ rsb(r0, r0, Operand(value), SetCC);
896 } 914 }
897 deferred->enter()->Branch(vs); 915 deferred->enter()->Branch(vs);
898 __ tst(r0, Operand(kSmiTagMask)); 916 __ tst(r0, Operand(kSmiTagMask));
899 deferred->enter()->Branch(ne); 917 deferred->enter()->Branch(ne);
900 deferred->BindExit(); 918 deferred->BindExit();
901 break; 919 break;
902 } 920 }
903 921
904 case Token::BIT_OR: 922 case Token::BIT_OR:
905 case Token::BIT_XOR: 923 case Token::BIT_XOR:
906 case Token::BIT_AND: { 924 case Token::BIT_AND: {
907 DeferredCode* deferred = 925 DeferredCode* deferred =
908 new DeferredInlineSmiOperation(this, op, int_value, reversed); 926 new DeferredInlineSmiOperation(this, op, int_value, reversed, mode);
909 __ tst(r0, Operand(kSmiTagMask)); 927 __ tst(r0, Operand(kSmiTagMask));
910 deferred->enter()->Branch(ne); 928 deferred->enter()->Branch(ne);
911 switch (op) { 929 switch (op) {
912 case Token::BIT_OR: __ orr(r0, r0, Operand(value)); break; 930 case Token::BIT_OR: __ orr(r0, r0, Operand(value)); break;
913 case Token::BIT_XOR: __ eor(r0, r0, Operand(value)); break; 931 case Token::BIT_XOR: __ eor(r0, r0, Operand(value)); break;
914 case Token::BIT_AND: __ and_(r0, r0, Operand(value)); break; 932 case Token::BIT_AND: __ and_(r0, r0, Operand(value)); break;
915 default: UNREACHABLE(); 933 default: UNREACHABLE();
916 } 934 }
917 deferred->BindExit(); 935 deferred->BindExit();
918 break; 936 break;
919 } 937 }
920 938
921 case Token::SHL: 939 case Token::SHL:
922 case Token::SHR: 940 case Token::SHR:
923 case Token::SAR: { 941 case Token::SAR: {
924 if (reversed) { 942 if (reversed) {
925 __ mov(ip, Operand(value)); 943 __ mov(ip, Operand(value));
926 frame_->EmitPush(ip); 944 frame_->EmitPush(ip);
927 frame_->EmitPush(r0); 945 frame_->EmitPush(r0);
928 GenericBinaryOperation(op); 946 GenericBinaryOperation(op, mode);
929 947
930 } else { 948 } else {
931 int shift_value = int_value & 0x1f; // least significant 5 bits 949 int shift_value = int_value & 0x1f; // least significant 5 bits
932 DeferredCode* deferred = 950 DeferredCode* deferred =
933 new DeferredInlineSmiOperation(this, op, shift_value, false); 951 new DeferredInlineSmiOperation(this, op, shift_value, false, mode);
934 __ tst(r0, Operand(kSmiTagMask)); 952 __ tst(r0, Operand(kSmiTagMask));
935 deferred->enter()->Branch(ne); 953 deferred->enter()->Branch(ne);
936 __ mov(r2, Operand(r0, ASR, kSmiTagSize)); // remove tags 954 __ mov(r2, Operand(r0, ASR, kSmiTagSize)); // remove tags
937 switch (op) { 955 switch (op) {
938 case Token::SHL: { 956 case Token::SHL: {
939 __ mov(r2, Operand(r2, LSL, shift_value)); 957 __ mov(r2, Operand(r2, LSL, shift_value));
940 // check that the *unsigned* result fits in a smi 958 // check that the *unsigned* result fits in a smi
941 __ add(r3, r2, Operand(0x40000000), SetCC); 959 __ add(r3, r2, Operand(0x40000000), SetCC);
942 deferred->enter()->Branch(mi); 960 deferred->enter()->Branch(mi);
943 break; 961 break;
(...skipping 31 matching lines...)
975 default: 993 default:
976 if (!reversed) { 994 if (!reversed) {
977 frame_->EmitPush(r0); 995 frame_->EmitPush(r0);
978 __ mov(r0, Operand(value)); 996 __ mov(r0, Operand(value));
979 frame_->EmitPush(r0); 997 frame_->EmitPush(r0);
980 } else { 998 } else {
981 __ mov(ip, Operand(value)); 999 __ mov(ip, Operand(value));
982 frame_->EmitPush(ip); 1000 frame_->EmitPush(ip);
983 frame_->EmitPush(r0); 1001 frame_->EmitPush(r0);
984 } 1002 }
985 GenericBinaryOperation(op); 1003 GenericBinaryOperation(op, mode);
986 break; 1004 break;
987 } 1005 }
988 1006
989 exit.Bind(); 1007 exit.Bind();
990 } 1008 }
991 1009
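[Review note] The `add r3, r2, Operand(0x40000000), SetCC` / branch-on-mi sequences in the shift cases above are the standard smi range check: a value fits in the 31-bit smi payload iff it lies in [-2^30, 2^30 - 1], which holds iff adding 2^30 leaves bit 31 clear. A sketch, using unsigned arithmetic to keep the addition well-defined:

  #include <cstdint>

  bool FitsSmiSketch(int32_t v) {
    // Adding 2^30 maps [-2^30, 2^30 - 1] onto [0, 2^31 - 1], i.e. bit 31 clear.
    return (static_cast<uint32_t>(v) + 0x40000000u) < 0x80000000u;
  }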
992 1010
993 void CodeGenerator::Comparison(Condition cc, bool strict) { 1011 void CodeGenerator::Comparison(Condition cc, bool strict) {
994 VirtualFrame::SpilledScope spilled_scope(this); 1012 VirtualFrame::SpilledScope spilled_scope(this);
995 // sp[0] : y 1013 // sp[0] : y
(...skipping 484 matching lines...)
1480 // "default" for failure to hit the jump table. 1498 // "default" for failure to hit the jump table.
1481 JumpTarget* default_target = 1499 JumpTarget* default_target =
1482 (default_label == NULL) ? node->break_target() : &setup_default; 1500 (default_label == NULL) ? node->break_target() : &setup_default;
1483 1501
1484 ASSERT(kSmiTag == 0 && kSmiTagSize <= 2); 1502 ASSERT(kSmiTag == 0 && kSmiTagSize <= 2);
1485 frame_->EmitPop(r0); 1503 frame_->EmitPop(r0);
1486 1504
1487 // Test for a Smi value in a HeapNumber. 1505 // Test for a Smi value in a HeapNumber.
1488 __ tst(r0, Operand(kSmiTagMask)); 1506 __ tst(r0, Operand(kSmiTagMask));
1489 is_smi.Branch(eq); 1507 is_smi.Branch(eq);
1490 __ ldr(r1, MemOperand(r0, HeapObject::kMapOffset - kHeapObjectTag)); 1508 __ ldr(r1, FieldMemOperand(r0, HeapObject::kMapOffset));
1491 __ ldrb(r1, MemOperand(r1, Map::kInstanceTypeOffset - kHeapObjectTag)); 1509 __ ldrb(r1, FieldMemOperand(r1, Map::kInstanceTypeOffset));
1492 __ cmp(r1, Operand(HEAP_NUMBER_TYPE)); 1510 __ cmp(r1, Operand(HEAP_NUMBER_TYPE));
1493 default_target->Branch(ne); 1511 default_target->Branch(ne);
1494 frame_->EmitPush(r0); 1512 frame_->EmitPush(r0);
1495 frame_->CallRuntime(Runtime::kNumberToSmi, 1); 1513 frame_->CallRuntime(Runtime::kNumberToSmi, 1);
1496 is_smi.Bind(); 1514 is_smi.Bind();
1497 1515
1498 if (min_index != 0) { 1516 if (min_index != 0) {
1499 // Small positive numbers can be immediate operands. 1517 // Small positive numbers can be immediate operands.
1500 if (min_index < 0) { 1518 if (min_index < 0) {
1501 // If min_index is Smi::kMinValue, -min_index is not a Smi. 1519 // If min_index is Smi::kMinValue, -min_index is not a Smi.
(...skipping 1009 matching lines...)
2511 context = tmp; 2529 context = tmp;
2512 } 2530 }
2513 // If no outer scope calls eval, we do not need to check more 2531 // If no outer scope calls eval, we do not need to check more
2514 // context extensions. 2532 // context extensions.
2515 if (!s->outer_scope_calls_eval() || s->is_eval_scope()) break; 2533 if (!s->outer_scope_calls_eval() || s->is_eval_scope()) break;
2516 s = s->outer_scope(); 2534 s = s->outer_scope();
2517 } 2535 }
2518 2536
2519 if (s->is_eval_scope()) { 2537 if (s->is_eval_scope()) {
2520 Label next, fast; 2538 Label next, fast;
2521 if (!context.is(tmp)) __ mov(tmp, Operand(context)); 2539 if (!context.is(tmp)) {
2540 __ mov(tmp, Operand(context));
2541 }
2522 __ bind(&next); 2542 __ bind(&next);
2523 // Terminate at global context. 2543 // Terminate at global context.
2524 __ ldr(tmp2, FieldMemOperand(tmp, HeapObject::kMapOffset)); 2544 __ ldr(tmp2, FieldMemOperand(tmp, HeapObject::kMapOffset));
2525 __ cmp(tmp2, Operand(Factory::global_context_map())); 2545 __ cmp(tmp2, Operand(Factory::global_context_map()));
2526 __ b(eq, &fast); 2546 __ b(eq, &fast);
2527 // Check that extension is NULL. 2547 // Check that extension is NULL.
2528 __ ldr(tmp2, ContextOperand(tmp, Context::EXTENSION_INDEX)); 2548 __ ldr(tmp2, ContextOperand(tmp, Context::EXTENSION_INDEX));
2529 __ tst(tmp2, tmp2); 2549 __ tst(tmp2, tmp2);
2530 slow->Branch(ne); 2550 slow->Branch(ne);
2531 // Load next context in chain. 2551 // Load next context in chain.
(...skipping 390 matching lines...)
2922 ASSERT(frame_->height() == original_height + 1); 2942 ASSERT(frame_->height() == original_height + 1);
2923 return; 2943 return;
2924 } 2944 }
2925 2945
2926 if (node->op() == Token::ASSIGN || 2946 if (node->op() == Token::ASSIGN ||
2927 node->op() == Token::INIT_VAR || 2947 node->op() == Token::INIT_VAR ||
2928 node->op() == Token::INIT_CONST) { 2948 node->op() == Token::INIT_CONST) {
2929 LoadAndSpill(node->value()); 2949 LoadAndSpill(node->value());
2930 2950
2931 } else { 2951 } else {
2952 // +=, *= and similar binary assignments.
2953 // Get the old value of the lhs.
2932 target.GetValueAndSpill(NOT_INSIDE_TYPEOF); 2954 target.GetValueAndSpill(NOT_INSIDE_TYPEOF);
2933 Literal* literal = node->value()->AsLiteral(); 2955 Literal* literal = node->value()->AsLiteral();
2956 bool overwrite =
2957 (node->value()->AsBinaryOperation() != NULL &&
2958 node->value()->AsBinaryOperation()->ResultOverwriteAllowed());
2934 if (literal != NULL && literal->handle()->IsSmi()) { 2959 if (literal != NULL && literal->handle()->IsSmi()) {
2935 SmiOperation(node->binary_op(), literal->handle(), false); 2960 SmiOperation(node->binary_op(),
2961 literal->handle(),
2962 false,
2963 overwrite ? OVERWRITE_RIGHT : NO_OVERWRITE);
2936 frame_->EmitPush(r0); 2964 frame_->EmitPush(r0);
2937 2965
2938 } else { 2966 } else {
2939 LoadAndSpill(node->value()); 2967 LoadAndSpill(node->value());
2940 GenericBinaryOperation(node->binary_op()); 2968 GenericBinaryOperation(node->binary_op(),
2969 overwrite ? OVERWRITE_RIGHT : NO_OVERWRITE);
2941 frame_->EmitPush(r0); 2970 frame_->EmitPush(r0);
2942 } 2971 }
2943 } 2972 }
2944 2973
2945 Variable* var = node->target()->AsVariableProxy()->AsVariable(); 2974 Variable* var = node->target()->AsVariableProxy()->AsVariable();
2946 if (var != NULL && 2975 if (var != NULL &&
2947 (var->mode() == Variable::CONST) && 2976 (var->mode() == Variable::CONST) &&
2948 node->op() != Token::INIT_VAR && node->op() != Token::INIT_CONST) { 2977 node->op() != Token::INIT_VAR && node->op() != Token::INIT_CONST) {
2949 // Assignment ignored - leave the value on the stack. 2978 // Assignment ignored - leave the value on the stack.
2950 2979
(...skipping 859 matching lines...)
3810 3839
3811 // Exit (always with a materialized value). 3840 // Exit (always with a materialized value).
3812 exit.Bind(); 3841 exit.Bind();
3813 } 3842 }
3814 3843
3815 } else { 3844 } else {
3816 // Optimize for the case where (at least) one of the expressions 3845 // Optimize for the case where (at least) one of the expressions
3817 // is a literal small integer. 3846 // is a literal small integer.
3818 Literal* lliteral = node->left()->AsLiteral(); 3847 Literal* lliteral = node->left()->AsLiteral();
3819 Literal* rliteral = node->right()->AsLiteral(); 3848 Literal* rliteral = node->right()->AsLiteral();
3849 // NOTE: The code below assumes that the slow cases (calls to runtime)
3850 // never return a constant/immutable object.
3851 bool overwrite_left =
3852 (node->left()->AsBinaryOperation() != NULL &&
3853 node->left()->AsBinaryOperation()->ResultOverwriteAllowed());
3854 bool overwrite_right =
3855 (node->right()->AsBinaryOperation() != NULL &&
3856 node->right()->AsBinaryOperation()->ResultOverwriteAllowed());
3820 3857
3821 if (rliteral != NULL && rliteral->handle()->IsSmi()) { 3858 if (rliteral != NULL && rliteral->handle()->IsSmi()) {
3822 LoadAndSpill(node->left()); 3859 LoadAndSpill(node->left());
3823 SmiOperation(node->op(), rliteral->handle(), false); 3860 SmiOperation(node->op(),
3861 rliteral->handle(),
3862 false,
3863 overwrite_right ? OVERWRITE_RIGHT : NO_OVERWRITE);
3824 3864
3825 } else if (lliteral != NULL && lliteral->handle()->IsSmi()) { 3865 } else if (lliteral != NULL && lliteral->handle()->IsSmi()) {
3826 LoadAndSpill(node->right()); 3866 LoadAndSpill(node->right());
3827 SmiOperation(node->op(), lliteral->handle(), true); 3867 SmiOperation(node->op(),
3868 lliteral->handle(),
3869 true,
3870 overwrite_left ? OVERWRITE_LEFT : NO_OVERWRITE);
3828 3871
3829 } else { 3872 } else {
3873 OverwriteMode overwrite_mode = NO_OVERWRITE;
3874 if (overwrite_left) {
3875 overwrite_mode = OVERWRITE_LEFT;
3876 } else if (overwrite_right) {
3877 overwrite_mode = OVERWRITE_RIGHT;
3878 }
3830 LoadAndSpill(node->left()); 3879 LoadAndSpill(node->left());
3831 LoadAndSpill(node->right()); 3880 LoadAndSpill(node->right());
3832 GenericBinaryOperation(node->op()); 3881 GenericBinaryOperation(node->op(), overwrite_mode);
3833 } 3882 }
3834 frame_->EmitPush(r0); 3883 frame_->EmitPush(r0);
3835 } 3884 }
3836 ASSERT((has_cc() && frame_->height() == original_height) || 3885 ASSERT((has_cc() && frame_->height() == original_height) ||
3837 (!has_cc() && frame_->height() == original_height + 1)); 3886 (!has_cc() && frame_->height() == original_height + 1));
3838 } 3887 }
3839 3888
3840 3889
3841 void CodeGenerator::VisitThisFunction(ThisFunction* node) { 3890 void CodeGenerator::VisitThisFunction(ThisFunction* node) {
3842 #ifdef DEBUG 3891 #ifdef DEBUG
(...skipping 212 matching lines...)
4055 (!has_cc() && frame_->height() == original_height + 1)); 4104 (!has_cc() && frame_->height() == original_height + 1));
4056 } 4105 }
4057 4106
4058 4107
4059 #ifdef DEBUG 4108 #ifdef DEBUG
4060 bool CodeGenerator::HasValidEntryRegisters() { return true; } 4109 bool CodeGenerator::HasValidEntryRegisters() { return true; }
4061 #endif 4110 #endif
4062 4111
4063 4112
4064 #undef __ 4113 #undef __
4065 #define __ masm-> 4114 #define __ DEFINE_MASM(masm)
4115
4066 4116
4067 Handle<String> Reference::GetName() { 4117 Handle<String> Reference::GetName() {
4068 ASSERT(type_ == NAMED); 4118 ASSERT(type_ == NAMED);
4069 Property* property = expression_->AsProperty(); 4119 Property* property = expression_->AsProperty();
4070 if (property == NULL) { 4120 if (property == NULL) {
4071 // Global variable reference treated as a named property reference. 4121 // Global variable reference treated as a named property reference.
4072 VariableProxy* proxy = expression_->AsVariableProxy(); 4122 VariableProxy* proxy = expression_->AsVariableProxy();
4073 ASSERT(proxy->AsVariable() != NULL); 4123 ASSERT(proxy->AsVariable() != NULL);
4074 ASSERT(proxy->AsVariable()->is_global()); 4124 ASSERT(proxy->AsVariable()->is_global());
4075 return proxy->name(); 4125 return proxy->name();
(...skipping 381 matching lines...)
4457 __ tst(r0, Operand(kSmiTagMask)); 4507 __ tst(r0, Operand(kSmiTagMask));
4458 __ b(eq, &exit); 4508 __ b(eq, &exit);
4459 // Update write barrier for the elements array address. 4509 // Update write barrier for the elements array address.
4460 __ sub(r1, r2, Operand(r3)); 4510 __ sub(r1, r2, Operand(r3));
4461 __ RecordWrite(r3, r1, r2); 4511 __ RecordWrite(r3, r1, r2);
4462 __ bind(&exit); 4512 __ bind(&exit);
4463 __ StubReturn(1); 4513 __ StubReturn(1);
4464 } 4514 }
4465 4515
4466 4516
4517 static void HandleBinaryOpSlowCases(MacroAssembler* masm,
4518 Label* not_smi,
4519 const Builtins::JavaScript& builtin,
4520 Token::Value operation,
4521 int swi_number,
4522 OverwriteMode mode) {
4523 Label slow;
4524 if (mode == NO_OVERWRITE) {
4525 __ bind(not_smi);
4526 }
4527 __ bind(&slow);
4528 __ push(r1);
4529 __ push(r0);
4530 __ mov(r0, Operand(1)); // Set number of arguments.
4531 __ InvokeBuiltin(builtin, JUMP_JS); // Tail call.
4532
4533 // Could it be a double-double op? If we already have a place to put
4534 // the answer then we can do the op and skip the builtin and runtime call.
4535 if (mode != NO_OVERWRITE) {
4536 __ bind(not_smi);
4537 __ tst(r0, Operand(kSmiTagMask));
4538 __ b(eq, &slow); // We can't handle a Smi-double combination yet.
4539 __ tst(r1, Operand(kSmiTagMask));
4540 __ b(eq, &slow); // We can't handle a Smi-double combination yet.
4541 // Get map of r0 into r2.
4542 __ ldr(r2, FieldMemOperand(r0, HeapObject::kMapOffset));
4543 // Get type of r0 into r3.
4544 __ ldrb(r3, FieldMemOperand(r2, Map::kInstanceTypeOffset));
4545 __ cmp(r3, Operand(HEAP_NUMBER_TYPE));
4546 __ b(ne, &slow);
4547 // Get type of r1 into r3.
4548 __ ldr(r3, FieldMemOperand(r1, HeapObject::kMapOffset));
4549 // Check they are both the same map (heap number map).
4550 __ cmp(r2, r3);
4551 __ b(ne, &slow);
4552 // Both are doubles.
4553 // Calling convention says that second double is in r2 and r3.
4554 __ ldr(r2, FieldMemOperand(r0, HeapNumber::kValueOffset));
4555 __ ldr(r3, FieldMemOperand(r0, HeapNumber::kValueOffset + kPointerSize));
4556 __ push(lr);
4557 if (mode == OVERWRITE_LEFT) {
4558 __ push(r1);
4559 } else {
4560 __ push(r0);
4561 }
4562 // Calling convention says that first double is in r0 and r1.
4563 __ ldr(r0, FieldMemOperand(r1, HeapNumber::kValueOffset));
4564 __ ldr(r1, FieldMemOperand(r1, HeapNumber::kValueOffset + kPointerSize));
4565 // Call C routine that may not cause GC or other trouble.
4566 __ mov(r5, Operand(ExternalReference::double_fp_operation(operation)));
4567 #if !defined(__arm__)
4568 // Notify the simulator that we are calling an add routine in C.
4569 __ swi(swi_number);
4570 #else
4571 // Actually call the add routine written in C.
4572 __ blx(r5);
4573 #endif
4574 // Store answer in the overwritable heap number.
4575 __ pop(r4);
4576 __ str(r0, FieldMemOperand(r4, HeapNumber::kValueOffset));
4577 __ str(r1, FieldMemOperand(r4, HeapNumber::kValueOffset + kPointerSize));
4578 __ mov(r0, Operand(r4));
4579 // And we are done.
4580 __ pop(pc);
4581 }
4582 }
4583
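[Review note] The new HandleBinaryOpSlowCases relies on a soft-float convention: each 64-bit heap-number payload is loaded as two 32-bit words (kValueOffset and kValueOffset + kPointerSize) into the register pairs r0/r1 and r2/r3 before calling the C routine. A host-side sketch of that word split, assuming little-endian layout as on this ARM target:

  #include <cstdint>
  #include <cstring>

  // Split a double into the two 32-bit words the stub loads into a
  // register pair; on little-endian ARM the word at the lower address
  // is the low half of the IEEE-754 bit pattern.
  void SplitDouble(double d, uint32_t* lo, uint32_t* hi) {
    uint64_t bits;
    std::memcpy(&bits, &d, sizeof bits);
    *lo = static_cast<uint32_t>(bits);
    *hi = static_cast<uint32_t>(bits >> 32);
  }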
4584
4467 void GenericBinaryOpStub::Generate(MacroAssembler* masm) { 4585 void GenericBinaryOpStub::Generate(MacroAssembler* masm) {
4468 // r1 : x 4586 // r1 : x
4469 // r0 : y 4587 // r0 : y
4470 // result : r0 4588 // result : r0
4471 4589
4590 // All ops need to know whether we are dealing with two Smis. Set up r2 to
4591 // tell us that.
4592 __ orr(r2, r1, Operand(r0)); // r2 = x | y;
4593
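[Review note] Hoisting `r2 = x | y` out of the switch works because smis carry a zero tag bit while heap pointers have it set, so the OR of the two words passes the tag test iff both operands are smis. In C++ terms (sketch):

  #include <cstdint>

  const uint32_t kSmiTagMaskSketch = 1;  // heap pointers have bit 0 set

  bool BothSmis(uint32_t x, uint32_t y) {
    // (x | y) has a clear tag bit iff both tag bits are clear.
    return ((x | y) & kSmiTagMaskSketch) == 0;
  }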
4472 switch (op_) { 4594 switch (op_) {
4473 case Token::ADD: { 4595 case Token::ADD: {
4474 Label slow, exit; 4596 Label not_smi;
4475 // fast path 4597 // Fast path.
4476 __ orr(r2, r1, Operand(r0)); // r2 = x | y; 4598 ASSERT(kSmiTag == 0); // Adjust code below.
4477 __ add(r0, r1, Operand(r0), SetCC); // add y optimistically
4478 // go slow-path in case of overflow
4479 __ b(vs, &slow);
4480 // go slow-path in case of non-smi operands
4481 ASSERT(kSmiTag == 0); // adjust code below
4482 __ tst(r2, Operand(kSmiTagMask)); 4599 __ tst(r2, Operand(kSmiTagMask));
4483 __ b(eq, &exit); 4600 __ b(ne, &not_smi);
4484 // slow path 4601 __ add(r0, r1, Operand(r0), SetCC); // Add y optimistically.
4485 __ bind(&slow); 4602 // Return if no overflow.
4486 __ sub(r0, r0, Operand(r1)); // revert optimistic add 4603 __ Ret(vc);
4487 __ push(r1); 4604 __ sub(r0, r0, Operand(r1)); // Revert optimistic add.
4488 __ push(r0); 4605
4489 __ mov(r0, Operand(1)); // set number of arguments 4606 HandleBinaryOpSlowCases(masm,
4490 __ InvokeBuiltin(Builtins::ADD, JUMP_JS); 4607 &not_smi,
4491 // done 4608 Builtins::ADD,
4492 __ bind(&exit); 4609 Token::ADD,
4610 assembler::arm::simulator_fp_add,
4611 mode_);
4493 break; 4612 break;
4494 } 4613 }
4495 4614
4496 case Token::SUB: { 4615 case Token::SUB: {
4497 Label slow, exit; 4616 Label not_smi;
4498 // fast path 4617 // Fast path.
4499 __ orr(r2, r1, Operand(r0)); // r2 = x | y; 4618 ASSERT(kSmiTag == 0); // Adjust code below.
4500 __ sub(r3, r1, Operand(r0), SetCC); // subtract y optimistically
4501 // go slow-path in case of overflow
4502 __ b(vs, &slow);
4503 // go slow-path in case of non-smi operands
4504 ASSERT(kSmiTag == 0); // adjust code below
4505 __ tst(r2, Operand(kSmiTagMask)); 4619 __ tst(r2, Operand(kSmiTagMask));
4506 __ mov(r0, Operand(r3), LeaveCC, eq); // conditionally set r0 to result 4620 __ b(ne, &not_smi);
4507 __ b(eq, &exit); 4621 __ sub(r0, r1, Operand(r0), SetCC); // Subtract y optimistically.
4508 // slow path 4622 // Return if no overflow.
4509 __ bind(&slow); 4623 __ Ret(vc);
4510 __ push(r1); 4624 __ sub(r0, r1, Operand(r0)); // Revert optimistic subtract.
4511 __ push(r0); 4625
4512 __ mov(r0, Operand(1)); // set number of arguments 4626 HandleBinaryOpSlowCases(masm,
4513 __ InvokeBuiltin(Builtins::SUB, JUMP_JS); 4627 &not_smi,
4514 // done 4628 Builtins::SUB,
4515 __ bind(&exit); 4629 Token::SUB,
4630 assembler::arm::simulator_fp_sub,
4631 mode_);
4516 break; 4632 break;
4517 } 4633 }
4518 4634
4519 case Token::MUL: { 4635 case Token::MUL: {
4520 Label slow, exit; 4636 Label not_smi, slow;
4521 // tag check
4522 __ orr(r2, r1, Operand(r0)); // r2 = x | y;
4523 ASSERT(kSmiTag == 0); // adjust code below 4637 ASSERT(kSmiTag == 0); // adjust code below
4524 __ tst(r2, Operand(kSmiTagMask)); 4638 __ tst(r2, Operand(kSmiTagMask));
4525 __ b(ne, &slow); 4639 __ b(ne, &not_smi);
4526 // remove tag from one operand (but keep sign), so that result is smi 4640 // Remove tag from one operand (but keep sign), so that result is Smi.
4527 __ mov(ip, Operand(r0, ASR, kSmiTagSize)); 4641 __ mov(ip, Operand(r0, ASR, kSmiTagSize));
4528 // do multiplication 4642 // Do multiplication
4529 __ smull(r3, r2, r1, ip); // r3 = lower 32 bits of ip*r1 4643 __ smull(r3, r2, r1, ip); // r3 = lower 32 bits of ip*r1.
4530 // go slow on overflows (overflow bit is not set) 4644 // Go slow on overflows (overflow bit is not set).
4531 __ mov(ip, Operand(r3, ASR, 31)); 4645 __ mov(ip, Operand(r3, ASR, 31));
4532 __ cmp(ip, Operand(r2)); // no overflow if higher 33 bits are identical 4646 __ cmp(ip, Operand(r2)); // no overflow if higher 33 bits are identical
4533 __ b(ne, &slow); 4647 __ b(ne, &slow);
4534 // go slow on zero result to handle -0 4648 // Go slow on zero result to handle -0.
4535 __ tst(r3, Operand(r3)); 4649 __ tst(r3, Operand(r3));
4536 __ mov(r0, Operand(r3), LeaveCC, ne); 4650 __ mov(r0, Operand(r3), LeaveCC, ne);
4537 __ b(ne, &exit); 4651 __ Ret(ne);
4538 // slow case 4652 // Slow case.
4539 __ bind(&slow); 4653 __ bind(&slow);
4540 __ push(r1); 4654
4541 __ push(r0); 4655 HandleBinaryOpSlowCases(masm,
4542 __ mov(r0, Operand(1)); // set number of arguments 4656 &not_smi,
4543 __ InvokeBuiltin(Builtins::MUL, JUMP_JS); 4657 Builtins::MUL,
4544 // done 4658 Token::MUL,
4545 __ bind(&exit); 4659 assembler::arm::simulator_fp_mul,
4660 mode_);
4546 break; 4661 break;
4547 } 4662 }
4548 4663
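[Review note] The MUL case detects overflow after the fact: smull yields the full 64-bit product, and the 32-bit result is exact iff the high word equals the sign-extension of the low word (the "higher 33 bits identical" comparison above). A sketch:

  #include <cstdint>

  bool SignedMul32(int32_t a, int32_t b, int32_t* out) {
    int64_t full = static_cast<int64_t>(a) * b;     // what smull computes
    int32_t lo = static_cast<int32_t>(full);        // r3: low 32 bits
    int32_t hi = static_cast<int32_t>(full >> 32);  // r2: high 32 bits
    if (hi != (lo >> 31)) return false;  // assumes arithmetic shift, as on ARM
    *out = lo;
    return true;
  }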
4549 case Token::BIT_OR: 4664 case Token::BIT_OR:
4550 case Token::BIT_AND: 4665 case Token::BIT_AND:
4551 case Token::BIT_XOR: { 4666 case Token::BIT_XOR: {
4552 Label slow, exit; 4667 Label slow;
4553 // tag check
4554 __ orr(r2, r1, Operand(r0)); // r2 = x | y;
4555 ASSERT(kSmiTag == 0); // adjust code below 4668 ASSERT(kSmiTag == 0); // adjust code below
4556 __ tst(r2, Operand(kSmiTagMask)); 4669 __ tst(r2, Operand(kSmiTagMask));
4557 __ b(ne, &slow); 4670 __ b(ne, &slow);
4558 switch (op_) { 4671 switch (op_) {
4559 case Token::BIT_OR: __ orr(r0, r0, Operand(r1)); break; 4672 case Token::BIT_OR: __ orr(r0, r0, Operand(r1)); break;
4560 case Token::BIT_AND: __ and_(r0, r0, Operand(r1)); break; 4673 case Token::BIT_AND: __ and_(r0, r0, Operand(r1)); break;
4561 case Token::BIT_XOR: __ eor(r0, r0, Operand(r1)); break; 4674 case Token::BIT_XOR: __ eor(r0, r0, Operand(r1)); break;
4562 default: UNREACHABLE(); 4675 default: UNREACHABLE();
4563 } 4676 }
4564 __ b(&exit); 4677 __ Ret();
4565 __ bind(&slow); 4678 __ bind(&slow);
4566 __ push(r1); // restore stack 4679 __ push(r1); // restore stack
4567 __ push(r0); 4680 __ push(r0);
4568 __ mov(r0, Operand(1)); // 1 argument (not counting receiver). 4681 __ mov(r0, Operand(1)); // 1 argument (not counting receiver).
4569 switch (op_) { 4682 switch (op_) {
4570 case Token::BIT_OR: 4683 case Token::BIT_OR:
4571 __ InvokeBuiltin(Builtins::BIT_OR, JUMP_JS); 4684 __ InvokeBuiltin(Builtins::BIT_OR, JUMP_JS);
4572 break; 4685 break;
4573 case Token::BIT_AND: 4686 case Token::BIT_AND:
4574 __ InvokeBuiltin(Builtins::BIT_AND, JUMP_JS); 4687 __ InvokeBuiltin(Builtins::BIT_AND, JUMP_JS);
4575 break; 4688 break;
4576 case Token::BIT_XOR: 4689 case Token::BIT_XOR:
4577 __ InvokeBuiltin(Builtins::BIT_XOR, JUMP_JS); 4690 __ InvokeBuiltin(Builtins::BIT_XOR, JUMP_JS);
4578 break; 4691 break;
4579 default: 4692 default:
4580 UNREACHABLE(); 4693 UNREACHABLE();
4581 } 4694 }
4582 __ bind(&exit);
4583 break; 4695 break;
4584 } 4696 }
4585 4697
4586 case Token::SHL: 4698 case Token::SHL:
4587 case Token::SHR: 4699 case Token::SHR:
4588 case Token::SAR: { 4700 case Token::SAR: {
4589 Label slow, exit; 4701 Label slow;
4590 // tag check
4591 __ orr(r2, r1, Operand(r0)); // r2 = x | y;
4592 ASSERT(kSmiTag == 0); // adjust code below 4702 ASSERT(kSmiTag == 0); // adjust code below
4593 __ tst(r2, Operand(kSmiTagMask)); 4703 __ tst(r2, Operand(kSmiTagMask));
4594 __ b(ne, &slow); 4704 __ b(ne, &slow);
4595 // remove tags from operands (but keep sign) 4705 // remove tags from operands (but keep sign)
4596 __ mov(r3, Operand(r1, ASR, kSmiTagSize)); // x 4706 __ mov(r3, Operand(r1, ASR, kSmiTagSize)); // x
4597 __ mov(r2, Operand(r0, ASR, kSmiTagSize)); // y 4707 __ mov(r2, Operand(r0, ASR, kSmiTagSize)); // y
4598 // use only the 5 least significant bits of the shift count 4708 // use only the 5 least significant bits of the shift count
4599 __ and_(r2, r2, Operand(0x1f)); 4709 __ and_(r2, r2, Operand(0x1f));
4600 // perform operation 4710 // perform operation
4601 switch (op_) { 4711 switch (op_) {
(...skipping 19 matching lines...)
4621 // check that the *signed* result fits in a smi 4731 // check that the *signed* result fits in a smi
4622 __ add(r2, r3, Operand(0x40000000), SetCC); 4732 __ add(r2, r3, Operand(0x40000000), SetCC);
4623 __ b(mi, &slow); 4733 __ b(mi, &slow);
4624 break; 4734 break;
4625 4735
4626 default: UNREACHABLE(); 4736 default: UNREACHABLE();
4627 } 4737 }
4628 // tag result and store it in r0 4738 // tag result and store it in r0
4629 ASSERT(kSmiTag == 0); // adjust code below 4739 ASSERT(kSmiTag == 0); // adjust code below
4630 __ mov(r0, Operand(r3, LSL, kSmiTagSize)); 4740 __ mov(r0, Operand(r3, LSL, kSmiTagSize));
4631 __ b(&exit); 4741 __ Ret();
4632 // slow case 4742 // slow case
4633 __ bind(&slow); 4743 __ bind(&slow);
4634 __ push(r1); // restore stack 4744 __ push(r1); // restore stack
4635 __ push(r0); 4745 __ push(r0);
4636 __ mov(r0, Operand(1)); // 1 argument (not counting receiver). 4746 __ mov(r0, Operand(1)); // 1 argument (not counting receiver).
4637 switch (op_) { 4747 switch (op_) {
4638 case Token::SAR: __ InvokeBuiltin(Builtins::SAR, JUMP_JS); break; 4748 case Token::SAR: __ InvokeBuiltin(Builtins::SAR, JUMP_JS); break;
4639 case Token::SHR: __ InvokeBuiltin(Builtins::SHR, JUMP_JS); break; 4749 case Token::SHR: __ InvokeBuiltin(Builtins::SHR, JUMP_JS); break;
4640 case Token::SHL: __ InvokeBuiltin(Builtins::SHL, JUMP_JS); break; 4750 case Token::SHL: __ InvokeBuiltin(Builtins::SHL, JUMP_JS); break;
4641 default: UNREACHABLE(); 4751 default: UNREACHABLE();
4642 } 4752 }
4643 __ bind(&exit);
4644 break; 4753 break;
4645 } 4754 }
4646 4755
4647 default: UNREACHABLE(); 4756 default: UNREACHABLE();
4648 } 4757 }
4649 __ Ret(); 4758 // This code should be unreachable.
4759 __ stop("Unreachable");
4650 } 4760 }
4651 4761
4652 4762
4653 void StackCheckStub::Generate(MacroAssembler* masm) { 4763 void StackCheckStub::Generate(MacroAssembler* masm) {
4654 Label within_limit; 4764 Label within_limit;
4655 __ mov(ip, Operand(ExternalReference::address_of_stack_guard_limit())); 4765 __ mov(ip, Operand(ExternalReference::address_of_stack_guard_limit()));
4656 __ ldr(ip, MemOperand(ip)); 4766 __ ldr(ip, MemOperand(ip));
4657 __ cmp(sp, Operand(ip)); 4767 __ cmp(sp, Operand(ip));
4658 __ b(hs, &within_limit); 4768 __ b(hs, &within_limit);
4659 // Do tail-call to runtime routine. 4769 // Do tail-call to runtime routine.
(...skipping 49 matching lines...)
4709 __ str(r2, MemOperand(r3)); 4819 __ str(r2, MemOperand(r3));
4710 // restore parameter- and frame-pointer and pop state. 4820 // restore parameter- and frame-pointer and pop state.
4711 __ ldm(ia_w, sp, r3.bit() | pp.bit() | fp.bit()); 4821 __ ldm(ia_w, sp, r3.bit() | pp.bit() | fp.bit());
4712 // Before returning we restore the context from the frame pointer if not NULL. 4822 // Before returning we restore the context from the frame pointer if not NULL.
4713 // The frame pointer is NULL in the exception handler of a JS entry frame. 4823 // The frame pointer is NULL in the exception handler of a JS entry frame.
4714 __ cmp(fp, Operand(0)); 4824 __ cmp(fp, Operand(0));
4715 // Set cp to NULL if fp is NULL. 4825 // Set cp to NULL if fp is NULL.
4716 __ mov(cp, Operand(0), LeaveCC, eq); 4826 __ mov(cp, Operand(0), LeaveCC, eq);
4717 // Restore cp otherwise. 4827 // Restore cp otherwise.
4718 __ ldr(cp, MemOperand(fp, StandardFrameConstants::kContextOffset), ne); 4828 __ ldr(cp, MemOperand(fp, StandardFrameConstants::kContextOffset), ne);
4719 if (kDebug && FLAG_debug_code) __ mov(lr, Operand(pc)); 4829 if (kDebug && FLAG_debug_code) {
4830 __ mov(lr, Operand(pc));
4831 }
4720 __ pop(pc); 4832 __ pop(pc);
4721 } 4833 }
4722 4834
4723 4835
4724 void CEntryStub::GenerateThrowOutOfMemory(MacroAssembler* masm) { 4836 void CEntryStub::GenerateThrowOutOfMemory(MacroAssembler* masm) {
4725 // Fetch top stack handler. 4837 // Fetch top stack handler.
4726 __ mov(r3, Operand(ExternalReference(Top::k_handler_address))); 4838 __ mov(r3, Operand(ExternalReference(Top::k_handler_address)));
4727 __ ldr(r3, MemOperand(r3)); 4839 __ ldr(r3, MemOperand(r3));
4728 4840
4729 // Unwind the handlers until the ENTRY handler is found. 4841 // Unwind the handlers until the ENTRY handler is found.
(...skipping 42 matching lines...)
4772 // Discard ENTRY state (r2 is not used), and restore parameter- 4884 // Discard ENTRY state (r2 is not used), and restore parameter-
4773 // and frame-pointer and pop state. 4885 // and frame-pointer and pop state.
4774 __ ldm(ia_w, sp, r2.bit() | r3.bit() | pp.bit() | fp.bit()); 4886 __ ldm(ia_w, sp, r2.bit() | r3.bit() | pp.bit() | fp.bit());
4775 // Before returning we restore the context from the frame pointer if not NULL. 4887 // Before returning we restore the context from the frame pointer if not NULL.
4776 // The frame pointer is NULL in the exception handler of a JS entry frame. 4888 // The frame pointer is NULL in the exception handler of a JS entry frame.
4777 __ cmp(fp, Operand(0)); 4889 __ cmp(fp, Operand(0));
4778 // Set cp to NULL if fp is NULL. 4890 // Set cp to NULL if fp is NULL.
4779 __ mov(cp, Operand(0), LeaveCC, eq); 4891 __ mov(cp, Operand(0), LeaveCC, eq);
4780 // Restore cp otherwise. 4892 // Restore cp otherwise.
4781 __ ldr(cp, MemOperand(fp, StandardFrameConstants::kContextOffset), ne); 4893 __ ldr(cp, MemOperand(fp, StandardFrameConstants::kContextOffset), ne);
4782 if (kDebug && FLAG_debug_code) __ mov(lr, Operand(pc)); 4894 if (kDebug && FLAG_debug_code) {
4895 __ mov(lr, Operand(pc));
4896 }
4783 __ pop(pc); 4897 __ pop(pc);
4784 } 4898 }
4785 4899
4786 4900
4787 void CEntryStub::GenerateCore(MacroAssembler* masm, 4901 void CEntryStub::GenerateCore(MacroAssembler* masm,
4788 Label* throw_normal_exception, 4902 Label* throw_normal_exception,
4789 Label* throw_out_of_memory_exception, 4903 Label* throw_out_of_memory_exception,
4790 StackFrame::Type frame_type, 4904 StackFrame::Type frame_type,
4791 bool do_gc, 4905 bool do_gc,
4792 bool always_allocate) { 4906 bool always_allocate) {
(...skipping 238 matching lines...)
5031 // r4: argv 5145 // r4: argv
5032 if (is_construct) { 5146 if (is_construct) {
5033 ExternalReference construct_entry(Builtins::JSConstructEntryTrampoline); 5147 ExternalReference construct_entry(Builtins::JSConstructEntryTrampoline);
5034 __ mov(ip, Operand(construct_entry)); 5148 __ mov(ip, Operand(construct_entry));
5035 } else { 5149 } else {
5036 ExternalReference entry(Builtins::JSEntryTrampoline); 5150 ExternalReference entry(Builtins::JSEntryTrampoline);
5037 __ mov(ip, Operand(entry)); 5151 __ mov(ip, Operand(entry));
5038 } 5152 }
5039 __ ldr(ip, MemOperand(ip)); // deref address 5153 __ ldr(ip, MemOperand(ip)); // deref address
5040 5154
5041 // Branch and link to JSEntryTrampoline 5155 // Branch and link to JSEntryTrampoline. We don't use the double underscore
5156 // macro for the add instruction because we don't want the coverage tool
5157 // inserting instructions here after we read the pc.
5042 __ mov(lr, Operand(pc)); 5158 __ mov(lr, Operand(pc));
5043 __ add(pc, ip, Operand(Code::kHeaderSize - kHeapObjectTag)); 5159 masm->add(pc, ip, Operand(Code::kHeaderSize - kHeapObjectTag));
5044 5160
5045 // Unlink this frame from the handler chain. When reading the 5161 // Unlink this frame from the handler chain. When reading the
5046 // address of the next handler, there is no need to use the address 5162 // address of the next handler, there is no need to use the address
5047 // displacement since the current stack pointer (sp) points directly 5163 // displacement since the current stack pointer (sp) points directly
5048 // to the stack handler. 5164 // to the stack handler.
5049 __ ldr(r3, MemOperand(sp, StackHandlerConstants::kNextOffset)); 5165 __ ldr(r3, MemOperand(sp, StackHandlerConstants::kNextOffset));
5050 __ mov(ip, Operand(ExternalReference(Top::k_handler_address))); 5166 __ mov(ip, Operand(ExternalReference(Top::k_handler_address)));
5051 __ str(r3, MemOperand(ip)); 5167 __ str(r3, MemOperand(ip));
5052 // No need to restore registers 5168 // No need to restore registers
5053 __ add(sp, sp, Operand(StackHandlerConstants::kSize)); 5169 __ add(sp, sp, Operand(StackHandlerConstants::kSize));
5054 5170
5171
5055 __ bind(&exit); // r0 holds result 5172 __ bind(&exit); // r0 holds result
5056 // Restore the top frame descriptors from the stack. 5173 // Restore the top frame descriptors from the stack.
5057 __ pop(r3); 5174 __ pop(r3);
5058 __ mov(ip, Operand(ExternalReference(Top::k_c_entry_fp_address))); 5175 __ mov(ip, Operand(ExternalReference(Top::k_c_entry_fp_address)));
5059 __ str(r3, MemOperand(ip)); 5176 __ str(r3, MemOperand(ip));
5060 5177
5061 // Reset the stack to the callee saved registers. 5178 // Reset the stack to the callee saved registers.
5062 __ add(sp, sp, Operand(-EntryFrameConstants::kCallerFPOffset)); 5179 __ add(sp, sp, Operand(-EntryFrameConstants::kCallerFPOffset));
5063 5180
5064 // Restore callee-saved registers and return. 5181 // Restore callee-saved registers and return.
5065 #ifdef DEBUG 5182 #ifdef DEBUG
5066 if (FLAG_debug_code) __ mov(lr, Operand(pc)); 5183 if (FLAG_debug_code) {
5184 __ mov(lr, Operand(pc));
5185 }
5067 #endif 5186 #endif
5068 __ ldm(ia_w, sp, kCalleeSaved | pc.bit()); 5187 __ ldm(ia_w, sp, kCalleeSaved | pc.bit());
5069 } 5188 }
5070 5189
5071 5190
5072 void ArgumentsAccessStub::GenerateReadLength(MacroAssembler* masm) { 5191 void ArgumentsAccessStub::GenerateReadLength(MacroAssembler* masm) {
5073 // Check if the calling frame is an arguments adaptor frame. 5192 // Check if the calling frame is an arguments adaptor frame.
5074 Label adaptor; 5193 Label adaptor;
5075 __ ldr(r2, MemOperand(fp, StandardFrameConstants::kCallerFPOffset)); 5194 __ ldr(r2, MemOperand(fp, StandardFrameConstants::kCallerFPOffset));
5076 __ ldr(r3, MemOperand(r2, StandardFrameConstants::kContextOffset)); 5195 __ ldr(r3, MemOperand(r2, StandardFrameConstants::kContextOffset));
(...skipping 112 matching lines...)
5189 __ mov(r2, Operand(0)); 5308 __ mov(r2, Operand(0));
5190 __ GetBuiltinEntry(r3, Builtins::CALL_NON_FUNCTION); 5309 __ GetBuiltinEntry(r3, Builtins::CALL_NON_FUNCTION);
5191 __ Jump(Handle<Code>(Builtins::builtin(Builtins::ArgumentsAdaptorTrampoline)), 5310 __ Jump(Handle<Code>(Builtins::builtin(Builtins::ArgumentsAdaptorTrampoline)),
5192 RelocInfo::CODE_TARGET); 5311 RelocInfo::CODE_TARGET);
5193 } 5312 }
5194 5313
5195 5314
5196 #undef __ 5315 #undef __
5197 5316
5198 } } // namespace v8::internal 5317 } } // namespace v8::internal