Chromium Code Reviews

Side by Side Diff: runtime/vm/flow_graph_compiler_arm.cc

Issue 1192103004: VM: New calling convention for generated code. (Closed) Base URL: git@github.com:dart-lang/sdk.git@master
Patch Set: fixed comments Created 5 years, 3 months ago
1 // Copyright (c) 2013, the Dart project authors. Please see the AUTHORS file 1 // Copyright (c) 2013, the Dart project authors. Please see the AUTHORS file
2 // for details. All rights reserved. Use of this source code is governed by a 2 // for details. All rights reserved. Use of this source code is governed by a
3 // BSD-style license that can be found in the LICENSE file. 3 // BSD-style license that can be found in the LICENSE file.
4 4
5 #include "vm/globals.h" // Needed here to get TARGET_ARCH_ARM. 5 #include "vm/globals.h" // Needed here to get TARGET_ARCH_ARM.
6 #if defined(TARGET_ARCH_ARM) 6 #if defined(TARGET_ARCH_ARM)
7 7
8 #include "vm/flow_graph_compiler.h" 8 #include "vm/flow_graph_compiler.h"
9 9
10 #include "vm/ast_printer.h" 10 #include "vm/ast_printer.h"
(...skipping 82 matching lines...)
93 93
94 // Emit all kMaterializeObject instructions describing objects to be 94 // Emit all kMaterializeObject instructions describing objects to be
95 // materialized on the deoptimization as a prefix to the deoptimization info. 95 // materialized on the deoptimization as a prefix to the deoptimization info.
96 EmitMaterializations(deopt_env_, builder); 96 EmitMaterializations(deopt_env_, builder);
97 97
98 // The real frame starts here. 98 // The real frame starts here.
99 builder->MarkFrameStart(); 99 builder->MarkFrameStart();
100 100
101 Zone* zone = compiler->zone(); 101 Zone* zone = compiler->zone();
102 102
103 // Current PP, FP, and PC.
104 builder->AddPp(current->function(), slot_ix++); 103 builder->AddPp(current->function(), slot_ix++);
104 builder->AddPcMarker(Function::Handle(zone), slot_ix++);
105 builder->AddCallerFp(slot_ix++); 105 builder->AddCallerFp(slot_ix++);
106 builder->AddReturnAddress(current->function(), deopt_id(), slot_ix++); 106 builder->AddReturnAddress(current->function(), deopt_id(), slot_ix++);
107 107
108 // Callee's PC marker is not used anymore. Pass Function::null() to set it to 0.
109 builder->AddPcMarker(Function::Handle(zone), slot_ix++);
110
111 // Emit all values that are needed for materialization as a part of the 108 // Emit all values that are needed for materialization as a part of the
112 // expression stack for the bottom-most frame. This guarantees that GC 109 // expression stack for the bottom-most frame. This guarantees that GC
113 // will be able to find them during materialization. 110 // will be able to find them during materialization.
114 slot_ix = builder->EmitMaterializationArguments(slot_ix); 111 slot_ix = builder->EmitMaterializationArguments(slot_ix);
115 112
116 // For the innermost environment, set outgoing arguments and the locals. 113 // For the innermost environment, set outgoing arguments and the locals.
117 for (intptr_t i = current->Length() - 1; 114 for (intptr_t i = current->Length() - 1;
118 i >= current->fixed_parameter_count(); 115 i >= current->fixed_parameter_count();
119 i--) { 116 i--) {
120 builder->AddCopy(current->ValueAt(i), current->LocationAt(i), slot_ix++); 117 builder->AddCopy(current->ValueAt(i), current->LocationAt(i), slot_ix++);
121 } 118 }
122 119
123 Environment* previous = current; 120 Environment* previous = current;
124 current = current->outer(); 121 current = current->outer();
125 while (current != NULL) { 122 while (current != NULL) {
126 // PP, FP, and PC.
127 builder->AddPp(current->function(), slot_ix++); 123 builder->AddPp(current->function(), slot_ix++);
124 builder->AddPcMarker(previous->function(), slot_ix++);
128 builder->AddCallerFp(slot_ix++); 125 builder->AddCallerFp(slot_ix++);
129 126
130 // For any outer environment the deopt id is that of the call instruction 127 // For any outer environment the deopt id is that of the call instruction
131 // which is recorded in the outer environment. 128 // which is recorded in the outer environment.
132 builder->AddReturnAddress( 129 builder->AddReturnAddress(
133 current->function(), 130 current->function(),
134 Isolate::ToDeoptAfter(current->deopt_id()), 131 Isolate::ToDeoptAfter(current->deopt_id()),
135 slot_ix++); 132 slot_ix++);
136 133
137 // PC marker.
138 builder->AddPcMarker(previous->function(), slot_ix++);
139
140 // The values of outgoing arguments can be changed by the inlined call, so 134 // The values of outgoing arguments can be changed by the inlined call, so
141 // we must read them from the previous environment. 135 // we must read them from the previous environment.
142 for (intptr_t i = previous->fixed_parameter_count() - 1; i >= 0; i--) { 136 for (intptr_t i = previous->fixed_parameter_count() - 1; i >= 0; i--) {
143 builder->AddCopy(previous->ValueAt(i), 137 builder->AddCopy(previous->ValueAt(i),
144 previous->LocationAt(i), 138 previous->LocationAt(i),
145 slot_ix++); 139 slot_ix++);
146 } 140 }
147 141
148 // Set the locals; note that outgoing arguments are not in the environment. 142 // Set the locals; note that outgoing arguments are not in the environment.
149 for (intptr_t i = current->Length() - 1; 143 for (intptr_t i = current->Length() - 1;
150 i >= current->fixed_parameter_count(); 144 i >= current->fixed_parameter_count();
151 i--) { 145 i--) {
152 builder->AddCopy(current->ValueAt(i), 146 builder->AddCopy(current->ValueAt(i),
153 current->LocationAt(i), 147 current->LocationAt(i),
154 slot_ix++); 148 slot_ix++);
155 } 149 }
156 150
157 // Iterate on the outer environment. 151 // Iterate on the outer environment.
158 previous = current; 152 previous = current;
159 current = current->outer(); 153 current = current->outer();
160 } 154 }
161 // The previous pointer is now the outermost environment. 155 // The previous pointer is now the outermost environment.
162 ASSERT(previous != NULL); 156 ASSERT(previous != NULL);
163 157
164 // For the outermost environment, set caller PC, caller PP, and caller FP. 158 // Set slots for the outermost environment.
165 builder->AddCallerPp(slot_ix++); 159 builder->AddCallerPp(slot_ix++);
160 builder->AddPcMarker(previous->function(), slot_ix++);
166 builder->AddCallerFp(slot_ix++); 161 builder->AddCallerFp(slot_ix++);
167 builder->AddCallerPc(slot_ix++); 162 builder->AddCallerPc(slot_ix++);
168 163
169 // PC marker.
170 builder->AddPcMarker(previous->function(), slot_ix++);
171
172 // For the outermost environment, set the incoming arguments. 164 // For the outermost environment, set the incoming arguments.
173 for (intptr_t i = previous->fixed_parameter_count() - 1; i >= 0; i--) { 165 for (intptr_t i = previous->fixed_parameter_count() - 1; i >= 0; i--) {
174 builder->AddCopy(previous->ValueAt(i), previous->LocationAt(i), slot_ix++); 166 builder->AddCopy(previous->ValueAt(i), previous->LocationAt(i), slot_ix++);
175 } 167 }
176 168
177 return builder->CreateDeoptInfo(deopt_table); 169 return builder->CreateDeoptInfo(deopt_table);
178 } 170 }
179 171
180 172
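Note on the reordering above: the patch emits the PC marker slot immediately after PP (or after caller PP for the outermost frame), instead of after the return address or caller PC as before. A minimal standalone C++ sketch, not the VM's DeoptInfoBuilder, that just prints the new slot order for a deopt frame with one inlined caller:

#include <cstdio>
#include <string>
#include <vector>

int main() {
  std::vector<std::string> slots;
  // Inner-most frame: PP, PC marker, caller FP, return address, then the
  // materialization arguments, outgoing arguments, and locals.
  slots.insert(slots.end(), {"pp", "pc marker", "caller fp", "return address",
                             "...values..."});
  // Each outer (inlined caller) frame repeats the same prefix.
  slots.insert(slots.end(), {"pp", "pc marker", "caller fp", "return address",
                             "...values..."});
  // Outer-most frame: caller PP, PC marker, caller FP, caller PC, then the
  // incoming arguments.
  slots.insert(slots.end(), {"caller pp", "pc marker", "caller fp",
                             "caller pc", "...incoming args..."});
  for (size_t i = 0; i < slots.size(); ++i) {
    std::printf("slot %zu: %s\n", i, slots[i].c_str());
  }
  return 0;
}

Only the position of the PC marker slot changes; the set of slots recorded per frame stays the same.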
181 void CompilerDeoptInfoWithStub::GenerateCode(FlowGraphCompiler* compiler, 173 void CompilerDeoptInfoWithStub::GenerateCode(FlowGraphCompiler* compiler,
182 intptr_t stub_ix) { 174 intptr_t stub_ix) {
183 // Calls do not need stubs; they share a deoptimization trampoline. 175 // Calls do not need stubs; they share a deoptimization trampoline.
184 ASSERT(reason() != ICData::kDeoptAtCall); 176 ASSERT(reason() != ICData::kDeoptAtCall);
185 Assembler* assem = compiler->assembler(); 177 Assembler* assem = compiler->assembler();
186 #define __ assem-> 178 #define __ assem->
187 __ Comment("%s", Name()); 179 __ Comment("%s", Name());
188 __ Bind(entry_label()); 180 __ Bind(entry_label());
189 if (FLAG_trap_on_deoptimization) { 181 if (FLAG_trap_on_deoptimization) {
190 __ bkpt(0); 182 __ bkpt(0);
191 } 183 }
192 184
193 ASSERT(deopt_env() != NULL); 185 ASSERT(deopt_env() != NULL);
194 186
195 // LR may be live. It will be clobbered by BranchLink, so cache it in IP. 187 // LR may be live. It will be clobbered by BranchLink, so cache it in IP.
196 // It will be restored at the top of the deoptimization stub, specifically in 188 // It will be restored at the top of the deoptimization stub, specifically in
197 // GenerateDeoptimizationSequence in stub_code_arm.cc. 189 // GenerateDeoptimizationSequence in stub_code_arm.cc.
190 __ Push(CODE_REG);
198 __ mov(IP, Operand(LR)); 191 __ mov(IP, Operand(LR));
199 __ BranchLink(*StubCode::Deoptimize_entry()); 192 __ BranchLink(*StubCode::Deoptimize_entry());
200 set_pc_offset(assem->CodeSize()); 193 set_pc_offset(assem->CodeSize());
201 #undef __ 194 #undef __
202 } 195 }
203 196
204 197
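The only functional change in this stub path is the added Push(CODE_REG): the new calling convention keeps the current Code object in CODE_REG, and the deopt path now hands it to the Deoptimize stub on the stack while the possibly live LR is cached in IP before BranchLink clobbers it. A tiny illustrative C++ model of that hand-off (hypothetical values; not VM code):

#include <cstdint>
#include <cstdio>

// Hypothetical model of the state the deoptimization stub receives.
struct DeoptHandoff {
  uintptr_t code_on_stack;  // the Code object pushed above (new in this patch).
  uintptr_t lr_in_ip;       // original return address, cached in IP.
};

int main() {
  const DeoptHandoff handoff = {0x4000u /* fake Code address */,
                                0x8020u /* fake return address */};
  std::printf("code object on stack: %#lx\n",
              static_cast<unsigned long>(handoff.code_on_stack));
  std::printf("saved LR (in IP):     %#lx\n",
              static_cast<unsigned long>(handoff.lr_in_ip));
  return 0;
}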
205 #define __ assembler()-> 198 #define __ assembler()->
206 199
207 200
(...skipping 544 matching lines...)
752 const int num_params = 745 const int num_params =
753 num_fixed_params + num_opt_pos_params + num_opt_named_params; 746 num_fixed_params + num_opt_pos_params + num_opt_named_params;
754 ASSERT(function.NumParameters() == num_params); 747 ASSERT(function.NumParameters() == num_params);
755 ASSERT(parsed_function().first_parameter_index() == kFirstLocalSlotFromFp); 748 ASSERT(parsed_function().first_parameter_index() == kFirstLocalSlotFromFp);
756 749
757 // Check that min_num_pos_args <= num_pos_args <= max_num_pos_args, 750 // Check that min_num_pos_args <= num_pos_args <= max_num_pos_args,
758 // where num_pos_args is the number of positional arguments passed in. 751 // where num_pos_args is the number of positional arguments passed in.
759 const int min_num_pos_args = num_fixed_params; 752 const int min_num_pos_args = num_fixed_params;
760 const int max_num_pos_args = num_fixed_params + num_opt_pos_params; 753 const int max_num_pos_args = num_fixed_params + num_opt_pos_params;
761 754
762 __ ldr(R9, FieldAddress(R4, ArgumentsDescriptor::positional_count_offset())); 755 __ ldr(R10, FieldAddress(R4, ArgumentsDescriptor::positional_count_offset()));
763 // Check that min_num_pos_args <= num_pos_args. 756 // Check that min_num_pos_args <= num_pos_args.
764 Label wrong_num_arguments; 757 Label wrong_num_arguments;
765 __ CompareImmediate(R9, Smi::RawValue(min_num_pos_args)); 758 __ CompareImmediate(R10, Smi::RawValue(min_num_pos_args));
766 __ b(&wrong_num_arguments, LT); 759 __ b(&wrong_num_arguments, LT);
767 // Check that num_pos_args <= max_num_pos_args. 760 // Check that num_pos_args <= max_num_pos_args.
768 __ CompareImmediate(R9, Smi::RawValue(max_num_pos_args)); 761 __ CompareImmediate(R10, Smi::RawValue(max_num_pos_args));
769 __ b(&wrong_num_arguments, GT); 762 __ b(&wrong_num_arguments, GT);
770 763
771 // Copy positional arguments. 764 // Copy positional arguments.
772 // Argument i passed at fp[kParamEndSlotFromFp + num_args - i] is copied 765 // Argument i passed at fp[kParamEndSlotFromFp + num_args - i] is copied
773 // to fp[kFirstLocalSlotFromFp - i]. 766 // to fp[kFirstLocalSlotFromFp - i].
774 767
775 __ ldr(R7, FieldAddress(R4, ArgumentsDescriptor::count_offset())); 768 __ ldr(R7, FieldAddress(R4, ArgumentsDescriptor::count_offset()));
776 // Since R7 and R9 are Smi, use LSL 1 instead of LSL 2. 769 // Since R7 and R10 are Smi, use LSL 1 instead of LSL 2.
777 // Let R7 point to the last passed positional argument, i.e. to 770 // Let R7 point to the last passed positional argument, i.e. to
778 // fp[kParamEndSlotFromFp + num_args - (num_pos_args - 1)]. 771 // fp[kParamEndSlotFromFp + num_args - (num_pos_args - 1)].
779 __ sub(R7, R7, Operand(R9)); 772 __ sub(R7, R7, Operand(R10));
780 __ add(R7, FP, Operand(R7, LSL, 1)); 773 __ add(R7, FP, Operand(R7, LSL, 1));
781 __ add(R7, R7, Operand((kParamEndSlotFromFp + 1) * kWordSize)); 774 __ add(R7, R7, Operand((kParamEndSlotFromFp + 1) * kWordSize));
782 775
783 // Let R6 point to the last copied positional argument, i.e. to 776 // Let R6 point to the last copied positional argument, i.e. to
784 // fp[kFirstLocalSlotFromFp - (num_pos_args - 1)]. 777 // fp[kFirstLocalSlotFromFp - (num_pos_args - 1)].
785 __ AddImmediate(R6, FP, (kFirstLocalSlotFromFp + 1) * kWordSize); 778 __ AddImmediate(R6, FP, (kFirstLocalSlotFromFp + 1) * kWordSize);
786 __ sub(R6, R6, Operand(R9, LSL, 1)); // R9 is a Smi. 779 __ sub(R6, R6, Operand(R10, LSL, 1)); // R10 is a Smi.
787 __ SmiUntag(R9); 780 __ SmiUntag(R10);
788 Label loop, loop_condition; 781 Label loop, loop_condition;
789 __ b(&loop_condition); 782 __ b(&loop_condition);
790 // We do not use the final allocation index of the variable here, i.e. 783 // We do not use the final allocation index of the variable here, i.e.
791 // scope->VariableAt(i)->index(), because captured variables still need 784 // scope->VariableAt(i)->index(), because captured variables still need
792 // to be copied to the context that is not yet allocated. 785 // to be copied to the context that is not yet allocated.
793 const Address argument_addr(R7, R9, LSL, 2); 786 const Address argument_addr(R7, R10, LSL, 2);
794 const Address copy_addr(R6, R9, LSL, 2); 787 const Address copy_addr(R6, R10, LSL, 2);
795 __ Bind(&loop); 788 __ Bind(&loop);
796 __ ldr(IP, argument_addr); 789 __ ldr(IP, argument_addr);
797 __ str(IP, copy_addr); 790 __ str(IP, copy_addr);
798 __ Bind(&loop_condition); 791 __ Bind(&loop_condition);
799 __ subs(R9, R9, Operand(1)); 792 __ subs(R10, R10, Operand(1));
800 __ b(&loop, PL); 793 __ b(&loop, PL);
801 794
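The loop above copies positional argument i from the caller's slot fp[kParamEndSlotFromFp + num_args - i] into fp[kFirstLocalSlotFromFp - i], and because R7 and R10 hold Smi-tagged counts (already scaled by two) it uses LSL 1 rather than LSL 2 to get byte offsets. A standalone C++ sketch of both facts; the two slot constants are placeholder values chosen for the sketch, not necessarily the VM's:

#include <cassert>
#include <cstdio>

constexpr int kWordSize = 4;              // ARM32 word size.
constexpr int kParamEndSlotFromFp = 1;    // placeholder value for the sketch.
constexpr int kFirstLocalSlotFromFp = -2; // placeholder value for the sketch.

constexpr int SmiTag(int value) { return value << 1; }

int main() {
  const int num_args = 3;                 // positional arguments passed.
  // "R7 and R10 are Smi, use LSL 1 instead of LSL 2": shifting a tagged count
  // by one more bit scales the untagged count by kWordSize.
  assert((SmiTag(num_args) << 1) == num_args * kWordSize);
  // Argument i is copied from the caller's slot to the local slot.
  for (int i = 0; i < num_args; ++i) {
    const int src_slot = kParamEndSlotFromFp + num_args - i;
    const int dst_slot = kFirstLocalSlotFromFp - i;
    std::printf("arg %d: fp[%d] -> fp[%d]\n", i, src_slot, dst_slot);
  }
  return 0;
}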
802 // Copy or initialize optional named arguments. 795 // Copy or initialize optional named arguments.
803 Label all_arguments_processed; 796 Label all_arguments_processed;
804 #ifdef DEBUG 797 #ifdef DEBUG
805 const bool check_correct_named_args = true; 798 const bool check_correct_named_args = true;
806 #else 799 #else
807 const bool check_correct_named_args = function.IsClosureFunction(); 800 const bool check_correct_named_args = function.IsClosureFunction();
808 #endif 801 #endif
809 if (num_opt_named_params > 0) { 802 if (num_opt_named_params > 0) {
(...skipping 10 matching lines...)
820 ASSERT(result != 0); 813 ASSERT(result != 0);
821 if (result > 0) break; 814 if (result > 0) break;
822 opt_param[i + 1] = opt_param[i]; 815 opt_param[i + 1] = opt_param[i];
823 opt_param_position[i + 1] = opt_param_position[i]; 816 opt_param_position[i + 1] = opt_param_position[i];
824 } 817 }
825 opt_param[i + 1] = parameter; 818 opt_param[i + 1] = parameter;
826 opt_param_position[i + 1] = pos; 819 opt_param_position[i + 1] = pos;
827 } 820 }
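The fragment above is the tail of an insertion sort that orders the optional named parameters alphabetically before code is generated for them (the head of the loop falls in the elided lines). A self-contained C++ sketch of the same idea, with made-up parameter names and positions:

#include <cstdio>
#include <cstring>

int main() {
  // Hypothetical optional named parameters and their formal positions.
  const char* opt_param[] = {"radix", "growable", "onError"};
  int opt_param_position[] = {1, 2, 3};
  const int n = 3;
  // Insertion sort keyed on the parameter name; names are known to be
  // distinct, which is what the ASSERT(result != 0) above checks.
  for (int k = 1; k < n; ++k) {
    const char* name = opt_param[k];
    const int pos = opt_param_position[k];
    int i = k - 1;
    for (; i >= 0; --i) {
      if (strcmp(name, opt_param[i]) > 0) break;  // result > 0: keep order.
      opt_param[i + 1] = opt_param[i];
      opt_param_position[i + 1] = opt_param_position[i];
    }
    opt_param[i + 1] = name;
    opt_param_position[i + 1] = pos;
  }
  for (int k = 0; k < n; ++k) {
    std::printf("%s (formal position %d)\n", opt_param[k],
                opt_param_position[k]);
  }
  return 0;
}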
828 // Generate code handling each optional parameter in alphabetical order. 821 // Generate code handling each optional parameter in alphabetical order.
829 __ ldr(R7, FieldAddress(R4, ArgumentsDescriptor::count_offset())); 822 __ ldr(R7, FieldAddress(R4, ArgumentsDescriptor::count_offset()));
830 __ ldr(R9, 823 __ ldr(R10,
831 FieldAddress(R4, ArgumentsDescriptor::positional_count_offset())); 824 FieldAddress(R4, ArgumentsDescriptor::positional_count_offset()));
832 __ SmiUntag(R9); 825 __ SmiUntag(R10);
833 // Let R7 point to the first passed argument, i.e. to 826 // Let R7 point to the first passed argument, i.e. to
834 // fp[kParamEndSlotFromFp + num_args - 0]; num_args (R7) is Smi. 827 // fp[kParamEndSlotFromFp + num_args - 0]; num_args (R7) is Smi.
835 __ add(R7, FP, Operand(R7, LSL, 1)); 828 __ add(R7, FP, Operand(R7, LSL, 1));
836 __ AddImmediate(R7, R7, kParamEndSlotFromFp * kWordSize); 829 __ AddImmediate(R7, R7, kParamEndSlotFromFp * kWordSize);
837 // Let R6 point to the entry of the first named argument. 830 // Let R6 point to the entry of the first named argument.
838 __ add(R6, R4, Operand( 831 __ add(R6, R4, Operand(
839 ArgumentsDescriptor::first_named_entry_offset() - kHeapObjectTag)); 832 ArgumentsDescriptor::first_named_entry_offset() - kHeapObjectTag));
840 for (int i = 0; i < num_opt_named_params; i++) { 833 for (int i = 0; i < num_opt_named_params; i++) {
841 Label load_default_value, assign_optional_parameter; 834 Label load_default_value, assign_optional_parameter;
842 const int param_pos = opt_param_position[i]; 835 const int param_pos = opt_param_position[i];
(...skipping 31 matching lines...)
874 delete[] opt_param_position; 867 delete[] opt_param_position;
875 if (check_correct_named_args) { 868 if (check_correct_named_args) {
876 // Check that R6 now points to the null terminator in the arguments 869 // Check that R6 now points to the null terminator in the arguments
877 // descriptor. 870 // descriptor.
878 __ ldr(R5, Address(R6, 0)); 871 __ ldr(R5, Address(R6, 0));
879 __ CompareObject(R5, Object::null_object()); 872 __ CompareObject(R5, Object::null_object());
880 __ b(&all_arguments_processed, EQ); 873 __ b(&all_arguments_processed, EQ);
881 } 874 }
882 } else { 875 } else {
883 ASSERT(num_opt_pos_params > 0); 876 ASSERT(num_opt_pos_params > 0);
884 __ ldr(R9, 877 __ ldr(R10,
885 FieldAddress(R4, ArgumentsDescriptor::positional_count_offset())); 878 FieldAddress(R4, ArgumentsDescriptor::positional_count_offset()));
886 __ SmiUntag(R9); 879 __ SmiUntag(R10);
887 for (int i = 0; i < num_opt_pos_params; i++) { 880 for (int i = 0; i < num_opt_pos_params; i++) {
888 Label next_parameter; 881 Label next_parameter;
889 // Handle this optional positional parameter only if k or fewer positional 882 // Handle this optional positional parameter only if k or fewer positional
890 // arguments have been passed, where k is param_pos, the position of this 883 // arguments have been passed, where k is param_pos, the position of this
891 // optional parameter in the formal parameter list. 884 // optional parameter in the formal parameter list.
892 const int param_pos = num_fixed_params + i; 885 const int param_pos = num_fixed_params + i;
893 __ CompareImmediate(R9, param_pos); 886 __ CompareImmediate(R10, param_pos);
894 __ b(&next_parameter, GT); 887 __ b(&next_parameter, GT);
895 // Load R5 with default argument. 888 // Load R5 with default argument.
896 const Object& value = parsed_function().DefaultParameterValueAt(i); 889 const Object& value = parsed_function().DefaultParameterValueAt(i);
897 __ LoadObject(R5, value); 890 __ LoadObject(R5, value);
898 // Assign R5 to fp[kFirstLocalSlotFromFp - param_pos]. 891 // Assign R5 to fp[kFirstLocalSlotFromFp - param_pos].
899 // We do not use the final allocation index of the variable here, i.e. 892 // We do not use the final allocation index of the variable here, i.e.
900 // scope->VariableAt(i)->index(), because captured variables still need 893 // scope->VariableAt(i)->index(), because captured variables still need
901 // to be copied to the context that is not yet allocated. 894 // to be copied to the context that is not yet allocated.
902 const intptr_t computed_param_pos = kFirstLocalSlotFromFp - param_pos; 895 const intptr_t computed_param_pos = kFirstLocalSlotFromFp - param_pos;
903 const Address param_addr(FP, computed_param_pos * kWordSize); 896 const Address param_addr(FP, computed_param_pos * kWordSize);
904 __ str(R5, param_addr); 897 __ str(R5, param_addr);
905 __ Bind(&next_parameter); 898 __ Bind(&next_parameter);
906 } 899 }
907 if (check_correct_named_args) { 900 if (check_correct_named_args) {
908 __ ldr(R7, FieldAddress(R4, ArgumentsDescriptor::count_offset())); 901 __ ldr(R7, FieldAddress(R4, ArgumentsDescriptor::count_offset()));
909 __ SmiUntag(R7); 902 __ SmiUntag(R7);
910 // Check that R9 equals R7, i.e. no named arguments passed. 903 // Check that R10 equals R7, i.e. no named arguments passed.
911 __ cmp(R9, Operand(R7)); 904 __ cmp(R10, Operand(R7));
912 __ b(&all_arguments_processed, EQ); 905 __ b(&all_arguments_processed, EQ);
913 } 906 }
914 } 907 }
915 908
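The rule for optional positional parameters above: the parameter at formal position param_pos gets its default value whenever the untagged positional-argument count in R10 is less than or equal to param_pos; otherwise it was already copied by the earlier loop. A small illustrative C++ model with made-up counts and default values:

#include <cstdio>

int main() {
  const int num_fixed_params = 1;
  const int num_opt_pos_params = 2;
  const int num_pos_args = 2;                 // positional arguments passed.
  const char* defaults[] = {"10", "null"};    // hypothetical default values.
  for (int i = 0; i < num_opt_pos_params; ++i) {
    const int param_pos = num_fixed_params + i;
    if (num_pos_args <= param_pos) {
      // Mirrors the fall-through after "b(&next_parameter, GT)" above.
      std::printf("param %d: use default %s\n", param_pos, defaults[i]);
    } else {
      std::printf("param %d: already copied from the caller\n", param_pos);
    }
  }
  return 0;
}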
916 __ Bind(&wrong_num_arguments); 909 __ Bind(&wrong_num_arguments);
917 if (function.IsClosureFunction()) { 910 if (function.IsClosureFunction()) {
918 ASSERT(assembler()->constant_pool_allowed()); 911 __ LeaveDartFrame(kKeepCalleePP); // The arguments are still on the stack.
919 __ LeaveDartFrame(); // The arguments are still on the stack.
920 // Do not use caller's pool ptr in branch.
921 ASSERT(!assembler()->constant_pool_allowed());
922 __ Branch(*StubCode::CallClosureNoSuchMethod_entry()); 912 __ Branch(*StubCode::CallClosureNoSuchMethod_entry());
923 __ set_constant_pool_allowed(true);
924 // The noSuchMethod call may return to the caller, but not here. 913 // The noSuchMethod call may return to the caller, but not here.
925 } else if (check_correct_named_args) { 914 } else if (check_correct_named_args) {
926 __ Stop("Wrong arguments"); 915 __ Stop("Wrong arguments");
927 } 916 }
928 917
929 __ Bind(&all_arguments_processed); 918 __ Bind(&all_arguments_processed);
930 // Nullify originally passed arguments only after they have been copied and 919 // Nullify originally passed arguments only after they have been copied and
931 // checked; otherwise noSuchMethod would not see their original values. 920 // checked; otherwise noSuchMethod would not see their original values.
932 // This step can be skipped if we decide that formal parameters are 921 // This step can be skipped if we decide that formal parameters are
933 // implicitly final, since garbage collecting the unmodified value is not 922 // implicitly final, since garbage collecting the unmodified value is not
934 // an issue anymore. 923 // an issue anymore.
935 924
936 // R4 : arguments descriptor array. 925 // R4 : arguments descriptor array.
937 __ ldr(R9, FieldAddress(R4, ArgumentsDescriptor::count_offset())); 926 __ ldr(R10, FieldAddress(R4, ArgumentsDescriptor::count_offset()));
938 __ SmiUntag(R9); 927 __ SmiUntag(R10);
939 __ add(R7, FP, Operand((kParamEndSlotFromFp + 1) * kWordSize)); 928 __ add(R7, FP, Operand((kParamEndSlotFromFp + 1) * kWordSize));
940 const Address original_argument_addr(R7, R9, LSL, 2); 929 const Address original_argument_addr(R7, R10, LSL, 2);
941 __ LoadObject(IP, Object::null_object()); 930 __ LoadObject(IP, Object::null_object());
942 Label null_args_loop, null_args_loop_condition; 931 Label null_args_loop, null_args_loop_condition;
943 __ b(&null_args_loop_condition); 932 __ b(&null_args_loop_condition);
944 __ Bind(&null_args_loop); 933 __ Bind(&null_args_loop);
945 __ str(IP, original_argument_addr); 934 __ str(IP, original_argument_addr);
946 __ Bind(&null_args_loop_condition); 935 __ Bind(&null_args_loop_condition);
947 __ subs(R9, R9, Operand(1)); 936 __ subs(R10, R10, Operand(1));
948 __ b(&null_args_loop, PL); 937 __ b(&null_args_loop, PL);
949 } 938 }
950 939
951 940
952 void FlowGraphCompiler::GenerateInlinedGetter(intptr_t offset) { 941 void FlowGraphCompiler::GenerateInlinedGetter(intptr_t offset) {
953 // LR: return address. 942 // LR: return address.
954 // SP: receiver. 943 // SP: receiver.
955 // Sequence node has one return node; its input is a load field node. 944 // Sequence node has one return node; its input is a load field node.
956 __ Comment("Inlined Getter"); 945 __ Comment("Inlined Getter");
957 __ ldr(R0, Address(SP, 0 * kWordSize)); 946 __ ldr(R0, Address(SP, 0 * kWordSize));
958 __ LoadFromOffset(kWord, R0, R0, offset - kHeapObjectTag); 947 __ LoadFromOffset(kWord, R0, R0, offset - kHeapObjectTag);
959 __ Ret(); 948 __ Ret();
960 } 949 }
961 950
962 951
963 void FlowGraphCompiler::GenerateInlinedSetter(intptr_t offset) { 952 void FlowGraphCompiler::GenerateInlinedSetter(intptr_t offset) {
964 // LR: return address. 953 // LR: return address.
965 // SP+1: receiver. 954 // SP+1: receiver.
966 // SP+0: value. 955 // SP+0: value.
967 // Sequence node has one store node and one return NULL node. 956 // Sequence node has one store node and one return NULL node.
968 __ Comment("Inlined Setter"); 957 __ Comment("Inlined Setter");
969 __ ldr(R0, Address(SP, 1 * kWordSize)); // Receiver. 958 __ ldr(R0, Address(SP, 1 * kWordSize)); // Receiver.
970 __ ldr(R1, Address(SP, 0 * kWordSize)); // Value. 959 __ ldr(R1, Address(SP, 0 * kWordSize)); // Value.
971 __ StoreIntoObjectOffset(R0, offset, R1); 960 __ StoreIntoObjectOffset(R0, offset, R1);
972 __ LoadObject(R0, Object::null_object()); 961 __ LoadObject(R0, Object::null_object());
973 __ Ret(); 962 __ Ret();
974 } 963 }
975 964
976 965
966 static const Register new_pp = R7;
967
968
977 void FlowGraphCompiler::EmitFrameEntry() { 969 void FlowGraphCompiler::EmitFrameEntry() {
978 const Function& function = parsed_function().function(); 970 const Function& function = parsed_function().function();
979 if (CanOptimizeFunction() && 971 if (CanOptimizeFunction() &&
980 function.IsOptimizable() && 972 function.IsOptimizable() &&
981 (!is_optimizing() || may_reoptimize())) { 973 (!is_optimizing() || may_reoptimize())) {
982 const Register function_reg = R6; 974 const Register function_reg = R6;
983 975
984 // The pool pointer is not set up before entering the Dart frame. 976 // The pool pointer is not set up before entering the Dart frame.
985 // Preserve PP of caller.
986 __ mov(R7, Operand(PP));
987 // Temporarily setup pool pointer for this dart function. 977 // Temporarily setup pool pointer for this dart function.
988 __ LoadPoolPointer(); 978 __ LoadPoolPointer(new_pp);
989 // Load function object from object pool. 979 // Load function object from object pool.
990 __ LoadObject(function_reg, function); // Uses PP. 980 __ LoadFunctionFromCalleePool(function_reg, function, new_pp);
991 // Restore PP of caller.
992 __ mov(PP, Operand(R7));
993 __ set_constant_pool_allowed(false);
994 981
995 // Patch point is after the eventually inlined function object. 982 __ ldr(R3, FieldAddress(function_reg,
996 entry_patch_pc_offset_ = assembler()->CodeSize();
997
998 __ ldr(R7, FieldAddress(function_reg,
999 Function::usage_counter_offset())); 983 Function::usage_counter_offset()));
1000 // Reoptimization of an optimized function is triggered by counting in 984 // Reoptimization of an optimized function is triggered by counting in
1001 // IC stubs, but not at the entry of the function. 985 // IC stubs, but not at the entry of the function.
1002 if (!is_optimizing()) { 986 if (!is_optimizing()) {
1003 __ add(R7, R7, Operand(1)); 987 __ add(R3, R3, Operand(1));
1004 __ str(R7, FieldAddress(function_reg, 988 __ str(R3, FieldAddress(function_reg,
1005 Function::usage_counter_offset())); 989 Function::usage_counter_offset()));
1006 } 990 }
1007 __ CompareImmediate(R7, GetOptimizationThreshold()); 991 __ CompareImmediate(R3, GetOptimizationThreshold());
1008 ASSERT(function_reg == R6); 992 ASSERT(function_reg == R6);
1009 __ Branch(*StubCode::OptimizeFunction_entry(), GE); 993 __ Branch(*StubCode::OptimizeFunction_entry(), kNotPatchable, new_pp, GE);
1010 } else if (!flow_graph().IsCompiledForOsr()) {
1011 entry_patch_pc_offset_ = assembler()->CodeSize();
1012 } 994 }
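The block above implements the usage-counter policy: unoptimized code increments the counter on entry and branches to the OptimizeFunction stub once the counter reaches the threshold (GE), while optimized code that may reoptimize only reads and compares it, since reoptimization is driven from IC stubs. A standalone C++ model of that policy, with an arbitrary threshold standing in for GetOptimizationThreshold():

#include <cstdio>

// Hypothetical stand-ins for the VM's counter and threshold.
static int usage_counter = 0;
constexpr int kOptimizationThreshold = 3;

bool EnterFunction(bool is_optimizing) {
  if (!is_optimizing) {
    ++usage_counter;  // only unoptimized entries bump the counter.
  }
  return usage_counter >= kOptimizationThreshold;  // branch to stub if GE.
}

int main() {
  for (int call = 1; call <= 4; ++call) {
    const bool optimize = EnterFunction(/*is_optimizing=*/false);
    std::printf("call %d: counter=%d optimize=%s\n",
                call, usage_counter, optimize ? "yes" : "no");
  }
  return 0;
}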
1013 __ Comment("Enter frame"); 995 __ Comment("Enter frame");
1014 if (flow_graph().IsCompiledForOsr()) { 996 if (flow_graph().IsCompiledForOsr()) {
1015 intptr_t extra_slots = StackSize() 997 intptr_t extra_slots = StackSize()
1016 - flow_graph().num_stack_locals() 998 - flow_graph().num_stack_locals()
1017 - flow_graph().num_copied_params(); 999 - flow_graph().num_copied_params();
1018 ASSERT(extra_slots >= 0); 1000 ASSERT(extra_slots >= 0);
1019 __ EnterOsrFrame(extra_slots * kWordSize); 1001 __ EnterOsrFrame(extra_slots * kWordSize);
1020 } else { 1002 } else {
1021 ASSERT(StackSize() >= 0); 1003 ASSERT(StackSize() >= 0);
(...skipping 42 matching lines...)
1064 __ ldr(R0, FieldAddress(R4, ArgumentsDescriptor::count_offset())); 1046 __ ldr(R0, FieldAddress(R4, ArgumentsDescriptor::count_offset()));
1065 __ CompareImmediate(R0, Smi::RawValue(num_fixed_params)); 1047 __ CompareImmediate(R0, Smi::RawValue(num_fixed_params));
1066 __ b(&wrong_num_arguments, NE); 1048 __ b(&wrong_num_arguments, NE);
1067 __ ldr(R1, FieldAddress(R4, 1049 __ ldr(R1, FieldAddress(R4,
1068 ArgumentsDescriptor::positional_count_offset())); 1050 ArgumentsDescriptor::positional_count_offset()));
1069 __ cmp(R0, Operand(R1)); 1051 __ cmp(R0, Operand(R1));
1070 __ b(&correct_num_arguments, EQ); 1052 __ b(&correct_num_arguments, EQ);
1071 __ Bind(&wrong_num_arguments); 1053 __ Bind(&wrong_num_arguments);
1072 if (function.IsClosureFunction()) { 1054 if (function.IsClosureFunction()) {
1073 ASSERT(assembler()->constant_pool_allowed()); 1055 ASSERT(assembler()->constant_pool_allowed());
1074 __ LeaveDartFrame(); // The arguments are still on the stack. 1056 __ LeaveDartFrame(kKeepCalleePP); // Arguments are still on the stack.
1075 // Do not use caller's pool ptr in branch.
1076 ASSERT(!assembler()->constant_pool_allowed());
1077 __ Branch(*StubCode::CallClosureNoSuchMethod_entry()); 1057 __ Branch(*StubCode::CallClosureNoSuchMethod_entry());
1078 __ set_constant_pool_allowed(true);
1079 // The noSuchMethod call may return to the caller, but not here. 1058 // The noSuchMethod call may return to the caller, but not here.
1080 } else { 1059 } else {
1081 __ Stop("Wrong number of arguments"); 1060 __ Stop("Wrong number of arguments");
1082 } 1061 }
1083 __ Bind(&correct_num_arguments); 1062 __ Bind(&correct_num_arguments);
1084 } 1063 }
1085 } else if (!flow_graph().IsCompiledForOsr()) { 1064 } else if (!flow_graph().IsCompiledForOsr()) {
1086 CopyParameters(); 1065 CopyParameters();
1087 } 1066 }
1088 1067
(...skipping 32 matching lines...)
1121 __ StoreToOffset(kWord, R0, FP, (slot_base - i) * kWordSize); 1100 __ StoreToOffset(kWord, R0, FP, (slot_base - i) * kWordSize);
1122 } 1101 }
1123 } 1102 }
1124 } 1103 }
1125 1104
1126 VisitBlocks(); 1105 VisitBlocks();
1127 1106
1128 __ bkpt(0); 1107 __ bkpt(0);
1129 ASSERT(assembler()->constant_pool_allowed()); 1108 ASSERT(assembler()->constant_pool_allowed());
1130 GenerateDeferredCode(); 1109 GenerateDeferredCode();
1131 // Emit function patching code. This will be swapped with the first 3
1132 // instructions at entry point.
1133 patch_code_pc_offset_ = assembler()->CodeSize();
1134 __ BranchPatchable(*StubCode::FixCallersTarget_entry());
1135 1110
1136 if (is_optimizing()) { 1111 if (is_optimizing()) {
1137 lazy_deopt_pc_offset_ = assembler()->CodeSize(); 1112 lazy_deopt_pc_offset_ = assembler()->CodeSize();
1138 __ Branch(*StubCode::DeoptimizeLazy_entry()); 1113 __ Branch(*StubCode::DeoptimizeLazy_entry());
1139 } 1114 }
1140 } 1115 }
1141 1116
1142 1117
1143 void FlowGraphCompiler::GenerateCall(intptr_t token_pos, 1118 void FlowGraphCompiler::GenerateCall(intptr_t token_pos,
1144 const StubEntry& stub_entry, 1119 const StubEntry& stub_entry,
(...skipping 750 matching lines...)
1895 DRegister dreg = EvenDRegisterOf(reg); 1870 DRegister dreg = EvenDRegisterOf(reg);
1896 __ vldrd(dreg, Address(SP, kDoubleSize, Address::PostIndex)); 1871 __ vldrd(dreg, Address(SP, kDoubleSize, Address::PostIndex));
1897 } 1872 }
1898 1873
1899 1874
1900 #undef __ 1875 #undef __
1901 1876
1902 } // namespace dart 1877 } // namespace dart
1903 1878
1904 #endif // defined TARGET_ARCH_ARM 1879 #endif // defined TARGET_ARCH_ARM