OLD | NEW |
1 // Copyright (c) 2013, the Dart project authors. Please see the AUTHORS file | 1 // Copyright (c) 2013, the Dart project authors. Please see the AUTHORS file |
2 // for details. All rights reserved. Use of this source code is governed by a | 2 // for details. All rights reserved. Use of this source code is governed by a |
3 // BSD-style license that can be found in the LICENSE file. | 3 // BSD-style license that can be found in the LICENSE file. |
4 | 4 |
5 #include "vm/globals.h" | 5 #include "vm/globals.h" |
6 #if defined(TARGET_ARCH_ARM) | 6 #if defined(TARGET_ARCH_ARM) |
7 | 7 |
8 #include "vm/assembler.h" | 8 #include "vm/assembler.h" |
9 #include "vm/code_generator.h" | 9 #include "vm/code_generator.h" |
10 #include "vm/cpu.h" | 10 #include "vm/cpu.h" |
(...skipping 24 matching lines...) |
35 // SP : address of last argument in argument array. | 35 // SP : address of last argument in argument array. |
36 // SP + 4*R4 - 4 : address of first argument in argument array. | 36 // SP + 4*R4 - 4 : address of first argument in argument array. |
37 // SP + 4*R4 : address of return value. | 37 // SP + 4*R4 : address of return value. |
38 // R5 : address of the runtime function to call. | 38 // R5 : address of the runtime function to call. |
39 // R4 : number of arguments to the call. | 39 // R4 : number of arguments to the call. |
40 void StubCode::GenerateCallToRuntimeStub(Assembler* assembler) { | 40 void StubCode::GenerateCallToRuntimeStub(Assembler* assembler) { |
41 const intptr_t thread_offset = NativeArguments::thread_offset(); | 41 const intptr_t thread_offset = NativeArguments::thread_offset(); |
42 const intptr_t argc_tag_offset = NativeArguments::argc_tag_offset(); | 42 const intptr_t argc_tag_offset = NativeArguments::argc_tag_offset(); |
43 const intptr_t argv_offset = NativeArguments::argv_offset(); | 43 const intptr_t argv_offset = NativeArguments::argv_offset(); |
44 const intptr_t retval_offset = NativeArguments::retval_offset(); | 44 const intptr_t retval_offset = NativeArguments::retval_offset(); |
| 45 const intptr_t exitframe_last_param_slot_from_fp = 2; |
45 | 46 |
46 __ EnterStubFrame(); | 47 __ EnterStubFrame(); |
47 | 48 |
48 COMPILE_ASSERT((kAbiPreservedCpuRegs & (1 << R7)) != 0); | 49 COMPILE_ASSERT((kAbiPreservedCpuRegs & (1 << R9)) != 0); |
49 __ LoadIsolate(R7); | 50 __ LoadIsolate(R9); |
50 | 51 |
51 // Save exit frame information to enable stack walking as we are about | 52 // Save exit frame information to enable stack walking as we are about |
52 // to transition to Dart VM C++ code. | 53 // to transition to Dart VM C++ code. |
53 __ StoreToOffset(kWord, FP, THR, Thread::top_exit_frame_info_offset()); | 54 __ StoreToOffset(kWord, FP, THR, Thread::top_exit_frame_info_offset()); |
54 | 55 |
55 #if defined(DEBUG) | 56 #if defined(DEBUG) |
56 { Label ok; | 57 { Label ok; |
57 // Check that we are always entering from Dart code. | 58 // Check that we are always entering from Dart code. |
58 __ LoadFromOffset(kWord, R6, R7, Isolate::vm_tag_offset()); | 59 __ LoadFromOffset(kWord, R6, R9, Isolate::vm_tag_offset()); |
59 __ CompareImmediate(R6, VMTag::kDartTagId); | 60 __ CompareImmediate(R6, VMTag::kDartTagId); |
60 __ b(&ok, EQ); | 61 __ b(&ok, EQ); |
61 __ Stop("Not coming from Dart code."); | 62 __ Stop("Not coming from Dart code."); |
62 __ Bind(&ok); | 63 __ Bind(&ok); |
63 } | 64 } |
64 #endif | 65 #endif |
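[Reviewer note] The DEBUG block above boils down to a single invariant check.
A minimal C++ sketch (the accessor spelling is an assumption; the generated
code reads the field at Isolate::vm_tag_offset() directly):

    #include <cassert>
    // Fires with the same message the Stop instruction carries.
    assert(isolate->vm_tag() == VMTag::kDartTagId &&
           "Not coming from Dart code.");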
65 | 66 |
66 // Mark that the isolate is executing VM code. | 67 // Mark that the isolate is executing VM code. |
67 __ StoreToOffset(kWord, R5, R7, Isolate::vm_tag_offset()); | 68 __ StoreToOffset(kWord, R5, R9, Isolate::vm_tag_offset()); |
68 | 69 |
69 // Reserve space for arguments and align frame before entering C++ world. | 70 // Reserve space for arguments and align frame before entering C++ world. |
70 // NativeArguments are passed in registers. | 71 // NativeArguments are passed in registers. |
71 ASSERT(sizeof(NativeArguments) == 4 * kWordSize); | 72 ASSERT(sizeof(NativeArguments) == 4 * kWordSize); |
72 __ ReserveAlignedFrameSpace(0); | 73 __ ReserveAlignedFrameSpace(0); |
73 | 74 |
74 // Pass NativeArguments structure by value and call runtime. | 75 // Pass NativeArguments structure by value and call runtime. |
75 // Registers R0, R1, R2, and R3 are used. | 76 // Registers R0, R1, R2, and R3 are used. |
76 | 77 |
77 ASSERT(thread_offset == 0 * kWordSize); | 78 ASSERT(thread_offset == 0 * kWordSize); |
78 // Set thread in NativeArgs. | 79 // Set thread in NativeArgs. |
79 __ mov(R0, Operand(THR)); | 80 __ mov(R0, Operand(THR)); |
80 | 81 |
81 // There are no runtime calls to closures, so we do not need to set the tag | 82 // There are no runtime calls to closures, so we do not need to set the tag |
82 // bits kClosureFunctionBit and kInstanceFunctionBit in argc_tag_. | 83 // bits kClosureFunctionBit and kInstanceFunctionBit in argc_tag_. |
83 ASSERT(argc_tag_offset == 1 * kWordSize); | 84 ASSERT(argc_tag_offset == 1 * kWordSize); |
84 __ mov(R1, Operand(R4)); // Set argc in NativeArguments. | 85 __ mov(R1, Operand(R4)); // Set argc in NativeArguments. |
85 | 86 |
86 ASSERT(argv_offset == 2 * kWordSize); | 87 ASSERT(argv_offset == 2 * kWordSize); |
87 __ add(R2, FP, Operand(R4, LSL, 2)); // Compute argv. | 88 __ add(R2, FP, Operand(R4, LSL, 2)); // Compute argv. |
88 // Set argv in NativeArguments. | 89 // Set argv in NativeArguments. |
89 __ AddImmediate(R2, kParamEndSlotFromFp * kWordSize); | 90 __ AddImmediate(R2, exitframe_last_param_slot_from_fp * kWordSize); |
90 | 91 |
91 ASSERT(retval_offset == 3 * kWordSize); | 92 ASSERT(retval_offset == 3 * kWordSize); |
92 __ add(R3, R2, Operand(kWordSize)); // Retval is next to 1st argument. | 93 __ add(R3, R2, Operand(kWordSize)); // Retval is next to 1st argument. |
93 | 94 |
94 // Call runtime or redirection via simulator. | 95 // Call runtime or redirection via simulator. |
95 __ blx(R5); | 96 __ blx(R5); |
96 | 97 |
97 // Mark that the isolate is executing Dart code. | 98 // Mark that the isolate is executing Dart code. |
98 __ LoadImmediate(R2, VMTag::kDartTagId); | 99 __ LoadImmediate(R2, VMTag::kDartTagId); |
99 __ StoreToOffset(kWord, R2, R7, Isolate::vm_tag_offset()); | 100 __ StoreToOffset(kWord, R2, R9, Isolate::vm_tag_offset()); |
100 | 101 |
101 // Reset exit frame information in Thread structure. | 102 // Reset exit frame information in Thread structure. |
102 __ LoadImmediate(R2, 0); | 103 __ LoadImmediate(R2, 0); |
103 __ StoreToOffset(kWord, R2, THR, Thread::top_exit_frame_info_offset()); | 104 __ StoreToOffset(kWord, R2, THR, Thread::top_exit_frame_info_offset()); |
104 | 105 |
105 __ LeaveStubFrame(); | 106 __ LeaveStubFrame(); |
106 __ Ret(); | 107 __ Ret(); |
107 } | 108 } |
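[Reviewer note] This stub leans on the AAPCS rule that a four-word POD struct
passed by value travels in R0-R3, so the four moves above are field
initializations. A sketch of the layout the ASSERTs pin down (field names
mirror runtime/vm/native_arguments.h; the exact types here are assumptions):

    struct NativeArgumentsSketch {  // sizeof == 4 * kWordSize, per the ASSERT
      Thread* thread;               // offset 0            -> R0
      intptr_t argc_tag;            // offset 1*kWordSize  -> R1
      RawObject** argv;             // offset 2*kWordSize  -> R2
      RawObject** retval;           // offset 3*kWordSize  -> R3
    };

argv itself is FP + 4*R4 (R4 holds argc, arguments are one word each) plus
exitframe_last_param_slot_from_fp words to step over the saved frame slots;
retval then sits one word past the first argument.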
108 | 109 |
109 | 110 |
(...skipping 23 matching lines...) |
133 // R2 : address of first argument in argument array. | 134 // R2 : address of first argument in argument array. |
134 // R1 : argc_tag including number of arguments and function kind. | 135 // R1 : argc_tag including number of arguments and function kind. |
135 void StubCode::GenerateCallNativeCFunctionStub(Assembler* assembler) { | 136 void StubCode::GenerateCallNativeCFunctionStub(Assembler* assembler) { |
136 const intptr_t thread_offset = NativeArguments::thread_offset(); | 137 const intptr_t thread_offset = NativeArguments::thread_offset(); |
137 const intptr_t argc_tag_offset = NativeArguments::argc_tag_offset(); | 138 const intptr_t argc_tag_offset = NativeArguments::argc_tag_offset(); |
138 const intptr_t argv_offset = NativeArguments::argv_offset(); | 139 const intptr_t argv_offset = NativeArguments::argv_offset(); |
139 const intptr_t retval_offset = NativeArguments::retval_offset(); | 140 const intptr_t retval_offset = NativeArguments::retval_offset(); |
140 | 141 |
141 __ EnterStubFrame(); | 142 __ EnterStubFrame(); |
142 | 143 |
143 COMPILE_ASSERT((kAbiPreservedCpuRegs & (1 << R7)) != 0); | 144 COMPILE_ASSERT((kAbiPreservedCpuRegs & (1 << R9)) != 0); |
144 __ LoadIsolate(R7); | 145 __ LoadIsolate(R9); |
145 | 146 |
146 // Save exit frame information to enable stack walking as we are about | 147 // Save exit frame information to enable stack walking as we are about |
147 // to transition to native code. | 148 // to transition to native code. |
148 __ StoreToOffset(kWord, FP, THR, Thread::top_exit_frame_info_offset()); | 149 __ StoreToOffset(kWord, FP, THR, Thread::top_exit_frame_info_offset()); |
149 | 150 |
150 #if defined(DEBUG) | 151 #if defined(DEBUG) |
151 { Label ok; | 152 { Label ok; |
152 // Check that we are always entering from Dart code. | 153 // Check that we are always entering from Dart code. |
153 __ LoadFromOffset(kWord, R6, R7, Isolate::vm_tag_offset()); | 154 __ LoadFromOffset(kWord, R6, R9, Isolate::vm_tag_offset()); |
154 __ CompareImmediate(R6, VMTag::kDartTagId); | 155 __ CompareImmediate(R6, VMTag::kDartTagId); |
155 __ b(&ok, EQ); | 156 __ b(&ok, EQ); |
156 __ Stop("Not coming from Dart code."); | 157 __ Stop("Not coming from Dart code."); |
157 __ Bind(&ok); | 158 __ Bind(&ok); |
158 } | 159 } |
159 #endif | 160 #endif |
160 | 161 |
161 // Mark that the isolate is executing Native code. | 162 // Mark that the isolate is executing Native code. |
162 __ StoreToOffset(kWord, R5, R7, Isolate::vm_tag_offset()); | 163 __ StoreToOffset(kWord, R5, R9, Isolate::vm_tag_offset()); |
163 | 164 |
164 // Reserve space for the native arguments structure passed on the stack (the | 165 // Reserve space for the native arguments structure passed on the stack (the |
165 // outgoing pointer parameter to the native arguments structure is passed in | 166 // outgoing pointer parameter to the native arguments structure is passed in |
166 // R0) and align frame before entering the C++ world. | 167 // R0) and align frame before entering the C++ world. |
167 __ ReserveAlignedFrameSpace(sizeof(NativeArguments)); | 168 __ ReserveAlignedFrameSpace(sizeof(NativeArguments)); |
168 | 169 |
169 // Initialize NativeArguments structure and call native function. | 170 // Initialize NativeArguments structure and call native function. |
170 // Registers R0, R1, R2, and R3 are used. | 171 // Registers R0, R1, R2, and R3 are used. |
171 | 172 |
172 ASSERT(thread_offset == 0 * kWordSize); | 173 ASSERT(thread_offset == 0 * kWordSize); |
173 // Set thread in NativeArgs. | 174 // Set thread in NativeArgs. |
174 __ mov(R0, Operand(THR)); | 175 __ mov(R0, Operand(THR)); |
175 | 176 |
176 // There are no native calls to closures, so we do not need to set the tag | 177 // There are no native calls to closures, so we do not need to set the tag |
177 // bits kClosureFunctionBit and kInstanceFunctionBit in argc_tag_. | 178 // bits kClosureFunctionBit and kInstanceFunctionBit in argc_tag_. |
178 ASSERT(argc_tag_offset == 1 * kWordSize); | 179 ASSERT(argc_tag_offset == 1 * kWordSize); |
179 // Set argc in NativeArguments: R1 already contains argc. | 180 // Set argc in NativeArguments: R1 already contains argc. |
180 | 181 |
181 ASSERT(argv_offset == 2 * kWordSize); | 182 ASSERT(argv_offset == 2 * kWordSize); |
182 // Set argv in NativeArguments: R2 already contains argv. | 183 // Set argv in NativeArguments: R2 already contains argv. |
183 | 184 |
184 ASSERT(retval_offset == 3 * kWordSize); | 185 ASSERT(retval_offset == 3 * kWordSize); |
185 // Set retval in NativeArgs. | 186 __ add(R3, FP, Operand(3 * kWordSize)); // Set retval in NativeArgs. |
186 __ add(R3, FP, Operand(kCallerSpSlotFromFp * kWordSize)); | |
187 | 187 |
188 // Passing the structure by value as in runtime calls would require changing | 188 // Passing the structure by value as in runtime calls would require changing |
189 // Dart API for native functions. | 189 // Dart API for native functions. |
190 // For now, space is reserved on the stack and we pass a pointer to it. | 190 // For now, space is reserved on the stack and we pass a pointer to it. |
191 __ stm(IA, SP, (1 << R0) | (1 << R1) | (1 << R2) | (1 << R3)); | 191 __ stm(IA, SP, (1 << R0) | (1 << R1) | (1 << R2) | (1 << R3)); |
192 __ mov(R0, Operand(SP)); // Pass the pointer to the NativeArguments. | 192 __ mov(R0, Operand(SP)); // Pass the pointer to the NativeArguments. |
193 | 193 |
194 __ mov(R1, Operand(R5)); // Pass the function entrypoint to call. | 194 __ mov(R1, Operand(R5)); // Pass the function entrypoint to call. |
195 // Call native function invocation wrapper or redirection via simulator. | 195 // Call native function invocation wrapper or redirection via simulator. |
196 #if defined(USING_SIMULATOR) | 196 #if defined(USING_SIMULATOR) |
197 uword entry = reinterpret_cast<uword>(NativeEntry::NativeCallWrapper); | 197 uword entry = reinterpret_cast<uword>(NativeEntry::NativeCallWrapper); |
198 const ExternalLabel label(Simulator::RedirectExternalReference( | 198 entry = Simulator::RedirectExternalReference( |
199 entry, Simulator::kNativeCall, NativeEntry::kNumCallWrapperArguments)); | 199 entry, Simulator::kNativeCall, NativeEntry::kNumCallWrapperArguments); |
200 __ LoadExternalLabel(R2, &label, kNotPatchable); | 200 __ LoadImmediate(R2, entry); |
201 __ blx(R2); | 201 __ blx(R2); |
202 #else | 202 #else |
203 __ LoadExternalLabel( | 203 __ BranchLink(&NativeEntry::NativeCallWrapperLabel(), kNotPatchable); |
204 LR, &NativeEntry::NativeCallWrapperLabel(), kNotPatchable); | |
205 __ blx(LR); | |
206 #endif | 204 #endif |
207 | 205 |
208 // Mark that the isolate is executing Dart code. | 206 // Mark that the isolate is executing Dart code. |
209 __ LoadImmediate(R2, VMTag::kDartTagId); | 207 __ LoadImmediate(R2, VMTag::kDartTagId); |
210 __ StoreToOffset(kWord, R2, R7, Isolate::vm_tag_offset()); | 208 __ StoreToOffset(kWord, R2, R9, Isolate::vm_tag_offset()); |
211 | 209 |
212 // Reset exit frame information in Thread structure. | 210 // Reset exit frame information in Thread structure. |
213 __ LoadImmediate(R2, 0); | 211 __ LoadImmediate(R2, 0); |
214 __ StoreToOffset(kWord, R2, THR, Thread::top_exit_frame_info_offset()); | 212 __ StoreToOffset(kWord, R2, THR, Thread::top_exit_frame_info_offset()); |
215 | 213 |
216 __ LeaveStubFrame(); | 214 __ LeaveStubFrame(); |
217 __ Ret(); | 215 __ Ret(); |
218 } | 216 } |
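[Reviewer note] stm(IA, SP, {R0,R1,R2,R3}) stores the four NativeArguments
fields into the space carved out by ReserveAlignedFrameSpace, incrementing
the store address after each register but leaving SP itself untouched. In C
terms (hypothetical helper, for illustration only; uword is the VM's
word-sized unsigned type):

    void StoreNativeArgs(uword* sp, uword r0, uword r1, uword r2, uword r3) {
      sp[0] = r0;  // thread
      sp[1] = r1;  // argc_tag
      sp[2] = r2;  // argv
      sp[3] = r3;  // retval
    }

R0 is then repointed at this block because the native-call wrapper takes the
structure by reference; passing it by value would change the Dart API for
native functions, as the comment above notes.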
219 | 217 |
220 | 218 |
221 // Input parameters: | 219 // Input parameters: |
222 // LR : return address. | 220 // LR : return address. |
223 // SP : address of return value. | 221 // SP : address of return value. |
224 // R5 : address of the native function to call. | 222 // R5 : address of the native function to call. |
225 // R2 : address of first argument in argument array. | 223 // R2 : address of first argument in argument array. |
226 // R1 : argc_tag including number of arguments and function kind. | 224 // R1 : argc_tag including number of arguments and function kind. |
227 void StubCode::GenerateCallBootstrapCFunctionStub(Assembler* assembler) { | 225 void StubCode::GenerateCallBootstrapCFunctionStub(Assembler* assembler) { |
228 const intptr_t thread_offset = NativeArguments::thread_offset(); | 226 const intptr_t thread_offset = NativeArguments::thread_offset(); |
229 const intptr_t argc_tag_offset = NativeArguments::argc_tag_offset(); | 227 const intptr_t argc_tag_offset = NativeArguments::argc_tag_offset(); |
230 const intptr_t argv_offset = NativeArguments::argv_offset(); | 228 const intptr_t argv_offset = NativeArguments::argv_offset(); |
231 const intptr_t retval_offset = NativeArguments::retval_offset(); | 229 const intptr_t retval_offset = NativeArguments::retval_offset(); |
232 | 230 |
233 __ EnterStubFrame(); | 231 __ EnterStubFrame(); |
234 | 232 |
235 COMPILE_ASSERT((kAbiPreservedCpuRegs & (1 << R7)) != 0); | 233 COMPILE_ASSERT((kAbiPreservedCpuRegs & (1 << R9)) != 0); |
236 __ LoadIsolate(R7); | 234 __ LoadIsolate(R9); |
237 | 235 |
238 // Save exit frame information to enable stack walking as we are about | 236 // Save exit frame information to enable stack walking as we are about |
239 // to transition to native code. | 237 // to transition to native code. |
240 __ StoreToOffset(kWord, FP, THR, Thread::top_exit_frame_info_offset()); | 238 __ StoreToOffset(kWord, FP, THR, Thread::top_exit_frame_info_offset()); |
241 | 239 |
242 #if defined(DEBUG) | 240 #if defined(DEBUG) |
243 { Label ok; | 241 { Label ok; |
244 // Check that we are always entering from Dart code. | 242 // Check that we are always entering from Dart code. |
245 __ LoadFromOffset(kWord, R6, R7, Isolate::vm_tag_offset()); | 243 __ LoadFromOffset(kWord, R6, R9, Isolate::vm_tag_offset()); |
246 __ CompareImmediate(R6, VMTag::kDartTagId); | 244 __ CompareImmediate(R6, VMTag::kDartTagId); |
247 __ b(&ok, EQ); | 245 __ b(&ok, EQ); |
248 __ Stop("Not coming from Dart code."); | 246 __ Stop("Not coming from Dart code."); |
249 __ Bind(&ok); | 247 __ Bind(&ok); |
250 } | 248 } |
251 #endif | 249 #endif |
252 | 250 |
253 // Mark that the isolate is executing Native code. | 251 // Mark that the isolate is executing Native code. |
254 __ StoreToOffset(kWord, R5, R7, Isolate::vm_tag_offset()); | 252 __ StoreToOffset(kWord, R5, R9, Isolate::vm_tag_offset()); |
255 | 253 |
256 // Reserve space for the native arguments structure passed on the stack (the | 254 // Reserve space for the native arguments structure passed on the stack (the |
257 // outgoing pointer parameter to the native arguments structure is passed in | 255 // outgoing pointer parameter to the native arguments structure is passed in |
258 // R0) and align frame before entering the C++ world. | 256 // R0) and align frame before entering the C++ world. |
259 __ ReserveAlignedFrameSpace(sizeof(NativeArguments)); | 257 __ ReserveAlignedFrameSpace(sizeof(NativeArguments)); |
260 | 258 |
261 // Initialize NativeArguments structure and call native function. | 259 // Initialize NativeArguments structure and call native function. |
262 // Registers R0, R1, R2, and R3 are used. | 260 // Registers R0, R1, R2, and R3 are used. |
263 | 261 |
264 ASSERT(thread_offset == 0 * kWordSize); | 262 ASSERT(thread_offset == 0 * kWordSize); |
265 // Set thread in NativeArgs. | 263 // Set thread in NativeArgs. |
266 __ mov(R0, Operand(THR)); | 264 __ mov(R0, Operand(THR)); |
267 | 265 |
268 // There are no native calls to closures, so we do not need to set the tag | 266 // There are no native calls to closures, so we do not need to set the tag |
269 // bits kClosureFunctionBit and kInstanceFunctionBit in argc_tag_. | 267 // bits kClosureFunctionBit and kInstanceFunctionBit in argc_tag_. |
270 ASSERT(argc_tag_offset == 1 * kWordSize); | 268 ASSERT(argc_tag_offset == 1 * kWordSize); |
271 // Set argc in NativeArguments: R1 already contains argc. | 269 // Set argc in NativeArguments: R1 already contains argc. |
272 | 270 |
273 ASSERT(argv_offset == 2 * kWordSize); | 271 ASSERT(argv_offset == 2 * kWordSize); |
274 // Set argv in NativeArguments: R2 already contains argv. | 272 // Set argv in NativeArguments: R2 already contains argv. |
275 | 273 |
276 ASSERT(retval_offset == 3 * kWordSize); | 274 ASSERT(retval_offset == 3 * kWordSize); |
277 // Set retval in NativeArgs. | 275 __ add(R3, FP, Operand(3 * kWordSize)); // Set retval in NativeArgs. |
278 __ add(R3, FP, Operand(kCallerSpSlotFromFp * kWordSize)); | |
279 | 276 |
280 // Passing the structure by value as in runtime calls would require changing | 277 // Passing the structure by value as in runtime calls would require changing |
281 // Dart API for native functions. | 278 // Dart API for native functions. |
282 // For now, space is reserved on the stack and we pass a pointer to it. | 279 // For now, space is reserved on the stack and we pass a pointer to it. |
283 __ stm(IA, SP, (1 << R0) | (1 << R1) | (1 << R2) | (1 << R3)); | 280 __ stm(IA, SP, (1 << R0) | (1 << R1) | (1 << R2) | (1 << R3)); |
284 __ mov(R0, Operand(SP)); // Pass the pointer to the NativeArguments. | 281 __ mov(R0, Operand(SP)); // Pass the pointer to the NativeArguments. |
285 | 282 |
286 // Call native function or redirection via simulator. | 283 // Call native function or redirection via simulator. |
287 __ blx(R5); | 284 __ blx(R5); |
288 | 285 |
289 // Mark that the isolate is executing Dart code. | 286 // Mark that the isolate is executing Dart code. |
290 __ LoadImmediate(R2, VMTag::kDartTagId); | 287 __ LoadImmediate(R2, VMTag::kDartTagId); |
291 __ StoreToOffset(kWord, R2, R7, Isolate::vm_tag_offset()); | 288 __ StoreToOffset(kWord, R2, R9, Isolate::vm_tag_offset()); |
292 | 289 |
293 // Reset exit frame information in Thread structure. | 290 // Reset exit frame information in Thread structure. |
294 __ LoadImmediate(R2, 0); | 291 __ LoadImmediate(R2, 0); |
295 __ StoreToOffset(kWord, R2, THR, Thread::top_exit_frame_info_offset()); | 292 __ StoreToOffset(kWord, R2, THR, Thread::top_exit_frame_info_offset()); |
296 | 293 |
297 __ LeaveStubFrame(); | 294 __ LeaveStubFrame(); |
298 __ Ret(); | 295 __ Ret(); |
299 } | 296 } |
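[Reviewer note] Unlike GenerateCallNativeCFunctionStub, this bootstrap
variant skips NativeEntry::NativeCallWrapper and branches to the C function
directly (blx R5), still handing over the NativeArguments block address in
R0. In C terms (the function-pointer shape is an assumption for
illustration):

    typedef void (*BootstrapNativeFn)(NativeArguments* args);
    reinterpret_cast<BootstrapNativeFn>(r5)(native_args_block);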
300 | 297 |
301 | 298 |
302 // Input parameters: | 299 // Input parameters: |
303 // R4: arguments descriptor array. | 300 // R4: arguments descriptor array. |
304 void StubCode::GenerateCallStaticFunctionStub(Assembler* assembler) { | 301 void StubCode::GenerateCallStaticFunctionStub(Assembler* assembler) { |
305 // Create a stub frame as we are pushing some objects on the stack before | 302 // Create a stub frame as we are pushing some objects on the stack before |
306 // calling into the runtime. | 303 // calling into the runtime. |
307 __ EnterStubFrame(); | 304 __ EnterStubFrame(); |
308 // Setup space on stack for return value and preserve arguments descriptor. | 305 // Setup space on stack for return value and preserve arguments descriptor. |
309 __ LoadObject(R0, Object::null_object()); | 306 __ LoadObject(R0, Object::null_object()); |
310 __ PushList((1 << R0) | (1 << R4)); | 307 __ PushList((1 << R0) | (1 << R4)); |
311 __ CallRuntime(kPatchStaticCallRuntimeEntry, 0); | 308 __ CallRuntime(kPatchStaticCallRuntimeEntry, 0); |
312 // Get Code object result and restore arguments descriptor array. | 309 // Get Code object result and restore arguments descriptor array. |
313 __ PopList((1 << R0) | (1 << R4)); | 310 __ PopList((1 << R0) | (1 << R4)); |
314 // Remove the stub frame. | 311 // Remove the stub frame. |
315 __ LeaveStubFrame(); | 312 __ LeaveStubFrame(); |
316 // Jump to the dart function. | 313 // Jump to the dart function. |
317 __ mov(CODE_REG, Operand(R0)); | |
318 __ ldr(R0, FieldAddress(R0, Code::entry_point_offset())); | 314 __ ldr(R0, FieldAddress(R0, Code::entry_point_offset())); |
319 __ bx(R0); | 315 __ bx(R0); |
320 } | 316 } |
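[Reviewer note] The final two instructions are an indirect tail call. In C
terms (sketch; FieldAddress folds the -kHeapObjectTag untagging adjustment
into the load offset):

    uword entry = *reinterpret_cast<uword*>(
        code_tagged_ptr - kHeapObjectTag + Code::entry_point_offset());
    // bx(entry) jumps without touching LR, so the target returns straight
    // to the original caller of this stub.

The same load-entry-point-and-bx pattern closes the two Fix*Target stubs
below.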
321 | 317 |
322 | 318 |
323 // Called from a static call only when an invalid code has been entered | 319 // Called from a static call only when an invalid code has been entered |
324 // (invalid because its function was optimized or deoptimized). | 320 // (invalid because its function was optimized or deoptimized). |
325 // R4: arguments descriptor array. | 321 // R4: arguments descriptor array. |
326 void StubCode::GenerateFixCallersTargetStub(Assembler* assembler) { | 322 void StubCode::GenerateFixCallersTargetStub(Assembler* assembler) { |
327 // Load code pointer to this stub from the thread: | |
328 // The one that is passed in, is not correct - it points to the code object | |
329 // that needs to be replaced. | |
330 __ ldr(CODE_REG, Address(THR, Thread::fix_callers_target_code_offset())); | |
331 // Create a stub frame as we are pushing some objects on the stack before | 323 // Create a stub frame as we are pushing some objects on the stack before |
332 // calling into the runtime. | 324 // calling into the runtime. |
333 __ EnterStubFrame(); | 325 __ EnterStubFrame(); |
334 // Setup space on stack for return value and preserve arguments descriptor. | 326 // Setup space on stack for return value and preserve arguments descriptor. |
335 __ LoadObject(R0, Object::null_object()); | 327 __ LoadObject(R0, Object::null_object()); |
336 __ PushList((1 << R0) | (1 << R4)); | 328 __ PushList((1 << R0) | (1 << R4)); |
337 __ CallRuntime(kFixCallersTargetRuntimeEntry, 0); | 329 __ CallRuntime(kFixCallersTargetRuntimeEntry, 0); |
338 // Get Code object result and restore arguments descriptor array. | 330 // Get Code object result and restore arguments descriptor array. |
339 __ PopList((1 << R0) | (1 << R4)); | 331 __ PopList((1 << R0) | (1 << R4)); |
340 // Remove the stub frame. | 332 // Remove the stub frame. |
341 __ LeaveStubFrame(); | 333 __ LeaveStubFrame(); |
342 // Jump to the dart function. | 334 // Jump to the dart function. |
343 __ mov(CODE_REG, Operand(R0)); | |
344 __ ldr(R0, FieldAddress(R0, Code::entry_point_offset())); | 335 __ ldr(R0, FieldAddress(R0, Code::entry_point_offset())); |
345 __ bx(R0); | 336 __ bx(R0); |
346 } | 337 } |
347 | 338 |
348 | 339 |
349 // Called from object allocate instruction when the allocation stub has been | 340 // Called from object allocate instruction when the allocation stub has been |
350 // disabled. | 341 // disabled. |
351 void StubCode::GenerateFixAllocationStubTargetStub(Assembler* assembler) { | 342 void StubCode::GenerateFixAllocationStubTargetStub(Assembler* assembler) { |
352 // Load code pointer to this stub from the thread: | |
353 // The one that is passed in, is not correct - it points to the code object | |
354 // that needs to be replaced. | |
355 __ ldr(CODE_REG, Address(THR, Thread::fix_allocation_stub_code_offset())); | |
356 __ EnterStubFrame(); | 343 __ EnterStubFrame(); |
357 // Setup space on stack for return value. | 344 // Setup space on stack for return value. |
358 __ LoadObject(R0, Object::null_object()); | 345 __ LoadObject(R0, Object::null_object()); |
359 __ Push(R0); | 346 __ Push(R0); |
360 __ CallRuntime(kFixAllocationStubTargetRuntimeEntry, 0); | 347 __ CallRuntime(kFixAllocationStubTargetRuntimeEntry, 0); |
361 // Get Code object result. | 348 // Get Code object result. |
362 __ Pop(R0); | 349 __ Pop(R0); |
363 // Remove the stub frame. | 350 // Remove the stub frame. |
364 __ LeaveStubFrame(); | 351 __ LeaveStubFrame(); |
365 // Jump to the dart function. | 352 // Jump to the dart function. |
366 __ mov(CODE_REG, Operand(R0)); | |
367 __ ldr(R0, FieldAddress(R0, Code::entry_point_offset())); | 353 __ ldr(R0, FieldAddress(R0, Code::entry_point_offset())); |
368 __ bx(R0); | 354 __ bx(R0); |
369 } | 355 } |
370 | 356 |
371 | 357 |
372 // Input parameters: | 358 // Input parameters: |
373 // R2: smi-tagged argument count, may be zero. | 359 // R2: smi-tagged argument count, may be zero. |
374 // FP[kParamEndSlotFromFp + 1]: last argument. | 360 // FP[kParamEndSlotFromFp + 1]: last argument. |
375 static void PushArgumentsArray(Assembler* assembler) { | 361 static void PushArgumentsArray(Assembler* assembler) { |
376 // Allocate array to store arguments of caller. | 362 // Allocate array to store arguments of caller. |
(...skipping 33 matching lines...) |
410 // - Materialize objects that require allocation (e.g. Double instances). | 396 // - Materialize objects that require allocation (e.g. Double instances). |
411 // GC can occur only after frame is fully rewritten. | 397 // GC can occur only after frame is fully rewritten. |
412 // Stack after EnterFrame(...) below: | 398 // Stack after EnterFrame(...) below: |
413 // +------------------+ | 399 // +------------------+ |
414 // | Saved PP | <- TOS | 400 // | Saved PP | <- TOS |
415 // +------------------+ | 401 // +------------------+ |
416 // | Saved FP | <- FP of stub | 402 // | Saved FP | <- FP of stub |
417 // +------------------+ | 403 // +------------------+ |
418 // | Saved LR | (deoptimization point) | 404 // | Saved LR | (deoptimization point) |
419 // +------------------+ | 405 // +------------------+ |
420 // | pc marker | | 406 // | PC marker | |
421 // +------------------+ | |
422 // | Saved CODE_REG | | |
423 // +------------------+ | 407 // +------------------+ |
424 // | ... | <- SP of optimized frame | 408 // | ... | <- SP of optimized frame |
425 // | 409 // |
426 // Parts of the code cannot trigger GC; other parts can. | 410 // Parts of the code cannot trigger GC; other parts can. |
427 static void GenerateDeoptimizationSequence(Assembler* assembler, | 411 static void GenerateDeoptimizationSequence(Assembler* assembler, |
428 DeoptStubKind kind) { | 412 bool preserve_result) { |
429 // DeoptimizeCopyFrame expects a Dart frame, i.e. EnterDartFrame(0), but there | 413 // DeoptimizeCopyFrame expects a Dart frame, i.e. EnterDartFrame(0), but there |
430 // is no need to set the correct PC marker or load PP, since they get patched. | 414 // is no need to set the correct PC marker or load PP, since they get patched. |
431 | 415 |
432 // IP has the potentially live LR value. LR was clobbered by the call with | 416 // IP has the potentially live LR value. LR was clobbered by the call with |
433 // the return address, so move it into IP to set up the Dart frame. | 417 // the return address, so move it into IP to set up the Dart frame. |
434 __ eor(IP, IP, Operand(LR)); | 418 __ eor(IP, IP, Operand(LR)); |
435 __ eor(LR, IP, Operand(LR)); | 419 __ eor(LR, IP, Operand(LR)); |
436 __ eor(IP, IP, Operand(LR)); | 420 __ eor(IP, IP, Operand(LR)); |
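[Reviewer note] The three eor instructions are the classic XOR swap,
exchanging IP and LR without a scratch register. A self-contained sketch:

    void XorSwap(uword& a, uword& b) {
      a ^= b;  // a = a0 ^ b0
      b ^= a;  // b = b0 ^ (a0 ^ b0) = a0
      a ^= b;  // a = (a0 ^ b0) ^ a0 = b0
    }

Afterwards LR again holds the potentially live value that was stashed in IP,
and IP holds the return address, ready to be pushed during frame setup.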
437 | 421 |
438 // Set up the frame manually with return address now stored in IP. | 422 // Set up the frame manually. We can't use EnterFrame because we can't |
439 __ EnterFrame((1 << PP) | (1 << CODE_REG) | (1 << FP) | (1 << IP), 0); | 423 // clobber LR (or any other register) with 0, yet. |
| 424 __ sub(SP, SP, Operand(kWordSize)); // Make room for PC marker of 0. |
| 425 __ Push(IP); // Push return address. |
| 426 __ Push(FP); |
| 427 __ mov(FP, Operand(SP)); |
| 428 __ Push(PP); |
| 429 |
440 __ LoadPoolPointer(); | 430 __ LoadPoolPointer(); |
441 | 431 |
| 432 // Now that IP, which holds the return address, has been written to the |
| 433 // stack, we can clobber it with 0 to write the null PC marker. |
| 434 __ mov(IP, Operand(0)); |
| 435 __ str(IP, Address(SP, +3 * kWordSize)); |
| 436 |
442 // The code in this frame may not cause GC. kDeoptimizeCopyFrameRuntimeEntry | 437 // The code in this frame may not cause GC. kDeoptimizeCopyFrameRuntimeEntry |
443 // and kDeoptimizeFillFrameRuntimeEntry are leaf runtime calls. | 438 // and kDeoptimizeFillFrameRuntimeEntry are leaf runtime calls. |
444 const intptr_t saved_result_slot_from_fp = | 439 const intptr_t saved_result_slot_from_fp = |
445 kFirstLocalSlotFromFp + 1 - (kNumberOfCpuRegisters - R0); | 440 kFirstLocalSlotFromFp + 1 - (kNumberOfCpuRegisters - R0); |
446 // Result in R0 is preserved as part of pushing all registers below. | 441 // Result in R0 is preserved as part of pushing all registers below. |
447 | 442 |
448 // Push registers in their enumeration order: lowest register number at | 443 // Push registers in their enumeration order: lowest register number at |
449 // lowest address. | 444 // lowest address. |
450 for (intptr_t i = kNumberOfCpuRegisters - 1; i >= 0; --i) { | 445 __ PushList(kAllCpuRegistersList); |
451 if (i == CODE_REG) { | |
452 // Save the original value of CODE_REG pushed before invoking this stub | |
453 // instead of the value used to call this stub. | |
454 COMPILE_ASSERT(IP > CODE_REG); // Assert IP is pushed first. | |
455 __ ldr(IP, Address(FP, kCallerSpSlotFromFp * kWordSize)); | |
456 __ Push(IP); | |
457 } else { | |
458 __ Push(static_cast<Register>(i)); | |
459 } | |
460 } | |
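[Reviewer note] With all kNumberOfCpuRegisters pushed, lowest register number
at lowest address, register Rn lands at FP-relative slot
kFirstLocalSlotFromFp + 1 - (kNumberOfCpuRegisters - n). For n == 0 and 16
CPU registers that works out to kFirstLocalSlotFromFp - 15, which is exactly
the saved_result_slot_from_fp computed above and reloaded into R1 after the
runtime call when the result must be preserved.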
461 | 446 |
462 if (TargetCPUFeatures::vfp_supported()) { | 447 if (TargetCPUFeatures::vfp_supported()) { |
463 ASSERT(kFpuRegisterSize == 4 * kWordSize); | 448 ASSERT(kFpuRegisterSize == 4 * kWordSize); |
464 if (kNumberOfDRegisters > 16) { | 449 if (kNumberOfDRegisters > 16) { |
465 __ vstmd(DB_W, SP, D16, kNumberOfDRegisters - 16); | 450 __ vstmd(DB_W, SP, D16, kNumberOfDRegisters - 16); |
466 __ vstmd(DB_W, SP, D0, 16); | 451 __ vstmd(DB_W, SP, D0, 16); |
467 } else { | 452 } else { |
468 __ vstmd(DB_W, SP, D0, kNumberOfDRegisters); | 453 __ vstmd(DB_W, SP, D0, kNumberOfDRegisters); |
469 } | 454 } |
470 } else { | 455 } else { |
471 __ AddImmediate(SP, SP, -kNumberOfFpuRegisters * kFpuRegisterSize); | 456 __ AddImmediate(SP, SP, -kNumberOfFpuRegisters * kFpuRegisterSize); |
472 } | 457 } |
473 | 458 |
474 __ mov(R0, Operand(SP)); // Pass address of saved registers block. | 459 __ mov(R0, Operand(SP)); // Pass address of saved registers block. |
475 __ mov(R1, Operand(kind == kLazyDeopt ? 1 : 0)); | |
476 __ ReserveAlignedFrameSpace(0); | 460 __ ReserveAlignedFrameSpace(0); |
477 __ CallRuntime(kDeoptimizeCopyFrameRuntimeEntry, 2); | 461 __ CallRuntime(kDeoptimizeCopyFrameRuntimeEntry, 1); |
478 // Result (R0) is stack-size (FP - SP) in bytes. | 462 // Result (R0) is stack-size (FP - SP) in bytes. |
479 | 463 |
480 const bool preserve_result = (kind == kLazyDeopt); | |
481 if (preserve_result) { | 464 if (preserve_result) { |
482 // Restore result into R1 temporarily. | 465 // Restore result into R1 temporarily. |
483 __ ldr(R1, Address(FP, saved_result_slot_from_fp * kWordSize)); | 466 __ ldr(R1, Address(FP, saved_result_slot_from_fp * kWordSize)); |
484 } | 467 } |
485 | 468 |
486 __ RestoreCodePointer(); | |
487 __ LeaveDartFrame(); | 469 __ LeaveDartFrame(); |
488 __ sub(SP, FP, Operand(R0)); | 470 __ sub(SP, FP, Operand(R0)); |
489 | 471 |
490 // DeoptimizeFillFrame expects a Dart frame, i.e. EnterDartFrame(0), but there | 472 // DeoptimizeFillFrame expects a Dart frame, i.e. EnterDartFrame(0), but there |
491 // is no need to set the correct PC marker or load PP, since they get patched. | 473 // is no need to set the correct PC marker or load PP, since they get patched. |
492 __ EnterStubFrame(); | 474 __ EnterStubFrame(); |
493 __ mov(R0, Operand(FP)); // Get last FP address. | 475 __ mov(R0, Operand(FP)); // Get last FP address. |
494 if (preserve_result) { | 476 if (preserve_result) { |
495 __ Push(R1); // Preserve result as first local. | 477 __ Push(R1); // Preserve result as first local. |
496 } | 478 } |
497 __ ReserveAlignedFrameSpace(0); | 479 __ ReserveAlignedFrameSpace(0); |
498 __ CallRuntime(kDeoptimizeFillFrameRuntimeEntry, 1); // Pass last FP in R0. | 480 __ CallRuntime(kDeoptimizeFillFrameRuntimeEntry, 1); // Pass last FP in R0. |
499 if (preserve_result) { | 481 if (preserve_result) { |
500 // Restore result into R1. | 482 // Restore result into R1. |
501 __ ldr(R1, Address(FP, kFirstLocalSlotFromFp * kWordSize)); | 483 __ ldr(R1, Address(FP, kFirstLocalSlotFromFp * kWordSize)); |
502 } | 484 } |
503 // Code above cannot cause GC. | 485 // Code above cannot cause GC. |
504 __ RestoreCodePointer(); | |
505 __ LeaveStubFrame(); | 486 __ LeaveStubFrame(); |
506 | 487 |
507 // Frame is fully rewritten at this point and it is safe to perform a GC. | 488 // Frame is fully rewritten at this point and it is safe to perform a GC. |
508 // Materialize any objects that were deferred by FillFrame because they | 489 // Materialize any objects that were deferred by FillFrame because they |
509 // require allocation. | 490 // require allocation. |
510 // Enter stub frame with loading PP. The caller's PP is not materialized yet. | 491 // Enter stub frame with loading PP. The caller's PP is not materialized yet. |
511 __ EnterStubFrame(); | 492 __ EnterStubFrame(); |
512 if (preserve_result) { | 493 if (preserve_result) { |
513 __ Push(R1); // Preserve result, it will be GC-d here. | 494 __ Push(R1); // Preserve result, it will be GC-d here. |
514 } | 495 } |
515 __ PushObject(Smi::ZoneHandle()); // Space for the result. | 496 __ PushObject(Smi::ZoneHandle()); // Space for the result. |
516 __ CallRuntime(kDeoptimizeMaterializeRuntimeEntry, 0); | 497 __ CallRuntime(kDeoptimizeMaterializeRuntimeEntry, 0); |
517 // Result tells stub how many bytes to remove from the expression stack | 498 // Result tells stub how many bytes to remove from the expression stack |
518 // of the bottom-most frame. They were used as materialization arguments. | 499 // of the bottom-most frame. They were used as materialization arguments. |
519 __ Pop(R1); | 500 __ Pop(R1); |
520 if (preserve_result) { | 501 if (preserve_result) { |
521 __ Pop(R0); // Restore result. | 502 __ Pop(R0); // Restore result. |
522 } | 503 } |
523 __ LeaveStubFrame(); | 504 __ LeaveStubFrame(); |
524 // Remove materialization arguments. | 505 // Remove materialization arguments. |
525 __ add(SP, SP, Operand(R1, ASR, kSmiTagSize)); | 506 __ add(SP, SP, Operand(R1, ASR, kSmiTagSize)); |
526 __ Ret(); | 507 __ Ret(); |
527 } | 508 } |
528 | 509 |
529 | 510 |
530 void StubCode::GenerateDeoptimizeLazyStub(Assembler* assembler) { | 511 void StubCode::GenerateDeoptimizeLazyStub(Assembler* assembler) { |
531 // Correct return address to point just after the call that is being | 512 // Correct return address to point just after the call that is being |
532 // deoptimized. | 513 // deoptimized. |
533 __ AddImmediate(LR, -CallPattern::DeoptCallPatternLengthInBytes()); | 514 __ AddImmediate(LR, -CallPattern::LengthInBytes()); |
534 // Push zap value instead of CODE_REG for lazy deopt. | 515 GenerateDeoptimizationSequence(assembler, true); // Preserve R0. |
535 __ LoadImmediate(IP, 0xf1f1f1f1); | |
536 __ Push(IP); | |
537 GenerateDeoptimizationSequence(assembler, kLazyDeopt); | |
538 } | 516 } |
539 | 517 |
540 | 518 |
541 void StubCode::GenerateDeoptimizeStub(Assembler* assembler) { | 519 void StubCode::GenerateDeoptimizeStub(Assembler* assembler) { |
542 GenerateDeoptimizationSequence(assembler, kEagerDeopt); | 520 GenerateDeoptimizationSequence(assembler, false); // Don't preserve R0. |
543 } | 521 } |
544 | 522 |
545 | 523 |
546 static void GenerateDispatcherCode(Assembler* assembler, | 524 static void GenerateDispatcherCode(Assembler* assembler, |
547 Label* call_target_function) { | 525 Label* call_target_function) { |
548 __ Comment("NoSuchMethodDispatch"); | 526 __ Comment("NoSuchMethodDispatch"); |
549 // When lazily generated invocation dispatchers are disabled, the | 527 // When lazily generated invocation dispatchers are disabled, the |
550 // miss-handler may return null. | 528 // miss-handler may return null. |
551 __ CompareObject(R0, Object::null_object()); | 529 __ CompareObject(R0, Object::null_object()); |
552 __ b(call_target_function, NE); | 530 __ b(call_target_function, NE); |
(...skipping 35 matching lines...) |
588 __ LoadObject(IP, Object::null_object()); | 566 __ LoadObject(IP, Object::null_object()); |
589 __ PushList((1 << R4) | (1 << R5) | (1 << R6) | (1 << IP)); | 567 __ PushList((1 << R4) | (1 << R5) | (1 << R6) | (1 << IP)); |
590 __ CallRuntime(kMegamorphicCacheMissHandlerRuntimeEntry, 3); | 568 __ CallRuntime(kMegamorphicCacheMissHandlerRuntimeEntry, 3); |
591 // Remove arguments. | 569 // Remove arguments. |
592 __ Drop(3); | 570 __ Drop(3); |
593 __ Pop(R0); // Get result into R0 (target function). | 571 __ Pop(R0); // Get result into R0 (target function). |
594 | 572 |
595 // Restore IC data and arguments descriptor. | 573 // Restore IC data and arguments descriptor. |
596 __ PopList((1 << R4) | (1 << R5)); | 574 __ PopList((1 << R4) | (1 << R5)); |
597 | 575 |
598 __ RestoreCodePointer(); | |
599 __ LeaveStubFrame(); | 576 __ LeaveStubFrame(); |
600 | 577 |
601 if (!FLAG_lazy_dispatchers) { | 578 if (!FLAG_lazy_dispatchers) { |
602 Label call_target_function; | 579 Label call_target_function; |
603 GenerateDispatcherCode(assembler, &call_target_function); | 580 GenerateDispatcherCode(assembler, &call_target_function); |
604 __ Bind(&call_target_function); | 581 __ Bind(&call_target_function); |
605 } | 582 } |
606 | 583 |
607 // Tail-call to target function. | 584 // Tail-call to target function. |
608 __ ldr(CODE_REG, FieldAddress(R0, Function::code_offset())); | |
609 __ ldr(R2, FieldAddress(R0, Function::entry_point_offset())); | 585 __ ldr(R2, FieldAddress(R0, Function::entry_point_offset())); |
610 __ bx(R2); | 586 __ bx(R2); |
611 } | 587 } |
612 | 588 |
613 | 589 |
614 // Called for inline allocation of arrays. | 590 // Called for inline allocation of arrays. |
615 // Input parameters: | 591 // Input parameters: |
616 // LR: return address. | 592 // LR: return address. |
617 // R1: array element type (either NULL or an instantiated type). | 593 // R1: array element type (either NULL or an instantiated type). |
618 // R2: array length as Smi (must be preserved). | 594 // R2: array length as Smi (must be preserved). |
(...skipping 18 matching lines...) |
637 const intptr_t max_len = | 613 const intptr_t max_len = |
638 reinterpret_cast<int32_t>(Smi::New(Array::kMaxElements)); | 614 reinterpret_cast<int32_t>(Smi::New(Array::kMaxElements)); |
639 __ CompareImmediate(R3, max_len); | 615 __ CompareImmediate(R3, max_len); |
640 __ b(&slow_case, GT); | 616 __ b(&slow_case, GT); |
641 | 617 |
642 const intptr_t cid = kArrayCid; | 618 const intptr_t cid = kArrayCid; |
643 __ MaybeTraceAllocation(cid, R4, &slow_case, | 619 __ MaybeTraceAllocation(cid, R4, &slow_case, |
644 /* inline_isolate = */ false); | 620 /* inline_isolate = */ false); |
645 | 621 |
646 const intptr_t fixed_size = sizeof(RawArray) + kObjectAlignment - 1; | 622 const intptr_t fixed_size = sizeof(RawArray) + kObjectAlignment - 1; |
647 __ LoadImmediate(R5, fixed_size); | 623 __ LoadImmediate(R9, fixed_size); |
648 __ add(R5, R5, Operand(R3, LSL, 1)); // R3 is a Smi. | 624 __ add(R9, R9, Operand(R3, LSL, 1)); // R3 is a Smi. |
649 ASSERT(kSmiTagShift == 1); | 625 ASSERT(kSmiTagShift == 1); |
650 __ bic(R5, R5, Operand(kObjectAlignment - 1)); | 626 __ bic(R9, R9, Operand(kObjectAlignment - 1)); |
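[Reviewer note] The size computation above, written out in C (sketch;
constants are the VM's). The register holds the length as a Smi, i.e.
length << 1, so one further left shift yields length * kWordSize bytes on a
32-bit target:

    intptr_t AllocationSize(intptr_t len_smi) {
      intptr_t size = sizeof(RawArray) + kObjectAlignment - 1;  // fixed part
      size += len_smi << 1;                  // + length * 4 bytes of payload
      return size & ~(kObjectAlignment - 1); // bic: clear low bits, netting
    }                                        // a round-up to alignment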
651 | 627 |
652 // R5: Allocation size. | 628 // R9: Allocation size. |
653 Heap::Space space = Heap::SpaceForAllocation(cid); | 629 Heap::Space space = Heap::SpaceForAllocation(cid); |
654 __ LoadIsolate(R6); | 630 __ LoadIsolate(R6); |
655 __ ldr(R6, Address(R6, Isolate::heap_offset())); | 631 __ ldr(R6, Address(R6, Isolate::heap_offset())); |
656 // Potential new object start. | 632 // Potential new object start. |
657 __ ldr(R0, Address(R6, Heap::TopOffset(space))); | 633 __ ldr(R0, Address(R6, Heap::TopOffset(space))); |
658 __ adds(R7, R0, Operand(R5)); // Potential next object start. | 634 __ adds(R7, R0, Operand(R9)); // Potential next object start. |
659 __ b(&slow_case, CS); // Branch if unsigned overflow. | 635 __ b(&slow_case, CS); // Branch if unsigned overflow. |
660 | 636 |
661 // Check if the allocation fits into the remaining space. | 637 // Check if the allocation fits into the remaining space. |
662 // R0: potential new object start. | 638 // R0: potential new object start. |
663 // R7: potential next object start. | 639 // R7: potential next object start. |
664 // R5: allocation size. | 640 // R9: allocation size. |
665 __ ldr(R3, Address(R6, Heap::EndOffset(space))); | 641 __ ldr(R3, Address(R6, Heap::EndOffset(space))); |
666 __ cmp(R7, Operand(R3)); | 642 __ cmp(R7, Operand(R3)); |
667 __ b(&slow_case, CS); | 643 __ b(&slow_case, CS); |
668 | 644 |
669 // Successfully allocated the object(s), now update top to point to | 645 // Successfully allocated the object(s), now update top to point to |
670 // next object start and initialize the object. | 646 // next object start and initialize the object. |
671 __ LoadAllocationStatsAddress(R3, cid, /* inline_isolate = */ false); | 647 __ LoadAllocationStatsAddress(R3, cid, /* inline_isolate = */ false); |
672 __ str(R7, Address(R6, Heap::TopOffset(space))); | 648 __ str(R7, Address(R6, Heap::TopOffset(space))); |
673 __ add(R0, R0, Operand(kHeapObjectTag)); | 649 __ add(R0, R0, Operand(kHeapObjectTag)); |
674 | 650 |
675 // Initialize the tags. | 651 // Initialize the tags. |
676 // R0: new object start as a tagged pointer. | 652 // R0: new object start as a tagged pointer. |
677 // R3: allocation stats address. | 653 // R3: allocation stats address. |
678 // R7: new object end address. | 654 // R7: new object end address. |
679 // R5: allocation size. | 655 // R9: allocation size. |
680 { | 656 { |
681 const intptr_t shift = RawObject::kSizeTagPos - kObjectAlignmentLog2; | 657 const intptr_t shift = RawObject::kSizeTagPos - kObjectAlignmentLog2; |
682 | 658 |
683 __ CompareImmediate(R5, RawObject::SizeTag::kMaxSizeTag); | 659 __ CompareImmediate(R9, RawObject::SizeTag::kMaxSizeTag); |
684 __ mov(R6, Operand(R5, LSL, shift), LS); | 660 __ mov(R6, Operand(R9, LSL, shift), LS); |
685 __ mov(R6, Operand(0), HI); | 661 __ mov(R6, Operand(0), HI); |
686 | 662 |
687 // Get the class index and insert it into the tags. | 663 // Get the class index and insert it into the tags. |
688 // R6: size and bit tags. | 664 // R6: size and bit tags. |
689 __ LoadImmediate(TMP, RawObject::ClassIdTag::encode(cid)); | 665 __ LoadImmediate(TMP, RawObject::ClassIdTag::encode(cid)); |
690 __ orr(R6, R6, Operand(TMP)); | 666 __ orr(R6, R6, Operand(TMP)); |
691 __ str(R6, FieldAddress(R0, Array::tags_offset())); // Store tags. | 667 __ str(R6, FieldAddress(R0, Array::tags_offset())); // Store tags. |
692 } | 668 } |
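[Reviewer note] The tags block in C terms (sketch): when the allocation size
exceeds RawObject::SizeTag::kMaxSizeTag, the size tag is stored as 0 (the HI
conditional move) and readers must recover the size from the class;
otherwise the aligned size is shifted into place, and the class id is or-ed
in:

    uword tags = (size <= RawObject::SizeTag::kMaxSizeTag)
        ? size << (RawObject::kSizeTagPos - kObjectAlignmentLog2)
        : 0;
    tags |= RawObject::ClassIdTag::encode(kArrayCid);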
693 | 669 |
694 // R0: new object start as a tagged pointer. | 670 // R0: new object start as a tagged pointer. |
695 // R7: new object end address. | 671 // R7: new object end address. |
696 // Store the type argument field. | 672 // Store the type argument field. |
697 __ InitializeFieldNoBarrier(R0, | 673 __ InitializeFieldNoBarrier(R0, |
698 FieldAddress(R0, Array::type_arguments_offset()), | 674 FieldAddress(R0, Array::type_arguments_offset()), |
699 R1); | 675 R1); |
700 | 676 |
701 // Set the length field. | 677 // Set the length field. |
702 __ InitializeFieldNoBarrier(R0, | 678 __ InitializeFieldNoBarrier(R0, |
703 FieldAddress(R0, Array::length_offset()), | 679 FieldAddress(R0, Array::length_offset()), |
704 R2); | 680 R2); |
705 | 681 |
706 // Initialize all array elements to raw_null. | 682 // Initialize all array elements to raw_null. |
707 // R0: new object start as a tagged pointer. | 683 // R0: new object start as a tagged pointer. |
708 // R3: allocation stats address. | 684 // R3: allocation stats address. |
709 // R4, R5: null | 685 // R4, R5: null |
710 // R6: iterator which initially points to the start of the variable | 686 // R6: iterator which initially points to the start of the variable |
711 // data area to be initialized. | 687 // data area to be initialized. |
712 // R7: new object end address. | 688 // R7: new object end address. |
713 // R5: allocation size. | 689 // R9: allocation size. |
714 __ IncrementAllocationStatsWithSize(R3, R5, space); | |
715 | 690 |
716 __ LoadObject(R4, Object::null_object()); | 691 __ LoadObject(R4, Object::null_object()); |
717 __ mov(R5, Operand(R4)); | 692 __ mov(R5, Operand(R4)); |
718 __ AddImmediate(R6, R0, sizeof(RawArray) - kHeapObjectTag); | 693 __ AddImmediate(R6, R0, sizeof(RawArray) - kHeapObjectTag); |
719 __ InitializeFieldsNoBarrier(R0, R6, R7, R4, R5); | 694 __ InitializeFieldsNoBarrier(R0, R6, R7, R4, R5); |
| 695 __ IncrementAllocationStatsWithSize(R3, R9, space); |
720 __ Ret(); // Returns the newly allocated object in R0. | 696 __ Ret(); // Returns the newly allocated object in R0. |
721 // Unable to allocate the array using the fast inline code, just call | 697 // Unable to allocate the array using the fast inline code, just call |
722 // into the runtime. | 698 // into the runtime. |
723 __ Bind(&slow_case); | 699 __ Bind(&slow_case); |
724 | 700 |
725 // Create a stub frame as we are pushing some objects on the stack before | 701 // Create a stub frame as we are pushing some objects on the stack before |
726 // calling into the runtime. | 702 // calling into the runtime. |
727 __ EnterStubFrame(); | 703 __ EnterStubFrame(); |
728 __ LoadObject(IP, Object::null_object()); | 704 __ LoadObject(IP, Object::null_object()); |
729 // Setup space on stack for return value. | 705 // Setup space on stack for return value. |
730 // Push array length as Smi and element type. | 706 // Push array length as Smi and element type. |
731 __ PushList((1 << R1) | (1 << R2) | (1 << IP)); | 707 __ PushList((1 << R1) | (1 << R2) | (1 << IP)); |
732 __ CallRuntime(kAllocateArrayRuntimeEntry, 2); | 708 __ CallRuntime(kAllocateArrayRuntimeEntry, 2); |
733 // Pop arguments; result is popped in IP. | 709 // Pop arguments; result is popped in IP. |
734 __ PopList((1 << R1) | (1 << R2) | (1 << IP)); // R2 is restored. | 710 __ PopList((1 << R1) | (1 << R2) | (1 << IP)); // R2 is restored. |
735 __ mov(R0, Operand(IP)); | 711 __ mov(R0, Operand(IP)); |
736 __ LeaveStubFrame(); | 712 __ LeaveStubFrame(); |
737 __ Ret(); | 713 __ Ret(); |
738 } | 714 } |
739 | 715 |
740 | 716 |
741 // Called when invoking Dart code from C++ (VM code). | 717 // Called when invoking Dart code from C++ (VM code). |
742 // Input parameters: | 718 // Input parameters: |
743 // LR : points to return address. | 719 // LR : points to return address. |
744 // R0 : code object of the Dart function to call. | 720 // R0 : entrypoint of the Dart function to call. |
745 // R1 : arguments descriptor array. | 721 // R1 : arguments descriptor array. |
746 // R2 : arguments array. | 722 // R2 : arguments array. |
747 // R3 : current thread. | 723 // R3 : current thread. |
748 void StubCode::GenerateInvokeDartCodeStub(Assembler* assembler) { | 724 void StubCode::GenerateInvokeDartCodeStub(Assembler* assembler) { |
749 // Save frame pointer coming in. | 725 // Save frame pointer coming in. |
750 __ EnterFrame((1 << FP) | (1 << LR), 0); | 726 __ EnterFrame((1 << FP) | (1 << LR), 0); |
751 | 727 |
752 // Save new context and C++ ABI callee-saved registers. | 728 // Save new context and C++ ABI callee-saved registers. |
753 __ PushList(kAbiPreservedCpuRegs); | 729 __ PushList(kAbiPreservedCpuRegs); |
754 | 730 |
755 const DRegister firstd = EvenDRegisterOf(kAbiFirstPreservedFpuReg); | 731 const DRegister firstd = EvenDRegisterOf(kAbiFirstPreservedFpuReg); |
756 if (TargetCPUFeatures::vfp_supported()) { | 732 if (TargetCPUFeatures::vfp_supported()) { |
757 ASSERT(2 * kAbiPreservedFpuRegCount < 16); | 733 ASSERT(2 * kAbiPreservedFpuRegCount < 16); |
758 // Save FPU registers. 2 D registers per Q register. | 734 // Save FPU registers. 2 D registers per Q register. |
759 __ vstmd(DB_W, SP, firstd, 2 * kAbiPreservedFpuRegCount); | 735 __ vstmd(DB_W, SP, firstd, 2 * kAbiPreservedFpuRegCount); |
760 } else { | 736 } else { |
761 __ sub(SP, SP, Operand(kAbiPreservedFpuRegCount * kFpuRegisterSize)); | 737 __ sub(SP, SP, Operand(kAbiPreservedFpuRegCount * kFpuRegisterSize)); |
762 } | 738 } |
763 | 739 |
| 740 // We now load the pool pointer (PP) as we are about to invoke Dart code and |
| 741 // we could potentially invoke some intrinsic functions which need PP to be |
| 742 // set up. |
| 743 __ LoadPoolPointer(); |
| 744 |
764 // Set up THR, which caches the current thread in Dart code. | 745 // Set up THR, which caches the current thread in Dart code. |
765 if (THR != R3) { | 746 if (THR != R3) { |
766 __ mov(THR, Operand(R3)); | 747 __ mov(THR, Operand(R3)); |
767 } | 748 } |
768 __ LoadIsolate(R7); | 749 __ LoadIsolate(R9); |
769 | 750 |
770 // Save the current VMTag on the stack. | 751 // Save the current VMTag on the stack. |
771 __ LoadFromOffset(kWord, R5, R7, Isolate::vm_tag_offset()); | 752 __ LoadFromOffset(kWord, R5, R9, Isolate::vm_tag_offset()); |
772 __ Push(R5); | 753 __ Push(R5); |
773 | 754 |
774 // Mark that the isolate is executing Dart code. | 755 // Mark that the isolate is executing Dart code. |
775 __ LoadImmediate(R5, VMTag::kDartTagId); | 756 __ LoadImmediate(R5, VMTag::kDartTagId); |
776 __ StoreToOffset(kWord, R5, R7, Isolate::vm_tag_offset()); | 757 __ StoreToOffset(kWord, R5, R9, Isolate::vm_tag_offset()); |
777 | 758 |
778 // Save top resource and top exit frame info. Use R4-6 as temporary registers. | 759 // Save top resource and top exit frame info. Use R4-6 as temporary registers. |
779 // StackFrameIterator reads the top exit frame info saved in this frame. | 760 // StackFrameIterator reads the top exit frame info saved in this frame. |
780 __ LoadFromOffset(kWord, R5, THR, Thread::top_exit_frame_info_offset()); | 761 __ LoadFromOffset(kWord, R5, THR, Thread::top_exit_frame_info_offset()); |
781 __ LoadFromOffset(kWord, R4, THR, Thread::top_resource_offset()); | 762 __ LoadFromOffset(kWord, R4, THR, Thread::top_resource_offset()); |
782 __ LoadImmediate(R6, 0); | 763 __ LoadImmediate(R6, 0); |
783 __ StoreToOffset(kWord, R6, THR, Thread::top_resource_offset()); | 764 __ StoreToOffset(kWord, R6, THR, Thread::top_resource_offset()); |
784 __ StoreToOffset(kWord, R6, THR, Thread::top_exit_frame_info_offset()); | 765 __ StoreToOffset(kWord, R6, THR, Thread::top_exit_frame_info_offset()); |
785 | 766 |
786 // kExitLinkSlotFromEntryFp must be kept in sync with the code below. | 767 // kExitLinkSlotFromEntryFp must be kept in sync with the code below. |
(...skipping 21 matching lines...) |
808 __ Bind(&push_arguments); | 789 __ Bind(&push_arguments); |
809 __ ldr(R3, Address(R2)); | 790 __ ldr(R3, Address(R2)); |
810 __ Push(R3); | 791 __ Push(R3); |
811 __ AddImmediate(R2, kWordSize); | 792 __ AddImmediate(R2, kWordSize); |
812 __ AddImmediate(R1, 1); | 793 __ AddImmediate(R1, 1); |
813 __ cmp(R1, Operand(R5)); | 794 __ cmp(R1, Operand(R5)); |
814 __ b(&push_arguments, LT); | 795 __ b(&push_arguments, LT); |
815 __ Bind(&done_push_arguments); | 796 __ Bind(&done_push_arguments); |
816 | 797 |
817 // Call the Dart code entrypoint. | 798 // Call the Dart code entrypoint. |
818 __ LoadImmediate(PP, 0); // GC safe value into PP. | |
819 __ ldr(CODE_REG, Address(R0, VMHandles::kOffsetOfRawPtrInHandle)); | |
820 __ ldr(R0, FieldAddress(CODE_REG, Code::entry_point_offset())); | |
821 __ blx(R0); // R4 is the arguments descriptor array. | 799 __ blx(R0); // R4 is the arguments descriptor array. |
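[Reviewer note] The deleted OLD lines show the indirection the NEW calling
convention removes: R0 used to hold a VM handle to the Code object, which
had to be chased to the entry point, roughly (sketch, raw-pointer details
simplified):

    uword code = *reinterpret_cast<uword*>(
        r0 + VMHandles::kOffsetOfRawPtrInHandle);            // Code object
    uword entry = *reinterpret_cast<uword*>(
        code - kHeapObjectTag + Code::entry_point_offset()); // entry point

With the NEW header comment ("R0 : entrypoint of the Dart function to
call"), R0 is already that entry point and the stub branches immediately.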
822 | 800 |
823 // Get rid of arguments pushed on the stack. | 801 // Get rid of arguments pushed on the stack. |
824 __ AddImmediate(SP, FP, kExitLinkSlotFromEntryFp * kWordSize); | 802 __ AddImmediate(SP, FP, kExitLinkSlotFromEntryFp * kWordSize); |
825 | 803 |
826 __ LoadIsolate(R7); | 804 __ LoadIsolate(R9); |
827 // Restore the saved top exit frame info and top resource back into the | 805 // Restore the saved top exit frame info and top resource back into the |
828 // Isolate structure. Uses R5 as a temporary register for this. | 806 // Isolate structure. Uses R5 as a temporary register for this. |
829 __ Pop(R5); | 807 __ Pop(R5); |
830 __ StoreToOffset(kWord, R5, THR, Thread::top_exit_frame_info_offset()); | 808 __ StoreToOffset(kWord, R5, THR, Thread::top_exit_frame_info_offset()); |
831 __ Pop(R5); | 809 __ Pop(R5); |
832 __ StoreToOffset(kWord, R5, THR, Thread::top_resource_offset()); | 810 __ StoreToOffset(kWord, R5, THR, Thread::top_resource_offset()); |
833 | 811 |
834 // Restore the current VMTag from the stack. | 812 // Restore the current VMTag from the stack. |
835 __ Pop(R4); | 813 __ Pop(R4); |
836 __ StoreToOffset(kWord, R4, R7, Isolate::vm_tag_offset()); | 814 __ StoreToOffset(kWord, R4, R9, Isolate::vm_tag_offset()); |
837 | 815 |
838 // Restore C++ ABI callee-saved registers. | 816 // Restore C++ ABI callee-saved registers. |
839 if (TargetCPUFeatures::vfp_supported()) { | 817 if (TargetCPUFeatures::vfp_supported()) { |
840 // Restore FPU registers. 2 D registers per Q register. | 818 // Restore FPU registers. 2 D registers per Q register. |
841 __ vldmd(IA_W, SP, firstd, 2 * kAbiPreservedFpuRegCount); | 819 __ vldmd(IA_W, SP, firstd, 2 * kAbiPreservedFpuRegCount); |
842 } else { | 820 } else { |
843 __ AddImmediate(SP, kAbiPreservedFpuRegCount * kFpuRegisterSize); | 821 __ AddImmediate(SP, kAbiPreservedFpuRegCount * kFpuRegisterSize); |
844 } | 822 } |
845 // Restore CPU registers. | 823 // Restore CPU registers. |
846 __ PopList(kAbiPreservedCpuRegs); | 824 __ PopList(kAbiPreservedCpuRegs); |
(...skipping 198 matching lines...) |
1045 // Restore callee-saved registers, tear down frame. | 1023 // Restore callee-saved registers, tear down frame. |
1046 __ LeaveCallRuntimeFrame(); | 1024 __ LeaveCallRuntimeFrame(); |
1047 __ Ret(); | 1025 __ Ret(); |
1048 } | 1026 } |
1049 | 1027 |
1050 | 1028 |
1051 // Called for inline allocation of objects. | 1029 // Called for inline allocation of objects. |
1052 // Input parameters: | 1030 // Input parameters: |
1053 // LR : return address. | 1031 // LR : return address. |
1054 // SP + 0 : type arguments object (only if class is parameterized). | 1032 // SP + 0 : type arguments object (only if class is parameterized). |
1055 void StubCode::GenerateAllocationStubForClass(Assembler* assembler, | 1033 // Returns patch_code_pc offset where patching code for disabling the stub |
1056 const Class& cls) { | 1034 // has been generated (similar to regularly generated Dart code). |
1057 // Must load pool pointer before being able to patch. | 1035 void StubCode::GenerateAllocationStubForClass( |
1058 Register new_pp = R7; | 1036 Assembler* assembler, const Class& cls, |
1059 __ LoadPoolPointer(new_pp); | 1037 uword* entry_patch_offset, uword* patch_code_pc_offset) { |
| 1038 *entry_patch_offset = assembler->CodeSize(); |
1060 // The generated code is different if the class is parameterized. | 1039 // The generated code is different if the class is parameterized. |
1061 const bool is_cls_parameterized = cls.NumTypeArguments() > 0; | 1040 const bool is_cls_parameterized = cls.NumTypeArguments() > 0; |
1062 ASSERT(!is_cls_parameterized || | 1041 ASSERT(!is_cls_parameterized || |
1063 (cls.type_arguments_field_offset() != Class::kNoTypeArguments)); | 1042 (cls.type_arguments_field_offset() != Class::kNoTypeArguments)); |
1064 // kInlineInstanceSize is a constant used as a threshold for determining | 1043 // kInlineInstanceSize is a constant used as a threshold for determining |
1065 // when the object initialization should be done as a loop or as | 1044 // when the object initialization should be done as a loop or as |
1066 // straight-line code. | 1045 // straight-line code. |
1067 const int kInlineInstanceSize = 12; | 1046 const int kInlineInstanceSize = 12; |
1068 const intptr_t instance_size = cls.instance_size(); | 1047 const intptr_t instance_size = cls.instance_size(); |
1069 ASSERT(instance_size > 0); | 1048 ASSERT(instance_size > 0); |
(...skipping 105 matching lines...)
1175 // Push null type arguments. | 1154 // Push null type arguments. |
1176 __ Push(R2); | 1155 __ Push(R2); |
1177 } | 1156 } |
1178 __ CallRuntime(kAllocateObjectRuntimeEntry, 2); // Allocate object. | 1157 __ CallRuntime(kAllocateObjectRuntimeEntry, 2); // Allocate object. |
1179 __ Drop(2); // Pop arguments. | 1158 __ Drop(2); // Pop arguments. |
1180 __ Pop(R0); // Pop result (newly allocated object). | 1159 __ Pop(R0); // Pop result (newly allocated object). |
1181 // R0: new object | 1160 // R0: new object |
1182 // Restore the frame pointer. | 1161 // Restore the frame pointer. |
1183 __ LeaveStubFrame(); | 1162 __ LeaveStubFrame(); |
1184 __ Ret(); | 1163 __ Ret(); |
| 1164 *patch_code_pc_offset = assembler->CodeSize(); |
| 1165 __ BranchPatchable(*StubCode::FixAllocationStubTarget_entry()); |
1185 } | 1166 } |
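The tail of the allocation stub above is its generic slow path: reserve a result slot, push the class and the (possibly null) type arguments, call the runtime allocator, then pop the new object into R0. A hedged C++ sketch of that protocol; the types and AllocateObjectRuntime below are stand-ins, not VM API:

    #include <cstdio>

    // Stand-in types mirroring the stack protocol, not the VM's classes.
    struct Object { const char* name; };
    struct Class  { const char* name; bool is_parameterized; };

    static Object kNull = {"null"};

    // Placeholder for the kAllocateObjectRuntimeEntry call in the stub.
    static Object* AllocateObjectRuntime(const Class& cls, Object* type_args) {
      static Object instance = {"instance"};
      std::printf("allocating %s with type arguments %s\n", cls.name,
                  type_args->name);
      return &instance;
    }

    // Mirrors the stub: non-parameterized classes pass null type arguments,
    // and the result slot pushed first is what the caller pops into R0.
    static Object* AllocateSlowPath(const Class& cls, Object* type_args) {
      Object* pushed = cls.is_parameterized ? type_args : &kNull;
      return AllocateObjectRuntime(cls, pushed);
    }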
1186 | 1167 |
1187 | 1168 |
1188 // Called for invoking "dynamic noSuchMethod(Invocation invocation)" function | 1169 // Called for invoking "dynamic noSuchMethod(Invocation invocation)" function |
1189 // from the entry code of a Dart function after an error in the passed | 1170 // from the entry code of a Dart function after an error in the passed |
1190 // argument names or count is detected. | 1171 // argument names or count is detected. |
1191 // Input parameters: | 1172 // Input parameters: |
1192 // LR : return address. | 1173 // LR : return address. |
1193 // SP : address of last argument. | 1174 // SP : address of last argument. |
1194 // R4: arguments descriptor array. | 1175 // R4: arguments descriptor array. |
(...skipping 134 matching lines...)
1329 // - Check if the 'num_args' arguments (including receiver) match any IC data group. | 1310 // - Check if the 'num_args' arguments (including receiver) match any IC data group. |
1330 // - Match found -> jump to target. | 1311 // - Match found -> jump to target. |
1331 // - Match not found -> jump to IC miss. | 1312 // - Match not found -> jump to IC miss. |
1332 void StubCode::GenerateNArgsCheckInlineCacheStub( | 1313 void StubCode::GenerateNArgsCheckInlineCacheStub( |
1333 Assembler* assembler, | 1314 Assembler* assembler, |
1334 intptr_t num_args, | 1315 intptr_t num_args, |
1335 const RuntimeEntry& handle_ic_miss, | 1316 const RuntimeEntry& handle_ic_miss, |
1336 Token::Kind kind, | 1317 Token::Kind kind, |
1337 RangeCollectionMode range_collection_mode, | 1318 RangeCollectionMode range_collection_mode, |
1338 bool optimized) { | 1319 bool optimized) { |
1339 __ CheckCodePointer(); | |
1340 ASSERT(num_args > 0); | 1320 ASSERT(num_args > 0); |
1341 #if defined(DEBUG) | 1321 #if defined(DEBUG) |
1342 { Label ok; | 1322 { Label ok; |
1343 // Check that the IC data array has NumArgsTested() == num_args. | 1323 // Check that the IC data array has NumArgsTested() == num_args. |
1344 // 'NumArgsTested' is stored in the least significant bits of 'state_bits'. | 1324 // 'NumArgsTested' is stored in the least significant bits of 'state_bits'. |
1345 __ ldr(R6, FieldAddress(R5, ICData::state_bits_offset())); | 1325 __ ldr(R6, FieldAddress(R5, ICData::state_bits_offset())); |
1346 ASSERT(ICData::NumArgsTestedShift() == 0); // No shift needed. | 1326 ASSERT(ICData::NumArgsTestedShift() == 0); // No shift needed. |
1347 __ and_(R6, R6, Operand(ICData::NumArgsTestedMask())); | 1327 __ and_(R6, R6, Operand(ICData::NumArgsTestedMask())); |
1348 __ CompareImmediate(R6, num_args); | 1328 __ CompareImmediate(R6, num_args); |
1349 __ b(&ok, EQ); | 1329 __ b(&ok, EQ); |
(...skipping 104 matching lines...)
1454 __ Push(IP); | 1434 __ Push(IP); |
1455 } | 1435 } |
1456 // Pass IC data object. | 1436 // Pass IC data object. |
1457 __ Push(R5); | 1437 __ Push(R5); |
1458 __ CallRuntime(handle_ic_miss, num_args + 1); | 1438 __ CallRuntime(handle_ic_miss, num_args + 1); |
1459 // Remove the call arguments pushed earlier, including the IC data object. | 1439 // Remove the call arguments pushed earlier, including the IC data object. |
1460 __ Drop(num_args + 1); | 1440 __ Drop(num_args + 1); |
1461 // Pop returned function object into R0. | 1441 // Pop returned function object into R0. |
1462 // Restore arguments descriptor array and IC data array. | 1442 // Restore arguments descriptor array and IC data array. |
1463 __ PopList((1 << R0) | (1 << R4) | (1 << R5)); | 1443 __ PopList((1 << R0) | (1 << R4) | (1 << R5)); |
1464 if (range_collection_mode == kCollectRanges) { | |
1465 __ RestoreCodePointer(); | |
1466 } | |
1467 __ LeaveStubFrame(); | 1444 __ LeaveStubFrame(); |
1468 Label call_target_function; | 1445 Label call_target_function; |
1469 if (!FLAG_lazy_dispatchers) { | 1446 if (!FLAG_lazy_dispatchers) { |
1470 GenerateDispatcherCode(assembler, &call_target_function); | 1447 GenerateDispatcherCode(assembler, &call_target_function); |
1471 } else { | 1448 } else { |
1472 __ b(&call_target_function); | 1449 __ b(&call_target_function); |
1473 } | 1450 } |
1474 | 1451 |
1475 __ Bind(&found); | 1452 __ Bind(&found); |
1476 // R6: pointer to an IC data check group. | 1453 // R6: pointer to an IC data check group. |
(...skipping 17 matching lines...)
1494 __ ldr(R1, Address(SP, 0 * kWordSize)); | 1471 __ ldr(R1, Address(SP, 0 * kWordSize)); |
1495 if (num_args == 2) { | 1472 if (num_args == 2) { |
1496 __ ldr(R3, Address(SP, 1 * kWordSize)); | 1473 __ ldr(R3, Address(SP, 1 * kWordSize)); |
1497 } | 1474 } |
1498 __ EnterStubFrame(); | 1475 __ EnterStubFrame(); |
1499 if (num_args == 2) { | 1476 if (num_args == 2) { |
1500 __ PushList((1 << R1) | (1 << R3) | (1 << R5)); | 1477 __ PushList((1 << R1) | (1 << R3) | (1 << R5)); |
1501 } else { | 1478 } else { |
1502 __ PushList((1 << R1) | (1 << R5)); | 1479 __ PushList((1 << R1) | (1 << R5)); |
1503 } | 1480 } |
1504 __ ldr(CODE_REG, FieldAddress(R0, Function::code_offset())); | |
1505 __ blx(R2); | 1481 __ blx(R2); |
1506 | 1482 |
1507 Label done; | 1483 Label done; |
1508 __ ldr(R5, Address(FP, kFirstLocalSlotFromFp * kWordSize)); | 1484 __ ldr(R5, Address(FP, kFirstLocalSlotFromFp * kWordSize)); |
1509 __ UpdateRangeFeedback(R0, 2, R5, R1, R4, &done); | 1485 __ UpdateRangeFeedback(R0, 2, R5, R1, R4, &done); |
1510 __ Bind(&done); | 1486 __ Bind(&done); |
1511 __ RestoreCodePointer(); | |
1512 __ LeaveStubFrame(); | 1487 __ LeaveStubFrame(); |
1513 __ Ret(); | 1488 __ Ret(); |
1514 } else { | 1489 } else { |
1515 __ ldr(CODE_REG, FieldAddress(R0, Function::code_offset())); | |
1516 __ bx(R2); | 1490 __ bx(R2); |
1517 } | 1491 } |
1518 | 1492 |
1519 if (FLAG_support_debugger && !optimized) { | 1493 if (FLAG_support_debugger && !optimized) { |
1520 __ Bind(&stepping); | 1494 __ Bind(&stepping); |
1521 __ EnterStubFrame(); | 1495 __ EnterStubFrame(); |
1522 __ Push(R5); // Preserve IC data. | 1496 __ Push(R5); // Preserve IC data. |
1523 __ CallRuntime(kSingleStepHandlerRuntimeEntry, 0); | 1497 __ CallRuntime(kSingleStepHandlerRuntimeEntry, 0); |
1524 __ Pop(R5); | 1498 __ Pop(R5); |
1525 __ RestoreCodePointer(); | |
1526 __ LeaveStubFrame(); | 1499 __ LeaveStubFrame(); |
1527 __ b(&done_stepping); | 1500 __ b(&done_stepping); |
1528 } | 1501 } |
1529 } | 1502 } |
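The flow summarized before the stub ("match found -> jump to target, match not found -> jump to IC miss") amounts to a linear scan of the IC data array. A hedged sketch for the num_args == 1 case, assuming one (class id, target, count) group per entry; the real ICData layout is a raw array not shown in this hunk:

    #include <cstdint>
    #include <vector>

    // Illustrative check group for a 1-argument IC; not the VM's layout.
    struct ICEntry {
      intptr_t receiver_cid;  // Class id recorded for this group.
      void*    target;        // Cached target function.
      intptr_t count;         // Call count (stubs clamp this on overflow).
    };

    // Returns the cached target, or nullptr to signal an IC miss (the stub
    // then calls handle_ic_miss, which may extend the array, and retries).
    void* LookupIC(std::vector<ICEntry>& entries, intptr_t receiver_cid) {
      for (ICEntry& e : entries) {
        if (e.receiver_cid == receiver_cid) {
          ++e.count;
          return e.target;
        }
      }
      return nullptr;
    }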
1530 | 1503 |
1531 | 1504 |
1532 // Use inline cache data array to invoke the target or continue in inline | 1505 // Use inline cache data array to invoke the target or continue in inline |
1533 // cache miss handler. Stub for 1-argument check (receiver class). | 1506 // cache miss handler. Stub for 1-argument check (receiver class). |
1534 // LR: return address. | 1507 // LR: return address. |
1535 // R5: inline cache data object. | 1508 // R5: inline cache data object. |
(...skipping 130 matching lines...)
1666 __ adds(R1, R1, Operand(Smi::RawValue(1))); | 1639 __ adds(R1, R1, Operand(Smi::RawValue(1))); |
1667 __ LoadImmediate(R1, Smi::RawValue(Smi::kMaxValue), VS); // Overflow. | 1640 __ LoadImmediate(R1, Smi::RawValue(Smi::kMaxValue), VS); // Overflow. |
1668 __ StoreIntoSmiField(Address(R6, count_offset), R1); | 1641 __ StoreIntoSmiField(Address(R6, count_offset), R1); |
1669 } | 1642 } |
1670 | 1643 |
1671 // Load arguments descriptor into R4. | 1644 // Load arguments descriptor into R4. |
1672 __ ldr(R4, FieldAddress(R5, ICData::arguments_descriptor_offset())); | 1645 __ ldr(R4, FieldAddress(R5, ICData::arguments_descriptor_offset())); |
1673 | 1646 |
1674 // Get function and call it, if possible. | 1647 // Get function and call it, if possible. |
1675 __ LoadFromOffset(kWord, R0, R6, target_offset); | 1648 __ LoadFromOffset(kWord, R0, R6, target_offset); |
1676 __ ldr(CODE_REG, FieldAddress(R0, Function::code_offset())); | |
1677 __ ldr(R2, FieldAddress(R0, Function::entry_point_offset())); | 1649 __ ldr(R2, FieldAddress(R0, Function::entry_point_offset())); |
1678 __ bx(R2); | 1650 __ bx(R2); |
1679 | 1651 |
1680 if (FLAG_support_debugger) { | 1652 if (FLAG_support_debugger) { |
1681 __ Bind(&stepping); | 1653 __ Bind(&stepping); |
1682 __ EnterStubFrame(); | 1654 __ EnterStubFrame(); |
1683 __ Push(R5); // Preserve IC data. | 1655 __ Push(R5); // Preserve IC data. |
1684 __ CallRuntime(kSingleStepHandlerRuntimeEntry, 0); | 1656 __ CallRuntime(kSingleStepHandlerRuntimeEntry, 0); |
1685 __ Pop(R5); | 1657 __ Pop(R5); |
1686 __ RestoreCodePointer(); | |
1687 __ LeaveStubFrame(); | 1658 __ LeaveStubFrame(); |
1688 __ b(&done_stepping); | 1659 __ b(&done_stepping); |
1689 } | 1660 } |
1690 } | 1661 } |
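The count update near the top of this hunk avoids a branch: "adds" sets the overflow flag, and the conditional LoadImmediate (predicated on VS) clamps the Smi to kMaxValue only when the add overflowed. The same saturating increment in plain C++, with an illustrative 31-bit Smi bound:

    #include <cstdint>

    // Illustrative Smi range for a 32-bit target; the real bound is
    // platform-dependent.
    constexpr int32_t kSmiMaxValue = (INT32_C(1) << 30) - 1;

    // Increment a usage counter, saturating instead of wrapping on overflow.
    int32_t SaturatingIncrement(int32_t count) {
      return (count >= kSmiMaxValue) ? kSmiMaxValue : count + 1;
    }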
1691 | 1662 |
1692 | 1663 |
1693 void StubCode::GenerateOneArgUnoptimizedStaticCallStub(Assembler* assembler) { | 1664 void StubCode::GenerateOneArgUnoptimizedStaticCallStub(Assembler* assembler) { |
1694 GenerateUsageCounterIncrement(assembler, R6); | 1665 GenerateUsageCounterIncrement(assembler, R6); |
1695 GenerateNArgsCheckInlineCacheStub( | 1666 GenerateNArgsCheckInlineCacheStub( |
1696 assembler, 1, kStaticCallMissHandlerOneArgRuntimeEntry, Token::kILLEGAL, | 1667 assembler, 1, kStaticCallMissHandlerOneArgRuntimeEntry, Token::kILLEGAL, |
(...skipping 16 matching lines...)
1713 void StubCode::GenerateLazyCompileStub(Assembler* assembler) { | 1684 void StubCode::GenerateLazyCompileStub(Assembler* assembler) { |
1714 // Preserve arg desc. and IC data object. | 1685 // Preserve arg desc. and IC data object. |
1715 __ EnterStubFrame(); | 1686 __ EnterStubFrame(); |
1716 __ PushList((1 << R4) | (1 << R5)); | 1687 __ PushList((1 << R4) | (1 << R5)); |
1717 __ Push(R0); // Pass function. | 1688 __ Push(R0); // Pass function. |
1718 __ CallRuntime(kCompileFunctionRuntimeEntry, 1); | 1689 __ CallRuntime(kCompileFunctionRuntimeEntry, 1); |
1719 __ Pop(R0); // Restore argument. | 1690 __ Pop(R0); // Restore argument. |
1720 __ PopList((1 << R4) | (1 << R5)); // Restore arg desc. and IC data. | 1691 __ PopList((1 << R4) | (1 << R5)); // Restore arg desc. and IC data. |
1721 __ LeaveStubFrame(); | 1692 __ LeaveStubFrame(); |
1722 | 1693 |
1723 __ ldr(CODE_REG, FieldAddress(R0, Function::code_offset())); | |
1724 __ ldr(R2, FieldAddress(R0, Function::entry_point_offset())); | 1694 __ ldr(R2, FieldAddress(R0, Function::entry_point_offset())); |
1725 __ bx(R2); | 1695 __ bx(R2); |
1726 } | 1696 } |
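The lazy-compile stub is a trampoline: it spills the arguments descriptor and IC data the callee will need, has the runtime compile the function in R0, then tail-jumps to the fresh entry point so the compiled code returns straight to the original caller. A hedged sketch with invented names:

    // Stand-ins: Function and CompileFunctionRuntime are illustrative only.
    struct Function {
      void (*entry_point)() = nullptr;
    };

    static void CompiledBody() { /* freshly generated code would live here */ }

    // Placeholder for kCompileFunctionRuntimeEntry: installs an entry point.
    static void CompileFunctionRuntime(Function* f) {
      f->entry_point = &CompiledBody;
    }

    static void LazyCompileTrampoline(Function* f) {
      CompileFunctionRuntime(f);
      f->entry_point();  // In the stub this is "bx R2", a tail jump.
    }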
1727 | 1697 |
1728 | 1698 |
1729 // R5: Contains an ICData. | 1699 // R5: Contains an ICData. |
1730 void StubCode::GenerateICCallBreakpointStub(Assembler* assembler) { | 1700 void StubCode::GenerateICCallBreakpointStub(Assembler* assembler) { |
1731 __ EnterStubFrame(); | 1701 __ EnterStubFrame(); |
1732 __ LoadObject(R0, Object::null_object()); | 1702 __ LoadObject(R0, Object::null_object()); |
1733 // Preserve arguments descriptor and make room for result. | 1703 // Preserve arguments descriptor and make room for result. |
1734 __ PushList((1 << R0) | (1 << R5)); | 1704 __ PushList((1 << R0) | (1 << R5)); |
1735 __ CallRuntime(kBreakpointRuntimeHandlerRuntimeEntry, 0); | 1705 __ CallRuntime(kBreakpointRuntimeHandlerRuntimeEntry, 0); |
1736 __ PopList((1 << R0) | (1 << R5)); | 1706 __ PopList((1 << R0) | (1 << R5)); |
1737 __ LeaveStubFrame(); | 1707 __ LeaveStubFrame(); |
1738 __ mov(CODE_REG, Operand(R0)); | |
1739 __ ldr(R0, FieldAddress(CODE_REG, Code::entry_point_offset())); | |
1740 __ bx(R0); | 1708 __ bx(R0); |
1741 } | 1709 } |
1742 | 1710 |
1743 | 1711 |
1744 void StubCode::GenerateRuntimeCallBreakpointStub(Assembler* assembler) { | 1712 void StubCode::GenerateRuntimeCallBreakpointStub(Assembler* assembler) { |
1745 __ EnterStubFrame(); | 1713 __ EnterStubFrame(); |
1746 __ LoadObject(R0, Object::null_object()); | 1714 __ LoadObject(R0, Object::null_object()); |
1747 // Make room for result. | 1715 // Make room for result. |
1748 __ PushList((1 << R0)); | 1716 __ PushList((1 << R0)); |
1749 __ CallRuntime(kBreakpointRuntimeHandlerRuntimeEntry, 0); | 1717 __ CallRuntime(kBreakpointRuntimeHandlerRuntimeEntry, 0); |
1750 __ PopList((1 << CODE_REG)); | 1718 __ PopList((1 << R0)); |
1751 __ LeaveStubFrame(); | 1719 __ LeaveStubFrame(); |
1752 __ ldr(R0, FieldAddress(CODE_REG, Code::entry_point_offset())); | |
1753 __ bx(R0); | 1720 __ bx(R0); |
1754 } | 1721 } |
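Both breakpoint stubs above share one shape: push a null result slot, let kBreakpointRuntimeHandlerRuntimeEntry return the original (pre-patch) target, pop it, and jump there. The diff's only divergence is plumbing: the old code routes the result through CODE_REG and reloads the entry point from the Code object, while the new code jumps through R0 directly. A hedged sketch of the shared control flow:

    // Illustrative only; the handler conceptually recovers where execution
    // would have gone had the breakpoint patch not been installed.
    using Entry = void (*)();

    static void OriginalTarget() { /* the code the breakpoint displaced */ }

    // Placeholder for kBreakpointRuntimeHandlerRuntimeEntry.
    static Entry BreakpointRuntimeHandler() { return &OriginalTarget; }

    static void BreakpointTrampoline() {
      Entry resume = BreakpointRuntimeHandler();
      resume();  // "bx R0" in the stub: resume as if never interrupted.
    }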
1755 | 1722 |
1756 | 1723 |
1757 // Called only from unoptimized code. All relevant registers have been saved. | 1724 // Called only from unoptimized code. All relevant registers have been saved. |
1758 void StubCode::GenerateDebugStepCheckStub( | 1725 void StubCode::GenerateDebugStepCheckStub( |
1759 Assembler* assembler) { | 1726 Assembler* assembler) { |
1760 // Check single stepping. | 1727 // Check single stepping. |
1761 Label stepping, done_stepping; | 1728 Label stepping, done_stepping; |
1762 __ LoadIsolate(R1); | 1729 __ LoadIsolate(R1); |
(...skipping 159 matching lines...)
1922 void StubCode::GenerateOptimizeFunctionStub(Assembler* assembler) { | 1889 void StubCode::GenerateOptimizeFunctionStub(Assembler* assembler) { |
1923 __ EnterStubFrame(); | 1890 __ EnterStubFrame(); |
1924 __ Push(R4); | 1891 __ Push(R4); |
1925 __ LoadObject(IP, Object::null_object()); | 1892 __ LoadObject(IP, Object::null_object()); |
1926 __ Push(IP); // Set up space on stack for return value. | 1893 __ Push(IP); // Set up space on stack for return value. |
1927 __ Push(R6); | 1894 __ Push(R6); |
1928 __ CallRuntime(kOptimizeInvokedFunctionRuntimeEntry, 1); | 1895 __ CallRuntime(kOptimizeInvokedFunctionRuntimeEntry, 1); |
1929 __ Pop(R0); // Discard argument. | 1896 __ Pop(R0); // Discard argument. |
1930 __ Pop(R0); // Get Code object | 1897 __ Pop(R0); // Get Code object |
1931 __ Pop(R4); // Restore argument descriptor. | 1898 __ Pop(R4); // Restore argument descriptor. |
| 1899 __ ldr(R0, FieldAddress(R0, Code::entry_point_offset())); |
1932 __ LeaveStubFrame(); | 1900 __ LeaveStubFrame(); |
1933 __ mov(CODE_REG, Operand(R0)); | |
1934 __ ldr(R0, FieldAddress(R0, Code::entry_point_offset())); | |
1935 __ bx(R0); | 1901 __ bx(R0); |
1936 __ bkpt(0); | 1902 __ bkpt(0); |
1937 } | 1903 } |
1938 | 1904 |
1939 | 1905 |
1940 // Does identical check (object references are equal or not equal) with special | 1906 // Does identical check (object references are equal or not equal) with special |
1941 // checks for boxed numbers. | 1907 // checks for boxed numbers. |
1942 // LR: return address. | 1908 // LR: return address. |
1943 // Return Zero condition flag set if equal. | 1909 // Return Zero condition flag set if equal. |
1944 // Note: A Mint cannot contain a value that would fit in Smi, a Bigint | 1910 // Note: A Mint cannot contain a value that would fit in Smi, a Bigint |
(...skipping 81 matching lines...)
2026 const Register right = R0; | 1992 const Register right = R0; |
2027 __ ldr(left, Address(SP, 1 * kWordSize)); | 1993 __ ldr(left, Address(SP, 1 * kWordSize)); |
2028 __ ldr(right, Address(SP, 0 * kWordSize)); | 1994 __ ldr(right, Address(SP, 0 * kWordSize)); |
2029 GenerateIdenticalWithNumberCheckStub(assembler, left, right, temp); | 1995 GenerateIdenticalWithNumberCheckStub(assembler, left, right, temp); |
2030 __ Ret(); | 1996 __ Ret(); |
2031 | 1997 |
2032 if (FLAG_support_debugger) { | 1998 if (FLAG_support_debugger) { |
2033 __ Bind(&stepping); | 1999 __ Bind(&stepping); |
2034 __ EnterStubFrame(); | 2000 __ EnterStubFrame(); |
2035 __ CallRuntime(kSingleStepHandlerRuntimeEntry, 0); | 2001 __ CallRuntime(kSingleStepHandlerRuntimeEntry, 0); |
2036 __ RestoreCodePointer(); | |
2037 __ LeaveStubFrame(); | 2002 __ LeaveStubFrame(); |
2038 __ b(&done_stepping); | 2003 __ b(&done_stepping); |
2039 } | 2004 } |
2040 } | 2005 } |
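The special cases exist because Dart's identical() must treat two boxed numbers as identical when they hold the same value, even if they are distinct heap objects; doubles are compared by their raw bit patterns. A hedged sketch of the predicate, with stand-in tags in place of the VM's class ids:

    #include <cstdint>
    #include <cstring>

    // Stand-in boxed-number representation; the VM uses tagged heap objects.
    enum class Tag { kOther, kDouble, kMint };
    struct Box {
      Tag tag;
      union { double d; int64_t i; };
    };

    bool IdenticalWithNumberCheck(const Box* left, const Box* right) {
      if (left == right) return true;           // Same reference.
      if (left->tag != right->tag) return false;
      if (left->tag == Tag::kDouble) {          // Compare raw 64-bit patterns.
        uint64_t lb, rb;
        std::memcpy(&lb, &left->d, sizeof lb);
        std::memcpy(&rb, &right->d, sizeof rb);
        return lb == rb;
      }
      if (left->tag == Tag::kMint) return left->i == right->i;
      return false;                             // Distinct non-number objects.
    }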
2041 | 2006 |
2042 | 2007 |
2043 // Called from optimized code only. | 2008 // Called from optimized code only. |
2044 // LR: return address. | 2009 // LR: return address. |
2045 // SP + 4: left operand. | 2010 // SP + 4: left operand. |
2046 // SP + 0: right operand. | 2011 // SP + 0: right operand. |
(...skipping 39 matching lines...)
2086 __ cmp(R4, Operand(R0)); | 2051 __ cmp(R4, Operand(R0)); |
2087 __ b(&update, NE); | 2052 __ b(&update, NE); |
2088 | 2053 |
2089 __ Bind(&call_target_function); | 2054 __ Bind(&call_target_function); |
2090 // Call the target found in the cache. For a class id match, this is a | 2055 // Call the target found in the cache. For a class id match, this is a |
2091 // proper target for the given name and arguments descriptor. If the | 2056 // proper target for the given name and arguments descriptor. If the |
2092 // illegal class id was found, the target is a cache miss handler that can | 2057 // illegal class id was found, the target is a cache miss handler that can |
2093 // be invoked as a normal Dart function. | 2058 // be invoked as a normal Dart function. |
2094 __ add(IP, R2, Operand(R3, LSL, 2)); | 2059 __ add(IP, R2, Operand(R3, LSL, 2)); |
2095 __ ldr(R0, FieldAddress(IP, base + kWordSize)); | 2060 __ ldr(R0, FieldAddress(IP, base + kWordSize)); |
2096 __ ldr(CODE_REG, FieldAddress(R0, Function::code_offset())); | |
2097 __ ldr(target, FieldAddress(R0, Function::entry_point_offset())); | 2061 __ ldr(target, FieldAddress(R0, Function::entry_point_offset())); |
2098 } | 2062 } |
2099 | 2063 |
2100 | 2064 |
2101 // Called from megamorphic calls. | 2065 // Called from megamorphic calls. |
2102 // R0: receiver. | 2066 // R0: receiver. |
2103 // R1: lookup cache. | 2067 // R1: lookup cache. |
2104 // Result: | 2068 // Result: |
2105 // R1: entry point. | 2069 // R1: entry point. |
2106 void StubCode::GenerateMegamorphicLookupStub(Assembler* assembler) { | 2070 void StubCode::GenerateMegamorphicLookupStub(Assembler* assembler) { |
2107 EmitMegamorphicLookup(assembler, R0, R1, R1); | 2071 EmitMegamorphicLookup(assembler, R0, R1, R1); |
2108 __ Ret(); | 2072 __ Ret(); |
2109 } | 2073 } |
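The probe above hashes on the receiver's class id and walks the cache until it finds either a matching id or the illegal id, whose slot holds the miss handler (itself callable as a normal Dart function), so the loop always ends at something invocable; the target sits one word past the id, hence the load at base + kWordSize. A hedged sketch, assuming a power-of-two, linearly probed table:

    #include <cstdint>

    constexpr intptr_t kIllegalCid = -1;  // Illustrative sentinel value.

    // Illustrative cache slot: (class id, target) pairs; kIllegalCid slots
    // hold the shared miss handler so every probe terminates.
    struct MegamorphicEntry {
      intptr_t cid;
      void*    target;
    };

    void* MegamorphicLookup(const MegamorphicEntry* table, intptr_t mask,
                            intptr_t receiver_cid) {
      intptr_t i = receiver_cid & mask;  // mask == table_size - 1.
      while (table[i].cid != receiver_cid && table[i].cid != kIllegalCid) {
        i = (i + 1) & mask;              // Linear probe on collision.
      }
      return table[i].target;            // Cached target, or the miss handler.
    }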
2110 | 2074 |
2111 } // namespace dart | 2075 } // namespace dart |
2112 | 2076 |
2113 #endif // defined TARGET_ARCH_ARM | 2077 #endif // defined TARGET_ARCH_ARM |