// Copyright 2013 the V8 project authors. All rights reserved.
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
//
//     * Redistributions of source code must retain the above copyright
//       notice, this list of conditions and the following disclaimer.
//     * Redistributions in binary form must reproduce the above
//       copyright notice, this list of conditions and the following
//       disclaimer in the documentation and/or other materials provided
//       with the distribution.
//     * Neither the name of Google Inc. nor the names of its
//       contributors may be used to endorse or promote products derived
//       from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.

#include "v8.h"

#if V8_TARGET_ARCH_A64

#include "bootstrapper.h"
#include "code-stubs.h"
#include "regexp-macro-assembler.h"
#include "stub-cache.h"

namespace v8 {
namespace internal {

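// The InitializeInterfaceDescriptor functions below record, for each hydrogen
// code stub on this A64 port, which registers carry the stub's parameters and
// which runtime entry (if any) serves as its deoptimization handler.
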
void FastNewClosureStub::InitializeInterfaceDescriptor(
    Isolate* isolate,
    CodeStubInterfaceDescriptor* descriptor) {
  // x2: function info
  static Register registers[] = { x2 };
  descriptor->register_param_count_ = sizeof(registers) / sizeof(registers[0]);
  descriptor->register_params_ = registers;
  descriptor->deoptimization_handler_ =
      Runtime::FunctionForId(Runtime::kNewClosureFromStubFailure)->entry;
}


void FastNewContextStub::InitializeInterfaceDescriptor(
    Isolate* isolate,
    CodeStubInterfaceDescriptor* descriptor) {
  // x1: function
  static Register registers[] = { x1 };
  descriptor->register_param_count_ = sizeof(registers) / sizeof(registers[0]);
  descriptor->register_params_ = registers;
  descriptor->deoptimization_handler_ = NULL;
}


void ToNumberStub::InitializeInterfaceDescriptor(
    Isolate* isolate,
    CodeStubInterfaceDescriptor* descriptor) {
  // x0: value
  static Register registers[] = { x0 };
  descriptor->register_param_count_ = sizeof(registers) / sizeof(registers[0]);
  descriptor->register_params_ = registers;
  descriptor->deoptimization_handler_ = NULL;
}


void NumberToStringStub::InitializeInterfaceDescriptor(
    Isolate* isolate,
    CodeStubInterfaceDescriptor* descriptor) {
  // x0: value
  static Register registers[] = { x0 };
  descriptor->register_param_count_ = sizeof(registers) / sizeof(registers[0]);
  descriptor->register_params_ = registers;
  descriptor->deoptimization_handler_ =
      Runtime::FunctionForId(Runtime::kNumberToString)->entry;
}


void FastCloneShallowArrayStub::InitializeInterfaceDescriptor(
    Isolate* isolate,
    CodeStubInterfaceDescriptor* descriptor) {
  // x3: array literals array
  // x2: array literal index
  // x1: constant elements
  static Register registers[] = { x3, x2, x1 };
  descriptor->register_param_count_ = sizeof(registers) / sizeof(registers[0]);
  descriptor->register_params_ = registers;
  descriptor->deoptimization_handler_ =
      Runtime::FunctionForId(Runtime::kCreateArrayLiteralStubBailout)->entry;
}


void FastCloneShallowObjectStub::InitializeInterfaceDescriptor(
    Isolate* isolate,
    CodeStubInterfaceDescriptor* descriptor) {
  // x3: object literals array
  // x2: object literal index
  // x1: constant properties
  // x0: object literal flags
  static Register registers[] = { x3, x2, x1, x0 };
  descriptor->register_param_count_ = sizeof(registers) / sizeof(registers[0]);
  descriptor->register_params_ = registers;
  descriptor->deoptimization_handler_ =
      Runtime::FunctionForId(Runtime::kCreateObjectLiteral)->entry;
}


void CreateAllocationSiteStub::InitializeInterfaceDescriptor(
    Isolate* isolate,
    CodeStubInterfaceDescriptor* descriptor) {
  // x2: feedback vector
  // x3: call feedback slot
  static Register registers[] = { x2, x3 };
  descriptor->register_param_count_ = sizeof(registers) / sizeof(registers[0]);
  descriptor->register_params_ = registers;
  descriptor->deoptimization_handler_ = NULL;
}


void KeyedLoadFastElementStub::InitializeInterfaceDescriptor(
    Isolate* isolate,
    CodeStubInterfaceDescriptor* descriptor) {
  // x1: receiver
  // x0: key
  static Register registers[] = { x1, x0 };
  descriptor->register_param_count_ = sizeof(registers) / sizeof(registers[0]);
  descriptor->register_params_ = registers;
  descriptor->deoptimization_handler_ =
      FUNCTION_ADDR(KeyedLoadIC_MissFromStubFailure);
}


void KeyedLoadDictionaryElementStub::InitializeInterfaceDescriptor(
    Isolate* isolate,
    CodeStubInterfaceDescriptor* descriptor) {
  // x1: receiver
  // x0: key
  static Register registers[] = { x1, x0 };
  descriptor->register_param_count_ = sizeof(registers) / sizeof(registers[0]);
  descriptor->register_params_ = registers;
  descriptor->deoptimization_handler_ =
      FUNCTION_ADDR(KeyedLoadIC_MissFromStubFailure);
}


void RegExpConstructResultStub::InitializeInterfaceDescriptor(
    Isolate* isolate,
    CodeStubInterfaceDescriptor* descriptor) {
  // x2: length
  // x1: index (of last match)
  // x0: string
  static Register registers[] = { x2, x1, x0 };
  descriptor->register_param_count_ = sizeof(registers) / sizeof(registers[0]);
  descriptor->register_params_ = registers;
  descriptor->deoptimization_handler_ =
      Runtime::FunctionForId(Runtime::kRegExpConstructResult)->entry;
}


void LoadFieldStub::InitializeInterfaceDescriptor(
    Isolate* isolate,
    CodeStubInterfaceDescriptor* descriptor) {
  // x0: receiver
  static Register registers[] = { x0 };
  descriptor->register_param_count_ = sizeof(registers) / sizeof(registers[0]);
  descriptor->register_params_ = registers;
  descriptor->deoptimization_handler_ = NULL;
}


void KeyedLoadFieldStub::InitializeInterfaceDescriptor(
    Isolate* isolate,
    CodeStubInterfaceDescriptor* descriptor) {
  // x1: receiver
  static Register registers[] = { x1 };
  descriptor->register_param_count_ = sizeof(registers) / sizeof(registers[0]);
  descriptor->register_params_ = registers;
  descriptor->deoptimization_handler_ = NULL;
}


void StringLengthStub::InitializeInterfaceDescriptor(
    Isolate* isolate,
    CodeStubInterfaceDescriptor* descriptor) {
  static Register registers[] = { x0, x2 };
  descriptor->register_param_count_ = 2;
  descriptor->register_params_ = registers;
  descriptor->deoptimization_handler_ = NULL;
}


void KeyedStringLengthStub::InitializeInterfaceDescriptor(
    Isolate* isolate,
    CodeStubInterfaceDescriptor* descriptor) {
  static Register registers[] = { x1, x0 };
  descriptor->register_param_count_ = 2;
  descriptor->register_params_ = registers;
  descriptor->deoptimization_handler_ = NULL;
}


void KeyedStoreFastElementStub::InitializeInterfaceDescriptor(
    Isolate* isolate,
    CodeStubInterfaceDescriptor* descriptor) {
  // x2: receiver
  // x1: key
  // x0: value
  static Register registers[] = { x2, x1, x0 };
  descriptor->register_param_count_ = sizeof(registers) / sizeof(registers[0]);
  descriptor->register_params_ = registers;
  descriptor->deoptimization_handler_ =
      FUNCTION_ADDR(KeyedStoreIC_MissFromStubFailure);
}


void TransitionElementsKindStub::InitializeInterfaceDescriptor(
    Isolate* isolate,
    CodeStubInterfaceDescriptor* descriptor) {
  // x0: value (js_array)
  // x1: to_map
  static Register registers[] = { x0, x1 };
  descriptor->register_param_count_ = sizeof(registers) / sizeof(registers[0]);
  descriptor->register_params_ = registers;
  Address entry =
      Runtime::FunctionForId(Runtime::kTransitionElementsKind)->entry;
  descriptor->deoptimization_handler_ = FUNCTION_ADDR(entry);
}


void CompareNilICStub::InitializeInterfaceDescriptor(
    Isolate* isolate,
    CodeStubInterfaceDescriptor* descriptor) {
  // x0: value to compare
  static Register registers[] = { x0 };
  descriptor->register_param_count_ = sizeof(registers) / sizeof(registers[0]);
  descriptor->register_params_ = registers;
  descriptor->deoptimization_handler_ =
      FUNCTION_ADDR(CompareNilIC_Miss);
  descriptor->SetMissHandler(
      ExternalReference(IC_Utility(IC::kCompareNilIC_Miss), isolate));
}


static void InitializeArrayConstructorDescriptor(
    Isolate* isolate,
    CodeStubInterfaceDescriptor* descriptor,
    int constant_stack_parameter_count) {
  // x1: function
  // x2: allocation site with elements kind
  // x0: number of arguments to the constructor function
  static Register registers_variable_args[] = { x1, x2, x0 };
  static Register registers_no_args[] = { x1, x2 };

  if (constant_stack_parameter_count == 0) {
    descriptor->register_param_count_ =
        sizeof(registers_no_args) / sizeof(registers_no_args[0]);
    descriptor->register_params_ = registers_no_args;
  } else {
    // The stack param count covers the constructor pointer and a single
    // argument.
    descriptor->handler_arguments_mode_ = PASS_ARGUMENTS;
    descriptor->stack_parameter_count_ = x0;
    descriptor->register_param_count_ =
        sizeof(registers_variable_args) / sizeof(registers_variable_args[0]);
    descriptor->register_params_ = registers_variable_args;
  }

  descriptor->hint_stack_parameter_count_ = constant_stack_parameter_count;
  descriptor->function_mode_ = JS_FUNCTION_STUB_MODE;
  descriptor->deoptimization_handler_ =
      Runtime::FunctionForId(Runtime::kArrayConstructor)->entry;
}


void ArrayNoArgumentConstructorStub::InitializeInterfaceDescriptor(
    Isolate* isolate,
    CodeStubInterfaceDescriptor* descriptor) {
  InitializeArrayConstructorDescriptor(isolate, descriptor, 0);
}


void ArraySingleArgumentConstructorStub::InitializeInterfaceDescriptor(
    Isolate* isolate,
    CodeStubInterfaceDescriptor* descriptor) {
  InitializeArrayConstructorDescriptor(isolate, descriptor, 1);
}


void ArrayNArgumentsConstructorStub::InitializeInterfaceDescriptor(
    Isolate* isolate,
    CodeStubInterfaceDescriptor* descriptor) {
  InitializeArrayConstructorDescriptor(isolate, descriptor, -1);
}


static void InitializeInternalArrayConstructorDescriptor(
    Isolate* isolate,
    CodeStubInterfaceDescriptor* descriptor,
    int constant_stack_parameter_count) {
  // x1: constructor function
  // x0: number of arguments to the constructor function
  static Register registers_variable_args[] = { x1, x0 };
  static Register registers_no_args[] = { x1 };

  if (constant_stack_parameter_count == 0) {
    descriptor->register_param_count_ =
        sizeof(registers_no_args) / sizeof(registers_no_args[0]);
    descriptor->register_params_ = registers_no_args;
  } else {
    // The stack param count covers the constructor pointer and a single
    // argument.
    descriptor->handler_arguments_mode_ = PASS_ARGUMENTS;
    descriptor->stack_parameter_count_ = x0;
    descriptor->register_param_count_ =
        sizeof(registers_variable_args) / sizeof(registers_variable_args[0]);
    descriptor->register_params_ = registers_variable_args;
  }

  descriptor->hint_stack_parameter_count_ = constant_stack_parameter_count;
  descriptor->function_mode_ = JS_FUNCTION_STUB_MODE;
  descriptor->deoptimization_handler_ =
      Runtime::FunctionForId(Runtime::kInternalArrayConstructor)->entry;
}


void InternalArrayNoArgumentConstructorStub::InitializeInterfaceDescriptor(
    Isolate* isolate,
    CodeStubInterfaceDescriptor* descriptor) {
  InitializeInternalArrayConstructorDescriptor(isolate, descriptor, 0);
}


void InternalArraySingleArgumentConstructorStub::InitializeInterfaceDescriptor(
    Isolate* isolate,
    CodeStubInterfaceDescriptor* descriptor) {
  InitializeInternalArrayConstructorDescriptor(isolate, descriptor, 1);
}


void InternalArrayNArgumentsConstructorStub::InitializeInterfaceDescriptor(
    Isolate* isolate,
    CodeStubInterfaceDescriptor* descriptor) {
  InitializeInternalArrayConstructorDescriptor(isolate, descriptor, -1);
}


void ToBooleanStub::InitializeInterfaceDescriptor(
    Isolate* isolate,
    CodeStubInterfaceDescriptor* descriptor) {
  // x0: value
  static Register registers[] = { x0 };
  descriptor->register_param_count_ = sizeof(registers) / sizeof(registers[0]);
  descriptor->register_params_ = registers;
  descriptor->deoptimization_handler_ = FUNCTION_ADDR(ToBooleanIC_Miss);
  descriptor->SetMissHandler(
      ExternalReference(IC_Utility(IC::kToBooleanIC_Miss), isolate));
}


void StoreGlobalStub::InitializeInterfaceDescriptor(
    Isolate* isolate,
    CodeStubInterfaceDescriptor* descriptor) {
  // x1: receiver
  // x2: key (unused)
  // x0: value
  static Register registers[] = { x1, x2, x0 };
  descriptor->register_param_count_ = sizeof(registers) / sizeof(registers[0]);
  descriptor->register_params_ = registers;
  descriptor->deoptimization_handler_ =
      FUNCTION_ADDR(StoreIC_MissFromStubFailure);
}


void ElementsTransitionAndStoreStub::InitializeInterfaceDescriptor(
    Isolate* isolate,
    CodeStubInterfaceDescriptor* descriptor) {
  // x0: value
  // x3: target map
  // x1: key
  // x2: receiver
  static Register registers[] = { x0, x3, x1, x2 };
  descriptor->register_param_count_ = sizeof(registers) / sizeof(registers[0]);
  descriptor->register_params_ = registers;
  descriptor->deoptimization_handler_ =
      FUNCTION_ADDR(ElementsTransitionAndStoreIC_Miss);
}


void BinaryOpICStub::InitializeInterfaceDescriptor(
    Isolate* isolate,
    CodeStubInterfaceDescriptor* descriptor) {
  // x1: left operand
  // x0: right operand
  static Register registers[] = { x1, x0 };
  descriptor->register_param_count_ = sizeof(registers) / sizeof(registers[0]);
  descriptor->register_params_ = registers;
  descriptor->deoptimization_handler_ = FUNCTION_ADDR(BinaryOpIC_Miss);
  descriptor->SetMissHandler(
      ExternalReference(IC_Utility(IC::kBinaryOpIC_Miss), isolate));
}


void BinaryOpWithAllocationSiteStub::InitializeInterfaceDescriptor(
    Isolate* isolate,
    CodeStubInterfaceDescriptor* descriptor) {
  // x2: allocation site
  // x1: left operand
  // x0: right operand
  static Register registers[] = { x2, x1, x0 };
  descriptor->register_param_count_ = sizeof(registers) / sizeof(registers[0]);
  descriptor->register_params_ = registers;
  descriptor->deoptimization_handler_ =
      FUNCTION_ADDR(BinaryOpIC_MissWithAllocationSite);
}


void StringAddStub::InitializeInterfaceDescriptor(
    Isolate* isolate,
    CodeStubInterfaceDescriptor* descriptor) {
  // x1: left operand
  // x0: right operand
  static Register registers[] = { x1, x0 };
  descriptor->register_param_count_ = sizeof(registers) / sizeof(registers[0]);
  descriptor->register_params_ = registers;
  descriptor->deoptimization_handler_ =
      Runtime::FunctionForId(Runtime::kStringAdd)->entry;
}


void CallDescriptors::InitializeForIsolate(Isolate* isolate) {
  static PlatformCallInterfaceDescriptor default_descriptor =
      PlatformCallInterfaceDescriptor(CAN_INLINE_TARGET_ADDRESS);

  static PlatformCallInterfaceDescriptor noInlineDescriptor =
      PlatformCallInterfaceDescriptor(NEVER_INLINE_TARGET_ADDRESS);

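  // The descriptor names suggest that they control whether a call site may
  // embed its target address directly; the IC-style calls below use the
  // never-inline variant, presumably because their call targets get patched.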
  {
    CallInterfaceDescriptor* descriptor =
        isolate->call_descriptor(Isolate::ArgumentAdaptorCall);
    static Register registers[] = { x1,  // JSFunction
                                    cp,  // context
                                    x0,  // actual number of arguments
                                    x2,  // expected number of arguments
    };
    static Representation representations[] = {
        Representation::Tagged(),     // JSFunction
        Representation::Tagged(),     // context
        Representation::Integer32(),  // actual number of arguments
        Representation::Integer32(),  // expected number of arguments
    };
    descriptor->register_param_count_ = 4;
    descriptor->register_params_ = registers;
    descriptor->param_representations_ = representations;
    descriptor->platform_specific_descriptor_ = &default_descriptor;
  }
  {
    CallInterfaceDescriptor* descriptor =
        isolate->call_descriptor(Isolate::KeyedCall);
    static Register registers[] = { cp,  // context
                                    x2,  // key
    };
    static Representation representations[] = {
        Representation::Tagged(),  // context
        Representation::Tagged(),  // key
    };
    descriptor->register_param_count_ = 2;
    descriptor->register_params_ = registers;
    descriptor->param_representations_ = representations;
    descriptor->platform_specific_descriptor_ = &noInlineDescriptor;
  }
  {
    CallInterfaceDescriptor* descriptor =
        isolate->call_descriptor(Isolate::NamedCall);
    static Register registers[] = { cp,  // context
                                    x2,  // name
    };
    static Representation representations[] = {
        Representation::Tagged(),  // context
        Representation::Tagged(),  // name
    };
    descriptor->register_param_count_ = 2;
    descriptor->register_params_ = registers;
    descriptor->param_representations_ = representations;
    descriptor->platform_specific_descriptor_ = &noInlineDescriptor;
  }
  {
    CallInterfaceDescriptor* descriptor =
        isolate->call_descriptor(Isolate::CallHandler);
    static Register registers[] = { cp,  // context
                                    x0,  // receiver
    };
    static Representation representations[] = {
        Representation::Tagged(),  // context
        Representation::Tagged(),  // receiver
    };
    descriptor->register_param_count_ = 2;
    descriptor->register_params_ = registers;
    descriptor->param_representations_ = representations;
    descriptor->platform_specific_descriptor_ = &default_descriptor;
  }
  {
    CallInterfaceDescriptor* descriptor =
        isolate->call_descriptor(Isolate::ApiFunctionCall);
    static Register registers[] = { x0,  // callee
                                    x4,  // call_data
                                    x2,  // holder
                                    x1,  // api_function_address
                                    cp,  // context
    };
    static Representation representations[] = {
        Representation::Tagged(),    // callee
        Representation::Tagged(),    // call_data
        Representation::Tagged(),    // holder
        Representation::External(),  // api_function_address
        Representation::Tagged(),    // context
    };
    descriptor->register_param_count_ = 5;
    descriptor->register_params_ = registers;
    descriptor->param_representations_ = representations;
    descriptor->platform_specific_descriptor_ = &default_descriptor;
  }
}


#define __ ACCESS_MASM(masm)


void HydrogenCodeStub::GenerateLightweightMiss(MacroAssembler* masm) {
  // Update the static counter each time a new code stub is generated.
  Isolate* isolate = masm->isolate();
  isolate->counters()->code_stubs()->Increment();

  CodeStubInterfaceDescriptor* descriptor = GetInterfaceDescriptor(isolate);
  int param_count = descriptor->register_param_count_;
  {
    // Call the runtime system in a fresh internal frame.
    FrameScope scope(masm, StackFrame::INTERNAL);
    ASSERT((descriptor->register_param_count_ == 0) ||
           x0.Is(descriptor->register_params_[param_count - 1]));

    // Push arguments.
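    // Queueing the registers first lets the macro assembler combine them
    // into paired push operations where possible.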
    MacroAssembler::PushPopQueue queue(masm);
    for (int i = 0; i < param_count; ++i) {
      queue.Queue(descriptor->register_params_[i]);
    }
    queue.PushQueued();

    ExternalReference miss = descriptor->miss_handler();
    __ CallExternalReference(miss, descriptor->register_param_count_);
  }

  __ Ret();
}


void DoubleToIStub::Generate(MacroAssembler* masm) {
  Label done;
  Register input = source();
  Register result = destination();
  ASSERT(is_truncating());

  ASSERT(result.Is64Bits());
  ASSERT(jssp.Is(masm->StackPointer()));

  int double_offset = offset();

  DoubleRegister double_scratch = d0;  // only used if !skip_fastpath()
  Register scratch1 = GetAllocatableRegisterThatIsNotOneOf(input, result);
  Register scratch2 =
      GetAllocatableRegisterThatIsNotOneOf(input, result, scratch1);

  __ Push(scratch1, scratch2);
  // Account for saved regs if input is jssp.
  if (input.is(jssp)) double_offset += 2 * kPointerSize;

  if (!skip_fastpath()) {
    __ Push(double_scratch);
    if (input.is(jssp)) double_offset += 1 * kDoubleSize;
    __ Ldr(double_scratch, MemOperand(input, double_offset));
    // Try to convert with a FPU convert instruction. This handles all
    // non-saturating cases.
    __ TryConvertDoubleToInt64(result, double_scratch, &done);
    __ Fmov(result, double_scratch);
  } else {
    __ Ldr(result, MemOperand(input, double_offset));
  }

  // If we reach here we need to manually convert the input to an int32.

  // Extract the exponent.
  Register exponent = scratch1;
  __ Ubfx(exponent, result, HeapNumber::kMantissaBits,
          HeapNumber::kExponentBits);
  // If the exponent is >= 84 (kMantissaBits + 32), the result is always 0,
  // since the mantissa gets shifted completely out of the int32_t result.
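  // For example, with an unbiased exponent of exactly 84, the value is
  // mantissa * 2^(84 - 52), a multiple of 2^32, so its low 32 bits are zero.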
  __ Cmp(exponent, HeapNumber::kExponentBias + HeapNumber::kMantissaBits + 32);
  __ CzeroX(result, ge);
  __ B(ge, &done);

  // The Fcvtzs sequence handles all cases except where the conversion causes
  // signed overflow in the int64_t target. Since we've already handled
  // exponents >= 84, we can guarantee that 63 <= exponent < 84.

  if (masm->emit_debug_code()) {
    __ Cmp(exponent, HeapNumber::kExponentBias + 63);
    // Exponents less than this should have been handled by the Fcvt case.
    __ Check(ge, kUnexpectedValue);
  }

  // Isolate the mantissa bits, and set the implicit '1'.
  Register mantissa = scratch2;
  __ Ubfx(mantissa, result, 0, HeapNumber::kMantissaBits);
  __ Orr(mantissa, mantissa, 1UL << HeapNumber::kMantissaBits);

  // Negate the mantissa if necessary.
  __ Tst(result, kXSignMask);
  __ Cneg(mantissa, mantissa, ne);

  // Shift the mantissa bits into the correct place. We know that we have to
  // shift it left here, because exponent >= 63 >= kMantissaBits.
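  // For example, an exponent of 63 gives a left shift of 63 - 52 = 11, which
  // moves the implicit '1' (bit 52 of the mantissa) up to bit 63.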
  __ Sub(exponent, exponent,
         HeapNumber::kExponentBias + HeapNumber::kMantissaBits);
  __ Lsl(result, mantissa, exponent);

  __ Bind(&done);
  if (!skip_fastpath()) {
    __ Pop(double_scratch);
  }
  __ Pop(scratch2, scratch1);
  __ Ret();
}


// See call site for description.
static void EmitIdenticalObjectComparison(MacroAssembler* masm,
                                          Register left,
                                          Register right,
                                          Register scratch,
                                          FPRegister double_scratch,
                                          Label* slow,
                                          Condition cond) {
  ASSERT(!AreAliased(left, right, scratch));
  Label not_identical, return_equal, heap_number;
  Register result = x0;

  __ Cmp(right, left);
  __ B(ne, &not_identical);

  // Test for NaN. Sadly, we can't just compare to factory::nan_value(),
  // so we do the second best thing - test it ourselves.
  // They are both equal and they are not both Smis so both of them are not
  // Smis. If it's not a heap number, then return equal.
  if ((cond == lt) || (cond == gt)) {
    __ JumpIfObjectType(right, scratch, scratch, FIRST_SPEC_OBJECT_TYPE, slow,
                        ge);
  } else {
    Register right_type = scratch;
    __ JumpIfObjectType(right, right_type, right_type, HEAP_NUMBER_TYPE,
                        &heap_number);
    // Comparing JS objects with <=, >= is complicated.
    if (cond != eq) {
      __ Cmp(right_type, FIRST_SPEC_OBJECT_TYPE);
      __ B(ge, slow);
      // Normally here we fall through to return_equal, but undefined is
      // special: (undefined == undefined) == true, but
      // (undefined <= undefined) == false! See ECMAScript 11.8.5.
      if ((cond == le) || (cond == ge)) {
        __ Cmp(right_type, ODDBALL_TYPE);
        __ B(ne, &return_equal);
        __ JumpIfNotRoot(right, Heap::kUndefinedValueRootIndex, &return_equal);
        if (cond == le) {
          // undefined <= undefined should fail.
          __ Mov(result, GREATER);
        } else {
          // undefined >= undefined should fail.
          __ Mov(result, LESS);
        }
        __ Ret();
      }
    }
  }

  __ Bind(&return_equal);
  if (cond == lt) {
    __ Mov(result, GREATER);  // Things aren't less than themselves.
  } else if (cond == gt) {
    __ Mov(result, LESS);     // Things aren't greater than themselves.
  } else {
    __ Mov(result, EQUAL);    // Things are <=, >=, ==, === themselves.
  }
  __ Ret();

  // Cases lt and gt have been handled earlier, and case ne is never seen, as
  // it is handled in the parser (see Parser::ParseBinaryExpression). We are
  // only concerned with cases ge, le and eq here.
  if ((cond != lt) && (cond != gt)) {
    ASSERT((cond == ge) || (cond == le) || (cond == eq));
    __ Bind(&heap_number);
    // Left and right are identical pointers to a heap number object. Return
    // non-equal if the heap number is a NaN, and equal otherwise. Comparing
    // the number to itself will set the overflow flag iff the number is NaN.
    __ Ldr(double_scratch, FieldMemOperand(right, HeapNumber::kValueOffset));
    __ Fcmp(double_scratch, double_scratch);
    __ B(vc, &return_equal);  // Not NaN, so treat as normal heap number.

    if (cond == le) {
      __ Mov(result, GREATER);
    } else {
      __ Mov(result, LESS);
    }
    __ Ret();
  }

  // No fall through here.
  if (FLAG_debug_code) {
    __ Unreachable();
  }

  __ Bind(&not_identical);
}


// See call site for description.
static void EmitStrictTwoHeapObjectCompare(MacroAssembler* masm,
                                           Register left,
                                           Register right,
                                           Register left_type,
                                           Register right_type,
                                           Register scratch) {
  ASSERT(!AreAliased(left, right, left_type, right_type, scratch));

  if (masm->emit_debug_code()) {
    // We assume that the arguments are not identical.
    __ Cmp(left, right);
    __ Assert(ne, kExpectedNonIdenticalObjects);
  }

  // If either operand is a JS object or an oddball value, then they are not
  // equal since their pointers are different.
  // There is no test for undetectability in strict equality.
  STATIC_ASSERT(LAST_TYPE == LAST_SPEC_OBJECT_TYPE);
  Label right_non_object;

  __ Cmp(right_type, FIRST_SPEC_OBJECT_TYPE);
  __ B(lt, &right_non_object);

  // Return non-zero - x0 already contains a non-zero pointer.
  ASSERT(left.is(x0) || right.is(x0));
  Label return_not_equal;
  __ Bind(&return_not_equal);
  __ Ret();

  __ Bind(&right_non_object);

  // Check for oddballs: true, false, null, undefined.
  __ Cmp(right_type, ODDBALL_TYPE);

  // If right is not ODDBALL, test left. Otherwise, set eq condition.
  __ Ccmp(left_type, ODDBALL_TYPE, ZFlag, ne);

  // If right or left is not ODDBALL, test left >= FIRST_SPEC_OBJECT_TYPE.
  // Otherwise, right or left is ODDBALL, so set a ge condition.
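  // Net effect: the branch below is taken if either operand is an oddball or
  // if left is a JS object. (A JS object on the right was already handled
  // above.)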
  __ Ccmp(left_type, FIRST_SPEC_OBJECT_TYPE, NVFlag, ne);

  __ B(ge, &return_not_equal);

  // Internalized strings are unique, so they can only be equal if they are
  // the same object. We have already tested that case, so if left and right
  // are both internalized strings, they cannot be equal.
  STATIC_ASSERT((kInternalizedTag == 0) && (kStringTag == 0));
  __ Orr(scratch, left_type, right_type);
  __ TestAndBranchIfAllClear(
      scratch, kIsNotStringMask | kIsNotInternalizedMask, &return_not_equal);
}


// See call site for description.
static void EmitSmiNonsmiComparison(MacroAssembler* masm,
                                    Register left,
                                    Register right,
                                    FPRegister left_d,
                                    FPRegister right_d,
                                    Register scratch,
                                    Label* slow,
                                    bool strict) {
  ASSERT(!AreAliased(left, right, scratch));
  ASSERT(!AreAliased(left_d, right_d));
  ASSERT((left.is(x0) && right.is(x1)) ||
         (right.is(x0) && left.is(x1)));
  Register result = x0;

  Label right_is_smi, done;
  __ JumpIfSmi(right, &right_is_smi);

  // Left is the smi. Check whether right is a heap number.
  if (strict) {
    // If right is not a number and left is a smi, then strict equality cannot
    // succeed. Return non-equal.
    Label is_heap_number;
    __ JumpIfObjectType(right, scratch, scratch, HEAP_NUMBER_TYPE,
                        &is_heap_number);
    // Register right is a non-zero pointer, which is a valid NOT_EQUAL
    // result.
    if (!right.is(result)) {
      __ Mov(result, NOT_EQUAL);
    }
    __ Ret();
    __ Bind(&is_heap_number);
  } else {
    // Smi compared non-strictly with a non-smi, non-heap-number. Call the
    // runtime.
    __ JumpIfNotObjectType(right, scratch, scratch, HEAP_NUMBER_TYPE, slow);
  }

  // Left is the smi. Right is a heap number. Load right value into right_d,
  // and convert left smi into double in left_d.
  __ Ldr(right_d, FieldMemOperand(right, HeapNumber::kValueOffset));
  __ SmiUntagToDouble(left_d, left);
  __ B(&done);

  __ Bind(&right_is_smi);
  // Right is a smi. Check whether the non-smi left is a heap number.
  if (strict) {
    // If left is not a number and right is a smi then strict equality cannot
    // succeed. Return non-equal.
    Label is_heap_number;
    __ JumpIfObjectType(left, scratch, scratch, HEAP_NUMBER_TYPE,
                        &is_heap_number);
    // Register left is a non-zero pointer, which is a valid NOT_EQUAL result.
    if (!left.is(result)) {
      __ Mov(result, NOT_EQUAL);
    }
    __ Ret();
    __ Bind(&is_heap_number);
  } else {
    // Smi compared non-strictly with a non-smi, non-heap-number. Call the
    // runtime.
    __ JumpIfNotObjectType(left, scratch, scratch, HEAP_NUMBER_TYPE, slow);
  }

  // Right is the smi. Left is a heap number. Load left value into left_d, and
  // convert right smi into double in right_d.
  __ Ldr(left_d, FieldMemOperand(left, HeapNumber::kValueOffset));
  __ SmiUntagToDouble(right_d, right);

  // Fall through to both_loaded_as_doubles.
  __ Bind(&done);
}


// Fast negative check for internalized-to-internalized equality.
// See call site for description.
static void EmitCheckForInternalizedStringsOrObjects(MacroAssembler* masm,
                                                     Register left,
                                                     Register right,
                                                     Register left_map,
                                                     Register right_map,
                                                     Register left_type,
                                                     Register right_type,
                                                     Label* possible_strings,
                                                     Label* not_both_strings) {
  ASSERT(!AreAliased(left, right, left_map, right_map, left_type, right_type));
  Register result = x0;

  Label object_test;
  STATIC_ASSERT((kInternalizedTag == 0) && (kStringTag == 0));
  // TODO(all): reexamine this branch sequence for optimisation wrt branch
  // prediction.
  __ Tbnz(right_type, MaskToBit(kIsNotStringMask), &object_test);
  __ Tbnz(right_type, MaskToBit(kIsNotInternalizedMask), possible_strings);
  __ Tbnz(left_type, MaskToBit(kIsNotStringMask), not_both_strings);
  __ Tbnz(left_type, MaskToBit(kIsNotInternalizedMask), possible_strings);

  // Both are internalized. We already checked that they weren't the same
  // pointer, so they are not equal.
  __ Mov(result, NOT_EQUAL);
  __ Ret();

  __ Bind(&object_test);

  __ Cmp(right_type, FIRST_SPEC_OBJECT_TYPE);

  // If right >= FIRST_SPEC_OBJECT_TYPE, test left.
  // Otherwise, right < FIRST_SPEC_OBJECT_TYPE, so set lt condition.
  __ Ccmp(left_type, FIRST_SPEC_OBJECT_TYPE, NFlag, ge);

  __ B(lt, not_both_strings);

  // If both objects are undetectable, they are equal. Otherwise, they are not
  // equal, since they are different objects and an object is not equal to
  // undefined.

  // Returning here, so we can corrupt right_type and left_type.
  Register right_bitfield = right_type;
  Register left_bitfield = left_type;
  __ Ldrb(right_bitfield, FieldMemOperand(right_map, Map::kBitFieldOffset));
  __ Ldrb(left_bitfield, FieldMemOperand(left_map, Map::kBitFieldOffset));
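  // The next three instructions compute
  //   ((right & left) & (1 << kIsUndetectable)) ^ (1 << kIsUndetectable),
  // which is zero (EQUAL) exactly when both bit fields have kIsUndetectable
  // set, and non-zero (a valid not-equal result) otherwise.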
  __ And(result, right_bitfield, left_bitfield);
  __ And(result, result, 1 << Map::kIsUndetectable);
  __ Eor(result, result, 1 << Map::kIsUndetectable);
  __ Ret();
}


static void ICCompareStub_CheckInputType(MacroAssembler* masm,
                                         Register input,
                                         Register scratch,
                                         CompareIC::State expected,
                                         Label* fail) {
  Label ok;
  if (expected == CompareIC::SMI) {
    __ JumpIfNotSmi(input, fail);
  } else if (expected == CompareIC::NUMBER) {
    __ JumpIfSmi(input, &ok);
    __ CheckMap(input, scratch, Heap::kHeapNumberMapRootIndex, fail,
                DONT_DO_SMI_CHECK);
  }
  // We could be strict about internalized/non-internalized here, but as long
  // as hydrogen doesn't care, the stub doesn't have to care either.
  __ Bind(&ok);
}


void ICCompareStub::GenerateGeneric(MacroAssembler* masm) {
  Register lhs = x1;
  Register rhs = x0;
  Register result = x0;
  Condition cond = GetCondition();

  Label miss;
  ICCompareStub_CheckInputType(masm, lhs, x2, left_, &miss);
  ICCompareStub_CheckInputType(masm, rhs, x3, right_, &miss);

  Label slow;  // Call builtin.
  Label not_smis, both_loaded_as_doubles;
  Label not_two_smis, smi_done;
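  // Smi fast path: for two smis, the sign of the untagged difference
  // (lhs - rhs) directly encodes less (<0), equal (0) or greater (>0), so it
  // can be returned as the comparison result without further work.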
  __ JumpIfEitherNotSmi(lhs, rhs, &not_two_smis);
  __ SmiUntag(lhs);
  __ Sub(result, lhs, Operand::UntagSmi(rhs));
  __ Ret();

  __ Bind(&not_two_smis);

  // NOTICE! This code is only reached after a smi-fast-case check, so it is
  // certain that at least one operand isn't a smi.

  // Handle the case where the objects are identical. Either returns the
  // answer or goes to slow. Only falls through if the objects were not
  // identical.
  EmitIdenticalObjectComparison(masm, lhs, rhs, x10, d0, &slow, cond);

  // If either is a smi (we know that at least one is not a smi), then they
  // can only be strictly equal if the other is a HeapNumber.
  __ JumpIfBothNotSmi(lhs, rhs, &not_smis);

  // Exactly one operand is a smi. EmitSmiNonsmiComparison generates code that
  // can:
  //  1) Return the answer.
  //  2) Branch to the slow case.
  //  3) Fall through to both_loaded_as_doubles.
  // In case 3, we have found out that we were dealing with a number-number
  // comparison. The double values of the numbers have been loaded, right into
  // rhs_d, left into lhs_d.
  FPRegister rhs_d = d0;
  FPRegister lhs_d = d1;
  EmitSmiNonsmiComparison(masm, lhs, rhs, lhs_d, rhs_d, x10, &slow, strict());

  __ Bind(&both_loaded_as_doubles);
  // The arguments have been converted to doubles and stored in rhs_d and
  // lhs_d.
  Label nan;
  __ Fcmp(lhs_d, rhs_d);
  __ B(vs, &nan);  // Overflow flag set if either is NaN.
  STATIC_ASSERT((LESS == -1) && (EQUAL == 0) && (GREATER == 1));
  __ Cset(result, gt);  // gt => 1, otherwise (lt, eq) => 0 (EQUAL).
  __ Csinv(result, result, xzr, ge);  // lt => -1, gt => 1, eq => 0.
  __ Ret();

  __ Bind(&nan);
  // Left and/or right is a NaN. Load the result register with whatever makes
  // the comparison fail, since comparisons with NaN always fail (except ne,
  // which is filtered out at a higher level.)
  ASSERT(cond != ne);
  if ((cond == lt) || (cond == le)) {
    __ Mov(result, GREATER);
  } else {
    __ Mov(result, LESS);
  }
  __ Ret();

  __ Bind(&not_smis);
  // At this point we know we are dealing with two different objects, and
  // neither of them is a smi. The objects are in rhs_ and lhs_.

  // Load the maps and types of the objects.
  Register rhs_map = x10;
  Register rhs_type = x11;
  Register lhs_map = x12;
  Register lhs_type = x13;
  __ Ldr(rhs_map, FieldMemOperand(rhs, HeapObject::kMapOffset));
  __ Ldr(lhs_map, FieldMemOperand(lhs, HeapObject::kMapOffset));
  __ Ldrb(rhs_type, FieldMemOperand(rhs_map, Map::kInstanceTypeOffset));
  __ Ldrb(lhs_type, FieldMemOperand(lhs_map, Map::kInstanceTypeOffset));

  if (strict()) {
    // This emits a non-equal return sequence for some object types, or falls
    // through if it was not lucky.
    EmitStrictTwoHeapObjectCompare(masm, lhs, rhs, lhs_type, rhs_type, x14);
  }

  Label check_for_internalized_strings;
  Label flat_string_check;
  // Check for heap number comparison. Branch to earlier double comparison
  // code if they are heap numbers, otherwise, branch to internalized string
  // check.
  __ Cmp(rhs_type, HEAP_NUMBER_TYPE);
  __ B(ne, &check_for_internalized_strings);
  __ Cmp(lhs_map, rhs_map);

  // If maps aren't equal, lhs_ and rhs_ are not heap numbers. Branch to flat
  // string check.
  __ B(ne, &flat_string_check);

  // Both lhs_ and rhs_ are heap numbers. Load them and branch to the double
  // comparison code.
  __ Ldr(lhs_d, FieldMemOperand(lhs, HeapNumber::kValueOffset));
  __ Ldr(rhs_d, FieldMemOperand(rhs, HeapNumber::kValueOffset));
  __ B(&both_loaded_as_doubles);

  __ Bind(&check_for_internalized_strings);
  // In the strict case, the EmitStrictTwoHeapObjectCompare already took care
  // of internalized strings.
  if ((cond == eq) && !strict()) {
    // Returns an answer for two internalized strings or two detectable
    // objects. Otherwise branches to the string case or not both strings
    // case.
    EmitCheckForInternalizedStringsOrObjects(masm, lhs, rhs, lhs_map, rhs_map,
                                             lhs_type, rhs_type,
                                             &flat_string_check, &slow);
  }

  // Check for both being sequential ASCII strings, and inline if that is the
  // case.
  __ Bind(&flat_string_check);
  __ JumpIfBothInstanceTypesAreNotSequentialAscii(lhs_type, rhs_type, x14,
                                                  x15, &slow);

  Isolate* isolate = masm->isolate();
  __ IncrementCounter(isolate->counters()->string_compare_native(), 1, x10,
                      x11);
  if (cond == eq) {
    StringCompareStub::GenerateFlatAsciiStringEquals(masm, lhs, rhs,
                                                     x10, x11, x12);
  } else {
    StringCompareStub::GenerateCompareFlatAsciiStrings(masm, lhs, rhs,
                                                       x10, x11, x12, x13);
  }

  // Never fall through to here.
  if (FLAG_debug_code) {
    __ Unreachable();
  }

  __ Bind(&slow);

  __ Push(lhs, rhs);
  // Figure out which native to call and setup the arguments.
  Builtins::JavaScript native;
  if (cond == eq) {
    native = strict() ? Builtins::STRICT_EQUALS : Builtins::EQUALS;
  } else {
    native = Builtins::COMPARE;
    int ncr;  // NaN compare result
    if ((cond == lt) || (cond == le)) {
      ncr = GREATER;
    } else {
      ASSERT((cond == gt) || (cond == ge));  // remaining cases
      ncr = LESS;
    }
    __ Mov(x10, Smi::FromInt(ncr));
    __ Push(x10);
  }

  // Call the native; it returns -1 (less), 0 (equal), or 1 (greater)
  // tagged as a small integer.
  __ InvokeBuiltin(native, JUMP_FUNCTION);

  __ Bind(&miss);
  GenerateMiss(masm);
}


void StoreBufferOverflowStub::Generate(MacroAssembler* masm) {
  // Preserve caller-saved registers x0-x7 and x10-x15. We don't care if x8,
  // x9, ip0 and ip1 are corrupted by the call into C.
  CPURegList saved_regs = kCallerSaved;
  saved_regs.Remove(ip0);
  saved_regs.Remove(ip1);
  saved_regs.Remove(x8);
  saved_regs.Remove(x9);

  // We don't allow a GC during a store buffer overflow so there is no need to
  // store the registers in any particular way, but we do have to store and
  // restore them.
  __ PushCPURegList(saved_regs);
  if (save_doubles_ == kSaveFPRegs) {
    __ PushCPURegList(kCallerSavedFP);
  }

  AllowExternalCallThatCantCauseGC scope(masm);
  __ Mov(x0, ExternalReference::isolate_address(masm->isolate()));
  __ CallCFunction(
      ExternalReference::store_buffer_overflow_function(masm->isolate()),
      1, 0);

  if (save_doubles_ == kSaveFPRegs) {
    __ PopCPURegList(kCallerSavedFP);
  }
  __ PopCPURegList(saved_regs);
  __ Ret();
}


void StoreBufferOverflowStub::GenerateFixedRegStubsAheadOfTime(
    Isolate* isolate) {
  StoreBufferOverflowStub stub1(kDontSaveFPRegs);
  stub1.GetCode(isolate);
  StoreBufferOverflowStub stub2(kSaveFPRegs);
  stub2.GetCode(isolate);
}


void MathPowStub::Generate(MacroAssembler* masm) {
  // Stack on entry:
  // jssp[0]: Exponent (as a tagged value).
  // jssp[1]: Base (as a tagged value).
  //
  // The (tagged) result will be returned in x0, as a heap number.

  Register result_tagged = x0;
  Register base_tagged = x10;
  Register exponent_tagged = x11;
  Register exponent_integer = x12;
  Register scratch1 = x14;
  Register scratch0 = x15;
  Register saved_lr = x19;
  FPRegister result_double = d0;
  FPRegister base_double = d0;
  FPRegister exponent_double = d1;
  FPRegister base_double_copy = d2;
  FPRegister scratch1_double = d6;
  FPRegister scratch0_double = d7;

  // A fast-path for integer exponents.
  Label exponent_is_smi, exponent_is_integer;
  // Bail out to runtime.
  Label call_runtime;
  // Allocate a heap number for the result, and return it.
  Label done;

  // Unpack the inputs.
  if (exponent_type_ == ON_STACK) {
    Label base_is_smi;
    Label unpack_exponent;

    __ Pop(exponent_tagged, base_tagged);

    __ JumpIfSmi(base_tagged, &base_is_smi);
    __ JumpIfNotHeapNumber(base_tagged, &call_runtime);
    // base_tagged is a heap number, so load its double value.
    __ Ldr(base_double, FieldMemOperand(base_tagged,
                                        HeapNumber::kValueOffset));
    __ B(&unpack_exponent);
    __ Bind(&base_is_smi);
    // base_tagged is a SMI, so untag it and convert it to a double.
    __ SmiUntagToDouble(base_double, base_tagged);

    __ Bind(&unpack_exponent);
    //   x10   base_tagged       The tagged base (input).
    //   x11   exponent_tagged   The tagged exponent (input).
    //   d0    base_double       The base as a double.
    __ JumpIfSmi(exponent_tagged, &exponent_is_smi);
    __ JumpIfNotHeapNumber(exponent_tagged, &call_runtime);
    // exponent_tagged is a heap number, so load its double value.
    __ Ldr(exponent_double,
           FieldMemOperand(exponent_tagged, HeapNumber::kValueOffset));
  } else if (exponent_type_ == TAGGED) {
    __ JumpIfSmi(exponent_tagged, &exponent_is_smi);
    __ Ldr(exponent_double,
           FieldMemOperand(exponent_tagged, HeapNumber::kValueOffset));
  }

  // Handle double (heap number) exponents.
  if (exponent_type_ != INTEGER) {
    // Detect integer exponents stored as doubles and handle those in the
    // integer fast-path.
    __ TryConvertDoubleToInt64(exponent_integer, exponent_double,
                               scratch0_double, &exponent_is_integer);

    if (exponent_type_ == ON_STACK) {
      FPRegister half_double = d3;
      FPRegister minus_half_double = d4;
      // Detect square root case. Crankshaft detects constant +/-0.5 at
      // compile time and uses DoMathPowHalf instead. We then skip this check
      // for non-constant cases of +/-0.5 as these hardly occur.

      __ Fmov(minus_half_double, -0.5);
      __ Fmov(half_double, 0.5);
      __ Fcmp(minus_half_double, exponent_double);
      __ Fccmp(half_double, exponent_double, NZFlag, ne);
      // Condition flags at this point:
      //  0.5:   nZCv  // Identified by eq && pl
      // -0.5:   NZcv  // Identified by eq && mi
      // other:  ?z??  // Identified by ne
      __ B(ne, &call_runtime);

      // The exponent is 0.5 or -0.5.

      // Given that exponent is known to be either 0.5 or -0.5, the following
      // special cases could apply (according to ECMA-262 15.8.2.13):
      //
      // base.isNaN():                   The result is NaN.
      // (base == +INFINITY) || (base == -INFINITY)
      //   exponent == 0.5:              The result is +INFINITY.
      //   exponent == -0.5:             The result is +0.
      // (base == +0) || (base == -0)
      //   exponent == 0.5:              The result is +0.
      //   exponent == -0.5:             The result is +INFINITY.
      // (base < 0) && base.isFinite():  The result is NaN.
      //
      // Fsqrt (and Fdiv for the -0.5 case) can handle all of those except
      // where base is -INFINITY or -0.

      // Add +0 to base. This has no effect other than turning -0 into +0.
      __ Fadd(base_double, base_double, fp_zero);
      // The operation -0+0 results in +0 in all cases except where the
      // FPCR rounding mode is 'round towards minus infinity' (RM). The
      // A64 simulator does not currently simulate FPCR (where the rounding
      // mode is set), so test the operation with some debug code.
      if (masm->emit_debug_code()) {
        UseScratchRegisterScope temps(masm);
        Register temp = temps.AcquireX();
        __ Fneg(scratch0_double, fp_zero);
        // Verify that we correctly generated +0.0 and -0.0.
        //   bits(+0.0) = 0x0000000000000000
        //   bits(-0.0) = 0x8000000000000000
        __ Fmov(temp, fp_zero);
        __ CheckRegisterIsClear(temp, kCouldNotGenerateZero);
        __ Fmov(temp, scratch0_double);
        __ Eor(temp, temp, kDSignMask);
        __ CheckRegisterIsClear(temp, kCouldNotGenerateNegativeZero);
        // Check that -0.0 + 0.0 == +0.0.
        __ Fadd(scratch0_double, scratch0_double, fp_zero);
        __ Fmov(temp, scratch0_double);
        __ CheckRegisterIsClear(temp, kExpectedPositiveZero);
      }

      // If base is -INFINITY, make it +INFINITY.
      //  * Calculate base - base: All infinities will become NaNs since both
      //    -INFINITY+INFINITY and +INFINITY-INFINITY are NaN in A64.
      //  * If the result is NaN, calculate abs(base).
      __ Fsub(scratch0_double, base_double, base_double);
      __ Fcmp(scratch0_double, 0.0);
      __ Fabs(scratch1_double, base_double);
      __ Fcsel(base_double, scratch1_double, base_double, vs);

      // Calculate the square root of base.
      __ Fsqrt(result_double, base_double);
      __ Fcmp(exponent_double, 0.0);
      __ B(ge, &done);  // Finish now for exponents of 0.5.
      // Find the inverse for exponents of -0.5.
      __ Fmov(scratch0_double, 1.0);
      __ Fdiv(result_double, scratch0_double, result_double);
      __ B(&done);
    }

    {
      AllowExternalCallThatCantCauseGC scope(masm);
      __ Mov(saved_lr, lr);
      __ CallCFunction(
          ExternalReference::power_double_double_function(masm->isolate()),
          0, 2);
      __ Mov(lr, saved_lr);
      __ B(&done);
    }

    // Handle SMI exponents.
    __ Bind(&exponent_is_smi);
    //   x10   base_tagged       The tagged base (input).
    //   x11   exponent_tagged   The tagged exponent (input).
    //   d0    base_double       The base as a double.
    __ SmiUntag(exponent_integer, exponent_tagged);
  }

  __ Bind(&exponent_is_integer);
  //   x10   base_tagged       The tagged base (input).
  //   x11   exponent_tagged   The tagged exponent (input).
  //   x12   exponent_integer  The exponent as an integer.
  //   d0    base_double       The base as a double.

  // Find abs(exponent). For negative exponents, we can find the inverse
  // later.
  Register exponent_abs = x13;
  __ Cmp(exponent_integer, 0);
  __ Cneg(exponent_abs, exponent_integer, mi);
  //   x13   exponent_abs      The value of abs(exponent_integer).

  // Repeatedly multiply to calculate the power.
  //  result = 1.0;
  //  For each bit n (exponent_integer{n}) {
  //    if (exponent_integer{n}) {
  //      result *= base;
  //    }
  //    base *= base;
  //    if (remaining bits in exponent_integer are all zero) {
  //      break;
  //    }
  //  }
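  // For example, with exponent_abs == 13 (binary 1101), the loop computes
  // result = base * base^4 * base^8 == base^13: the running base is squared
  // once per bit, and multiplied into the result where the bit is set.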
1320 Label power_loop, power_loop_entry, power_loop_exit; | |
1321 __ Fmov(scratch1_double, base_double); | |
1322 __ Fmov(base_double_copy, base_double); | |
1323 __ Fmov(result_double, 1.0); | |
1324 __ B(&power_loop_entry); | |
1325 | |
1326 __ Bind(&power_loop); | |
1327 __ Fmul(scratch1_double, scratch1_double, scratch1_double); | |
1328 __ Lsr(exponent_abs, exponent_abs, 1); | |
1329 __ Cbz(exponent_abs, &power_loop_exit); | |
1330 | |
1331 __ Bind(&power_loop_entry); | |
1332 __ Tbz(exponent_abs, 0, &power_loop); | |
1333 __ Fmul(result_double, result_double, scratch1_double); | |
1334 __ B(&power_loop); | |
1335 | |
1336 __ Bind(&power_loop_exit); | |
1337 | |
1338 // If the exponent was positive, result_double holds the result. | |
1339 __ Tbz(exponent_integer, kXSignBit, &done); | |
1340 | |
1341 // The exponent was negative, so find the inverse. | |
1342 __ Fmov(scratch0_double, 1.0); | |
1343 __ Fdiv(result_double, scratch0_double, result_double); | |
1344 // ECMA-262 only requires Math.pow to return an 'implementation-dependent | |
1345 // approximation' of base^exponent. However, mjsunit/math-pow uses Math.pow | |
1346 // to calculate the subnormal value 2^-1074. This method of calculating | |
1347 // negative powers doesn't work because 2^1074 overflows to infinity. To | |
1348 // catch this corner-case, we bail out if the result was 0. (This can only | |
1349 // occur if the divisor is infinity or the base is zero.) | |
1350 __ Fcmp(result_double, 0.0); | |
1351 __ B(&done, ne); | |
1352 | |
1353 if (exponent_type_ == ON_STACK) { | |
1354 // Bail out to runtime code. | |
1355 __ Bind(&call_runtime); | |
1356 // Put the arguments back on the stack. | |
1357 __ Push(base_tagged, exponent_tagged); | |
1358 __ TailCallRuntime(Runtime::kMath_pow_cfunction, 2, 1); | |
1359 | |
1360 // Return. | |
1361 __ Bind(&done); | |
1362 __ AllocateHeapNumber(result_tagged, &call_runtime, scratch0, scratch1); | |
1363 __ Str(result_double, | |
1364 FieldMemOperand(result_tagged, HeapNumber::kValueOffset)); | |
1365 ASSERT(result_tagged.is(x0)); | |
1366 __ IncrementCounter( | |
1367 masm->isolate()->counters()->math_pow(), 1, scratch0, scratch1); | |
1368 __ Ret(); | |
1369 } else { | |
1370 AllowExternalCallThatCantCauseGC scope(masm); | |
1371 __ Mov(saved_lr, lr); | |
1372 __ Fmov(base_double, base_double_copy); | |
1373 __ Scvtf(exponent_double, exponent_integer); | |
1374 __ CallCFunction( | |
1375 ExternalReference::power_double_double_function(masm->isolate()), | |
1376 0, 2); | |
1377 __ Mov(lr, saved_lr); | |
1378 __ Bind(&done); | |
1379 __ IncrementCounter( | |
1380 masm->isolate()->counters()->math_pow(), 1, scratch0, scratch1); | |
1381 __ Ret(); | |
1382 } | |
1383 } | |
1384 | |
1385 | |
1386 void CodeStub::GenerateStubsAheadOfTime(Isolate* isolate) { | |
1387 // It is important that the following stubs are generated in this order | |
1388 // because pregenerated stubs can only call other pregenerated stubs. | |
1389 // RecordWriteStub uses StoreBufferOverflowStub, which in turn uses | |
1390 // CEntryStub. | |
1391 CEntryStub::GenerateAheadOfTime(isolate); | |
1392 StoreBufferOverflowStub::GenerateFixedRegStubsAheadOfTime(isolate); | |
1393 StubFailureTrampolineStub::GenerateAheadOfTime(isolate); | |
1394 ArrayConstructorStubBase::GenerateStubsAheadOfTime(isolate); | |
1395 CreateAllocationSiteStub::GenerateAheadOfTime(isolate); | |
1396 BinaryOpICStub::GenerateAheadOfTime(isolate); | |
1397 BinaryOpICWithAllocationSiteStub::GenerateAheadOfTime(isolate); | |
1398 } | |
1399 | |
1400 | |
1401 void CodeStub::GenerateFPStubs(Isolate* isolate) { | |
1402 // Floating-point code doesn't get special handling in A64, so there's | |
1403 // nothing to do here. | |
1404 USE(isolate); | |
1405 } | |
1406 | |
1407 | |
1408 static void JumpIfOOM(MacroAssembler* masm, | |
1409 Register value, | |
1410 Register scratch, | |
1411 Label* oom_label) { | |
1412 STATIC_ASSERT(Failure::OUT_OF_MEMORY_EXCEPTION == 3); | |
1413 STATIC_ASSERT(kFailureTag == 3); | |
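// Bit layout (a sketch; assumes kFailureTagSize == 2): a Failure keeps
// kFailureTag (0b11) in its two lowest bits and the failure type in the two
// bits above that. OUT_OF_MEMORY_EXCEPTION == 3 == 0b11, so an OOM failure
// is exactly the value whose low nibble is 0b1111 == 0xf, which is what the
// mask-and-compare below tests.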
1414 __ And(scratch, value, 0xf); | |
1415 __ Cmp(scratch, 0xf); | |
1416 __ B(eq, oom_label); | |
1417 } | |
1418 | |
1419 | |
1420 bool CEntryStub::NeedsImmovableCode() { | |
1421 // CEntryStub stores the return address on the stack before calling into | |
1422 // C++ code. In some cases, the VM accesses this address, but it is not used | |
1423 // when the C++ code returns to the stub because LR holds the return address | |
1424 // in AAPCS64. If the stub is moved (perhaps during a GC), we could end up | |
1425 // returning to dead code. | |
1426 // TODO(jbramley): Whilst this is the only analysis that makes sense, I can't | |
1427 // find any comment to confirm it, and I don't hit any crashes whatever this | |
1428 // function returns. The analysis should be properly confirmed. | |
1429 return true; | |
1430 } | |
1431 | |
1432 | |
1433 void CEntryStub::GenerateAheadOfTime(Isolate* isolate) { | |
1434 CEntryStub stub(1, kDontSaveFPRegs); | |
1435 stub.GetCode(isolate); | |
1436 CEntryStub stub_fp(1, kSaveFPRegs); | |
1437 stub_fp.GetCode(isolate); | |
1438 } | |
1439 | |
1440 | |
1441 void CEntryStub::GenerateCore(MacroAssembler* masm, | |
1442 Label* throw_normal, | |
1443 Label* throw_termination, | |
1444 Label* throw_out_of_memory, | |
1445 bool do_gc, | |
1446 bool always_allocate) { | |
1447 // x0 : Result parameter for PerformGC, if do_gc is true. | |
1448 // x21 : argv | |
1449 // x22 : argc | |
1450 // x23 : target | |
1451 // | |
1452 // The stack (on entry) holds the arguments and the receiver, with the | |
1453 // receiver at the highest address: | |
1454 // | |
1455 // argv[8]: receiver | |
1456 // argv -> argv[0]: arg[argc-2] | |
1457 // ... ... | |
1458 // argv[...]: arg[1] | |
1459 // argv[...]: arg[0] | |
1460 // | |
1461 // Immediately below (after) this is the exit frame, as constructed by | |
1462 // EnterExitFrame: | |
1463 // fp[8]: CallerPC (lr) | |
1464 // fp -> fp[0]: CallerFP (old fp) | |
1465 // fp[-8]: Space reserved for SPOffset. | |
1466 // fp[-16]: CodeObject() | |
1467 // csp[...]: Saved doubles, if saved_doubles is true. | |
1468 // csp[32]: Alignment padding, if necessary. | |
1469 // csp[24]: Preserved x23 (used for target). | |
1470 // csp[16]: Preserved x22 (used for argc). | |
1471 // csp[8]: Preserved x21 (used for argv). | |
1472 // csp -> csp[0]: Space reserved for the return address. | |
1473 // | |
1474 // After a successful call, the exit frame, preserved registers (x21-x23) and | |
1475 // the arguments (including the receiver) are dropped or popped as | |
1476 // appropriate. The stub then returns. | |
1477 // | |
1478 // After an unsuccessful call, the exit frame and suchlike are left | |
1479 // untouched, and the stub either throws an exception by jumping to one of | |
1480 // the provided throw_ labels, or it falls through. The failure details are | |
1481 // passed through in x0. | |
1482 ASSERT(csp.Is(__ StackPointer())); | |
1483 | |
1484 Isolate* isolate = masm->isolate(); | |
1485 | |
1486 const Register& argv = x21; | |
1487 const Register& argc = x22; | |
1488 const Register& target = x23; | |
1489 | |
1490 if (do_gc) { | |
1491 // Call Runtime::PerformGC, passing x0 (the result parameter for | |
1492 // PerformGC) and x1 (the isolate). | |
1493 __ Mov(x1, ExternalReference::isolate_address(masm->isolate())); | |
1494 __ CallCFunction( | |
1495 ExternalReference::perform_gc_function(isolate), 2, 0); | |
1496 } | |
1497 | |
1498 ExternalReference scope_depth = | |
1499 ExternalReference::heap_always_allocate_scope_depth(isolate); | |
1500 if (always_allocate) { | |
1501 __ Mov(x10, Operand(scope_depth)); | |
1502 __ Ldr(x11, MemOperand(x10)); | |
1503 __ Add(x11, x11, 1); | |
1504 __ Str(x11, MemOperand(x10)); | |
1505 } | |
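// Incrementing this counter hand-inlines entering the heap's always-allocate
// scope (the generated-code analogue of C++'s AlwaysAllocateScope; assumed
// correspondence): while the depth is non-zero, allocations are treated as
// always-allocate. The matching decrement after the call, below, leaves the
// scope again.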
1506 | |
1507 // Prepare AAPCS64 arguments to pass to the builtin. | |
1508 __ Mov(x0, argc); | |
1509 __ Mov(x1, argv); | |
1510 __ Mov(x2, ExternalReference::isolate_address(isolate)); | |
1511 | |
1512 // Store the return address on the stack, in the space previously allocated | |
1513 // by EnterExitFrame. The return address is queried by | |
1514 // ExitFrame::GetStateForFramePointer. | |
1515 Label return_location; | |
1516 __ Adr(x12, &return_location); | |
1517 __ Poke(x12, 0); | |
1518 if (__ emit_debug_code()) { | |
1519 // Verify that the slot one word below the address held at fp[kSPOffset] | |
1520 // contains the return location (currently in x12). | |
1521 UseScratchRegisterScope temps(masm); | |
1522 Register temp = temps.AcquireX(); | |
1523 __ Ldr(temp, MemOperand(fp, ExitFrameConstants::kSPOffset)); | |
1524 __ Ldr(temp, MemOperand(temp, -static_cast<int64_t>(kXRegSize))); | |
1525 __ Cmp(temp, x12); | |
1526 __ Check(eq, kReturnAddressNotFoundInFrame); | |
1527 } | |
1528 | |
1529 // Call the builtin. | |
1530 __ Blr(target); | |
1531 __ Bind(&return_location); | |
1532 const Register& result = x0; | |
1533 | |
1534 if (always_allocate) { | |
1535 __ Mov(x10, Operand(scope_depth)); | |
1536 __ Ldr(x11, MemOperand(x10)); | |
1537 __ Sub(x11, x11, 1); | |
1538 __ Str(x11, MemOperand(x10)); | |
1539 } | |
1540 | |
1541 // x0 result The return code from the call. | |
1542 // x21 argv | |
1543 // x22 argc | |
1544 // x23 target | |
1545 // | |
1546 // If all of the result bits matching kFailureTagMask are '1', the result is | |
1547 // a failure. Otherwise, it's an ordinary tagged object and the call was a | |
1548 // success. | |
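// (A successful result can never look like a failure under this mask: smis
// have their lowest bit clear, and ordinary heap pointers carry
// kHeapObjectTag rather than kFailureTag in the low bits -- assuming the
// usual V8 tagging scheme.)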
1549 Label failure; | |
1550 __ And(x10, result, kFailureTagMask); | |
1551 __ Cmp(x10, kFailureTagMask); | |
1552 __ B(&failure, eq); | |
1553 | |
1554 // The call succeeded, so unwind the stack and return. | |
1555 | |
1556 // Save argc for the final Drop below, then restore the callee-saved | |
1557 // registers x21-x23. | |
1557 __ Mov(x11, argc); | |
1558 | |
1559 __ Peek(argv, 1 * kPointerSize); | |
1560 __ Peek(argc, 2 * kPointerSize); | |
1561 __ Peek(target, 3 * kPointerSize); | |
1562 | |
1563 __ LeaveExitFrame(save_doubles_, x10, true); | |
1564 ASSERT(jssp.Is(__ StackPointer())); | |
1565 // Pop or drop the remaining stack slots and return from the stub. | |
1566 // jssp[24]: Arguments array (of size argc), including receiver. | |
1567 // jssp[16]: Preserved x23 (used for target). | |
1568 // jssp[8]: Preserved x22 (used for argc). | |
1569 // jssp[0]: Preserved x21 (used for argv). | |
1570 __ Drop(x11); | |
1571 __ Ret(); | |
1572 | |
1573 // The stack pointer is still csp if we aren't returning, and the frame | |
1574 // hasn't changed (except for the return address). | |
1575 __ SetStackPointer(csp); | |
1576 | |
1577 __ Bind(&failure); | |
1578 // The call failed, so check if we need to throw an exception, and fall | |
1579 // through (to retry) otherwise. | |
1580 | |
1581 Label retry; | |
1582 // x0 result The return code from the call, including the failure | |
1583 // code and details. | |
1584 // x21 argv | |
1585 // x22 argc | |
1586 // x23 target | |
1587 // Refer to the Failure class for details of the bit layout. | |
1588 STATIC_ASSERT(Failure::RETRY_AFTER_GC == 0); | |
1589 __ Tst(result, kFailureTypeTagMask << kFailureTagSize); | |
1590 __ B(eq, &retry); // RETRY_AFTER_GC | |
1591 | |
1592 // Special handling of out-of-memory exceptions: Pass the failure result, | |
1593 // rather than the exception descriptor. | |
1594 JumpIfOOM(masm, result, x10, throw_out_of_memory); | |
1595 | |
1596 // Retrieve the pending exception. | |
1597 const Register& exception = result; | |
1598 const Register& exception_address = x11; | |
1599 __ Mov(exception_address, | |
1600 Operand(ExternalReference(Isolate::kPendingExceptionAddress, | |
1601 isolate))); | |
1602 __ Ldr(exception, MemOperand(exception_address)); | |
1603 | |
1604 // See if we just retrieved an OOM exception. | |
1605 JumpIfOOM(masm, exception, x10, throw_out_of_memory); | |
1606 | |
1607 // Clear the pending exception. | |
1608 __ Mov(x10, Operand(isolate->factory()->the_hole_value())); | |
1609 __ Str(x10, MemOperand(exception_address)); | |
1610 | |
1611 // x0 exception The exception descriptor. | |
1612 // x21 argv | |
1613 // x22 argc | |
1614 // x23 target | |
1615 | |
1616 // Special handling of termination exceptions, which are uncatchable by | |
1617 // JavaScript code. | |
1618 __ Cmp(exception, Operand(isolate->factory()->termination_exception())); | |
1619 __ B(eq, throw_termination); | |
1620 | |
1621 // Handle normal exception. | |
1622 __ B(throw_normal); | |
1623 | |
1624 __ Bind(&retry); | |
1625 // The result (x0) is passed through as the next PerformGC parameter. | |
1626 } | |
1627 | |
1628 | |
1629 void CEntryStub::Generate(MacroAssembler* masm) { | |
1630 // The Abort mechanism relies on CallRuntime, which in turn relies on | |
1631 // CEntryStub, so until this stub has been generated, we have to use a | |
1632 // fall-back Abort mechanism. | |
1633 // | |
1634 // Note that this stub must be generated before any use of Abort. | |
1635 MacroAssembler::NoUseRealAbortsScope no_use_real_aborts(masm); | |
1636 | |
1637 ASM_LOCATION("CEntryStub::Generate entry"); | |
1638 ProfileEntryHookStub::MaybeCallEntryHook(masm); | |
1639 | |
1640 // Register parameters: | |
1641 // x0: argc (including receiver, untagged) | |
1642 // x1: target | |
1643 // | |
1644 // The stack on entry holds the arguments and the receiver, with the receiver | |
1645 // at the highest address: | |
1646 // | |
1647 // jssp[argc-1]: receiver | |
1648 // jssp[argc-2]: arg[argc-2] | |
1649 // ... ... | |
1650 // jssp[1]: arg[1] | |
1651 // jssp[0]: arg[0] | |
1652 // | |
1653 // The arguments are in reverse order, so that arg[argc-2] is actually the | |
1654 // first argument to the target function and arg[0] is the last. | |
1655 ASSERT(jssp.Is(__ StackPointer())); | |
1656 const Register& argc_input = x0; | |
1657 const Register& target_input = x1; | |
1658 | |
1659 // Calculate argv, argc and the target address, and store them in | |
1660 // callee-saved registers so we can retry the call without having to reload | |
1661 // these arguments. | |
1662 // TODO(jbramley): If the first call attempt succeeds in the common case (as | |
1663 // it should), then we might be better off putting these parameters directly | |
1664 // into their argument registers, rather than using callee-saved registers and | |
1665 // preserving them on the stack. | |
1666 const Register& argv = x21; | |
1667 const Register& argc = x22; | |
1668 const Register& target = x23; | |
1669 | |
1670 // Derive argv from the stack pointer so that it points to the first argument | |
1671 // (arg[argc-2]), or just below the receiver in case there are no arguments. | |
1672 // - Adjust for the arg[] array. | |
1673 Register temp_argv = x11; | |
1674 __ Add(temp_argv, jssp, Operand(x0, LSL, kPointerSizeLog2)); | |
1675 // - Adjust for the receiver. | |
1676 __ Sub(temp_argv, temp_argv, 1 * kPointerSize); | |
1677 | |
1678 // Enter the exit frame. Reserve three slots to preserve x21-x23 callee-saved | |
1679 // registers. | |
1680 FrameScope scope(masm, StackFrame::MANUAL); | |
1681 __ EnterExitFrame(save_doubles_, x10, 3); | |
1682 ASSERT(csp.Is(__ StackPointer())); | |
1683 | |
1684 // Poke callee-saved registers into reserved space. | |
1685 __ Poke(argv, 1 * kPointerSize); | |
1686 __ Poke(argc, 2 * kPointerSize); | |
1687 __ Poke(target, 3 * kPointerSize); | |
1688 | |
1689 // We normally only keep tagged values in callee-saved registers, as they | |
1690 // could be pushed onto the stack by called stubs and functions, and on the | |
1691 // stack they can confuse the GC. However, we're only calling C functions | |
1692 // which can push arbitrary data onto the stack anyway, and so the GC won't | |
1693 // examine that part of the stack. | |
1694 __ Mov(argc, argc_input); | |
1695 __ Mov(target, target_input); | |
1696 __ Mov(argv, temp_argv); | |
1697 | |
1698 Label throw_normal; | |
1699 Label throw_termination; | |
1700 Label throw_out_of_memory; | |
1701 | |
1702 // Call the runtime function. | |
1703 GenerateCore(masm, | |
1704 &throw_normal, | |
1705 &throw_termination, | |
1706 &throw_out_of_memory, | |
1707 false, | |
1708 false); | |
1709 | |
1710 // If successful, the previous GenerateCore will have returned to the | |
1711 // calling code. Otherwise, we fall through into the following. | |
1712 | |
1713 // Do space-specific GC and retry runtime call. | |
1714 GenerateCore(masm, | |
1715 &throw_normal, | |
1716 &throw_termination, | |
1717 &throw_out_of_memory, | |
1718 true, | |
1719 false); | |
1720 | |
1721 // Do full GC and retry runtime call one final time. | |
1722 __ Mov(x0, reinterpret_cast<uint64_t>(Failure::InternalError())); | |
1723 GenerateCore(masm, | |
1724 &throw_normal, | |
1725 &throw_termination, | |
1726 &throw_out_of_memory, | |
1727 true, | |
1728 true); | |
1729 | |
1730 // We didn't execute a return case, so the stack frame hasn't been updated | |
1731 // (except for the return address slot). However, we don't need to initialize | |
1732 // jssp because the throw method will immediately overwrite it when it | |
1733 // unwinds the stack. | |
1734 if (__ emit_debug_code()) { | |
1735 __ Mov(jssp, kDebugZapValue); | |
1736 } | |
1737 __ SetStackPointer(jssp); | |
1738 | |
1739 // Throw exceptions. | |
1740 // If we throw an exception, we can end up re-entering CEntryStub before we | |
1741 // pop the exit frame, so we need to ensure that x21-x23 contain GC-safe values | |
1742 // here. | |
1743 __ Bind(&throw_out_of_memory); | |
1744 ASM_LOCATION("Throw out of memory"); | |
1745 __ Mov(argv, 0); | |
1746 __ Mov(argc, 0); | |
1747 __ Mov(target, 0); | |
1748 // Set external caught exception to false. | |
1749 Isolate* isolate = masm->isolate(); | |
1750 __ Mov(x2, Operand(ExternalReference(Isolate::kExternalCaughtExceptionAddress, | |
1751 isolate))); | |
1752 __ Str(xzr, MemOperand(x2)); | |
1753 | |
1754 // Set pending exception and x0 to out of memory exception. | |
1755 Label already_have_failure; | |
1756 JumpIfOOM(masm, x0, x10, &already_have_failure); | |
1757 Failure* out_of_memory = Failure::OutOfMemoryException(0x1); | |
1758 __ Mov(x0, Operand(reinterpret_cast<uint64_t>(out_of_memory))); | |
1759 __ Bind(&already_have_failure); | |
1760 __ Mov(x2, Operand(ExternalReference(Isolate::kPendingExceptionAddress, | |
1761 isolate))); | |
1762 __ Str(x0, MemOperand(x2)); | |
1763 // Fall through to the next label. | |
1764 | |
1765 __ Bind(&throw_termination); | |
1766 ASM_LOCATION("Throw termination"); | |
1767 __ Mov(argv, 0); | |
1768 __ Mov(argc, 0); | |
1769 __ Mov(target, 0); | |
1770 __ ThrowUncatchable(x0, x10, x11, x12, x13); | |
1771 | |
1772 __ Bind(&throw_normal); | |
1773 ASM_LOCATION("Throw normal"); | |
1774 __ Mov(argv, 0); | |
1775 __ Mov(argc, 0); | |
1776 __ Mov(target, 0); | |
1777 __ Throw(x0, x10, x11, x12, x13); | |
1778 } | |
1779 | |
1780 | |
1781 // This is the entry point from C++. 5 arguments are provided in x0-x4. | |
1782 // See use of the CALL_GENERATED_CODE macro for example in src/execution.cc. | |
1783 // Input: | |
1784 // x0: code entry. | |
1785 // x1: function. | |
1786 // x2: receiver. | |
1787 // x3: argc. | |
1788 // x4: argv. | |
1789 // Output: | |
1790 // x0: result. | |
1791 void JSEntryStub::GenerateBody(MacroAssembler* masm, bool is_construct) { | |
1792 ASSERT(jssp.Is(__ StackPointer())); | |
1793 Register code_entry = x0; | |
1794 | |
1795 // Enable instruction instrumentation. This only works on the simulator, and | |
1796 // will have no effect on the model or real hardware. | |
1797 __ EnableInstrumentation(); | |
1798 | |
1799 Label invoke, handler_entry, exit; | |
1800 | |
1801 // Push callee-saved registers and synchronize the system stack pointer (csp) | |
1802 // and the JavaScript stack pointer (jssp). | |
1803 // | |
1804 // We must not write to jssp until after the PushCalleeSavedRegisters() | |
1805 // call, since jssp is itself a callee-saved register. | |
1806 __ SetStackPointer(csp); | |
1807 __ PushCalleeSavedRegisters(); | |
1808 __ Mov(jssp, csp); | |
1809 __ SetStackPointer(jssp); | |
1810 | |
1811 ProfileEntryHookStub::MaybeCallEntryHook(masm); | |
1812 | |
1813 // Set up the reserved register for 0.0. | |
1814 __ Fmov(fp_zero, 0.0); | |
1815 | |
1816 // Build an entry frame (see layout below). | |
1817 Isolate* isolate = masm->isolate(); | |
1818 | |
1820 int marker = is_construct ? StackFrame::ENTRY_CONSTRUCT : StackFrame::ENTRY; | |
1821 int64_t bad_frame_pointer = -1L; // Bad frame pointer to fail if it is used. | |
1822 __ Mov(x13, bad_frame_pointer); | |
1823 __ Mov(x12, Smi::FromInt(marker)); | |
1824 __ Mov(x11, ExternalReference(Isolate::kCEntryFPAddress, isolate)); | |
1825 __ Ldr(x10, MemOperand(x11)); | |
1826 | |
1827 __ Push(x13, xzr, x12, x10); | |
1828 // Set up fp. | |
1829 __ Sub(fp, jssp, EntryFrameConstants::kCallerFPOffset); | |
1830 | |
1831 // Push the JS entry frame marker. Also set js_entry_sp if this is the | |
1832 // outermost JS call. | |
1833 Label non_outermost_js, done; | |
1834 ExternalReference js_entry_sp(Isolate::kJSEntrySPAddress, isolate); | |
1835 __ Mov(x10, ExternalReference(js_entry_sp)); | |
1836 __ Ldr(x11, MemOperand(x10)); | |
1837 __ Cbnz(x11, &non_outermost_js); | |
1838 __ Str(fp, MemOperand(x10)); | |
1839 __ Mov(x12, Smi::FromInt(StackFrame::OUTERMOST_JSENTRY_FRAME)); | |
1840 __ Push(x12); | |
1841 __ B(&done); | |
1842 __ Bind(&non_outermost_js); | |
1843 // We save one instruction by pushing xzr, since the marker is 0. | |
1844 ASSERT(Smi::FromInt(StackFrame::INNER_JSENTRY_FRAME) == NULL); | |
1845 __ Push(xzr); | |
1846 __ Bind(&done); | |
1847 | |
1848 // The frame set up looks like this: | |
1849 // jssp[0] : JS entry frame marker. | |
1850 // jssp[1] : C entry FP. | |
1851 // jssp[2] : stack frame marker. | |
1852 // jssp[3] : stack frame marker. | |
1853 // jssp[4] : bad frame pointer 0xfff...ff <- fp points here. | |
1854 | |
1855 | |
1856 // Jump to a faked try block that does the invoke, with a faked catch | |
1857 // block that sets the pending exception. | |
1858 __ B(&invoke); | |
1859 | |
1860 // Prevent the constant pool from being emitted between the record of the | |
1861 // handler_entry position and the first instruction of the sequence here. | |
1862 // There is no risk because Assembler::Emit() emits the instruction before | |
1863 // checking for constant pool emission, but we do not want to depend on | |
1864 // that. | |
1865 { | |
1866 Assembler::BlockPoolsScope block_pools(masm); | |
1867 __ bind(&handler_entry); | |
1868 handler_offset_ = handler_entry.pos(); | |
1869 // Caught exception: Store result (exception) in the pending exception | |
1870 // field in the JSEnv and return a failure sentinel. Coming in here the | |
1871 // fp will be invalid because the PushTryHandler below sets it to 0 to | |
1872 // signal the existence of the JSEntry frame. | |
1873 __ Mov(x10, Operand(ExternalReference(Isolate::kPendingExceptionAddress, | |
1874 isolate))); | |
1875 } | |
1876 __ Str(code_entry, MemOperand(x10)); | |
1877 __ Mov(x0, Operand(reinterpret_cast<int64_t>(Failure::Exception()))); | |
1878 __ B(&exit); | |
1879 | |
1880 // Invoke: Link this frame into the handler chain. There's only one | |
1881 // handler block in this code object, so its index is 0. | |
1882 __ Bind(&invoke); | |
1883 __ PushTryHandler(StackHandler::JS_ENTRY, 0); | |
1884 // If an exception not caught by another handler occurs, this handler | |
1885 // returns control to the code after the B(&invoke) above, which | |
1886 // restores all callee-saved registers (including cp and fp) to their | |
1887 // saved values before returning a failure to C. | |
1888 | |
1889 // Clear any pending exceptions. | |
1890 __ Mov(x10, Operand(isolate->factory()->the_hole_value())); | |
1891 __ Mov(x11, Operand(ExternalReference(Isolate::kPendingExceptionAddress, | |
1892 isolate))); | |
1893 __ Str(x10, MemOperand(x11)); | |
1894 | |
1895 // Invoke the function by calling through the JS entry trampoline builtin. | |
1896 // Notice that we cannot store a reference to the trampoline code directly in | |
1897 // this stub, because runtime stubs are not traversed when doing GC. | |
1898 | |
1899 // Expected registers by Builtins::JSEntryTrampoline | |
1900 // x0: code entry. | |
1901 // x1: function. | |
1902 // x2: receiver. | |
1903 // x3: argc. | |
1904 // x4: argv. | |
1905 ExternalReference entry(is_construct ? Builtins::kJSConstructEntryTrampoline | |
1906 : Builtins::kJSEntryTrampoline, | |
1907 isolate); | |
1908 __ Mov(x10, entry); | |
1909 | |
1910 // Call the JSEntryTrampoline. | |
1911 __ Ldr(x11, MemOperand(x10)); // Dereference the address. | |
1912 __ Add(x12, x11, Code::kHeaderSize - kHeapObjectTag); | |
1913 __ Blr(x12); | |
1914 | |
1915 // Unlink this frame from the handler chain. | |
1916 __ PopTryHandler(); | |
1917 | |
1918 | |
1919 __ Bind(&exit); | |
1920 // x0 holds the result. | |
1921 // The stack pointer points to the top of the entry frame pushed on entry from | |
1922 // C++ (at the beginning of this stub): | |
1923 // jssp[0] : JS entry frame marker. | |
1924 // jssp[1] : C entry FP. | |
1925 // jssp[2] : stack frame marker. | |
1926 // jssp[3] : stack frame marker. | |
1927 // jssp[4] : bad frame pointer 0xfff...ff <- fp points here. | |
1928 | |
1929 // Check if the current stack frame is marked as the outermost JS frame. | |
1930 Label non_outermost_js_2; | |
1931 __ Pop(x10); | |
1932 __ Cmp(x10, Smi::FromInt(StackFrame::OUTERMOST_JSENTRY_FRAME)); | |
1933 __ B(ne, &non_outermost_js_2); | |
1934 __ Mov(x11, ExternalReference(js_entry_sp)); | |
1935 __ Str(xzr, MemOperand(x11)); | |
1936 __ Bind(&non_outermost_js_2); | |
1937 | |
1938 // Restore the top frame descriptors from the stack. | |
1939 __ Pop(x10); | |
1940 __ Mov(x11, ExternalReference(Isolate::kCEntryFPAddress, isolate)); | |
1941 __ Str(x10, MemOperand(x11)); | |
1942 | |
1943 // Reset the stack to where the callee-saved registers were pushed. | |
1944 __ Drop(-EntryFrameConstants::kCallerFPOffset, kByteSizeInBytes); | |
1945 // Restore the callee-saved registers and return. | |
1946 ASSERT(jssp.Is(__ StackPointer())); | |
1947 __ Mov(csp, jssp); | |
1948 __ SetStackPointer(csp); | |
1949 __ PopCalleeSavedRegisters(); | |
1950 // After this point, we must not modify jssp because it is a callee-saved | |
1951 // register which we have just restored. | |
1952 __ Ret(); | |
1953 } | |
1954 | |
1955 | |
1956 void FunctionPrototypeStub::Generate(MacroAssembler* masm) { | |
1957 Label miss; | |
1958 Register receiver; | |
1959 if (kind() == Code::KEYED_LOAD_IC) { | |
1960 // ----------- S t a t e ------------- | |
1961 // -- lr : return address | |
1962 // -- x1 : receiver | |
1963 // -- x0 : key | |
1964 // ----------------------------------- | |
1965 Register key = x0; | |
1966 receiver = x1; | |
1967 __ Cmp(key, Operand(masm->isolate()->factory()->prototype_string())); | |
1968 __ B(ne, &miss); | |
1969 } else { | |
1970 ASSERT(kind() == Code::LOAD_IC); | |
1971 // ----------- S t a t e ------------- | |
1972 // -- lr : return address | |
1973 // -- x2 : name | |
1974 // -- x0 : receiver | |
1975 // -- sp[0] : receiver | |
1976 // ----------------------------------- | |
1977 receiver = x0; | |
1978 } | |
1979 | |
1980 StubCompiler::GenerateLoadFunctionPrototype(masm, receiver, x10, x11, &miss); | |
1981 | |
1982 __ Bind(&miss); | |
1983 StubCompiler::TailCallBuiltin(masm, | |
1984 BaseLoadStoreStubCompiler::MissBuiltin(kind())); | |
1985 } | |
1986 | |
1987 | |
1988 void InstanceofStub::Generate(MacroAssembler* masm) { | |
1989 // Stack on entry: | |
1990 // jssp[0]: function. | |
1991 // jssp[8]: object. | |
1992 // | |
1993 // Returns result in x0. Zero indicates instanceof, smi 1 indicates not | |
1994 // instanceof. | |
1995 | |
1996 Register result = x0; | |
1997 Register function = right(); | |
1998 Register object = left(); | |
1999 Register scratch1 = x6; | |
2000 Register scratch2 = x7; | |
2001 Register res_true = x8; | |
2002 Register res_false = x9; | |
2003 // Only used if there was an inline map check site. (See | |
2004 // LCodeGen::DoInstanceOfKnownGlobal().) | |
2005 Register map_check_site = x4; | |
2006 // Delta for the instructions generated between the inline map check and the | |
2007 // instruction setting the result. | |
2008 const int32_t kDeltaToLoadBoolResult = 4 * kInstructionSize; | |
2009 | |
2010 Label not_js_object, slow; | |
2011 | |
2012 if (!HasArgsInRegisters()) { | |
2013 __ Pop(function, object); | |
2014 } | |
2015 | |
2016 if (ReturnTrueFalseObject()) { | |
2017 __ LoadTrueFalseRoots(res_true, res_false); | |
2018 } else { | |
2019 // This is counter-intuitive, but correct: smi 0 means instanceof (see above). | |
2020 __ Mov(res_true, Smi::FromInt(0)); | |
2021 __ Mov(res_false, Smi::FromInt(1)); | |
2022 } | |
2023 | |
2024 // Check that the left hand side is a JS object and load its map as a side | |
2025 // effect. | |
2026 Register map = x12; | |
2027 __ JumpIfSmi(object, ¬_js_object); | |
2028 __ IsObjectJSObjectType(object, map, scratch2, ¬_js_object); | |
2029 | |
2030 // If there is a call site cache, don't look in the global cache, but do the | |
2031 // real lookup and update the call site cache. | |
2032 if (!HasCallSiteInlineCheck()) { | |
2033 Label miss; | |
2034 __ JumpIfNotRoot(function, Heap::kInstanceofCacheFunctionRootIndex, &miss); | |
2035 __ JumpIfNotRoot(map, Heap::kInstanceofCacheMapRootIndex, &miss); | |
2036 __ LoadRoot(result, Heap::kInstanceofCacheAnswerRootIndex); | |
2037 __ Ret(); | |
2038 __ Bind(&miss); | |
2039 } | |
2040 | |
2041 // Get the prototype of the function. | |
2042 Register prototype = x13; | |
2043 __ TryGetFunctionPrototype(function, prototype, scratch2, &slow, | |
2044 MacroAssembler::kMissOnBoundFunction); | |
2045 | |
2046 // Check that the function prototype is a JS object. | |
2047 __ JumpIfSmi(prototype, &slow); | |
2048 __ IsObjectJSObjectType(prototype, scratch1, scratch2, &slow); | |
2049 | |
2050 // Update the global instanceof or call site inlined cache with the current | |
2051 // map and function. The cached answer will be set when it is known below. | |
2052 if (HasCallSiteInlineCheck()) { | |
2053 // Patch the (relocated) inlined map check. | |
2054 __ GetRelocatedValueLocation(map_check_site, scratch1); | |
2055 // We have a cell, so we need another level of dereferencing. | |
2056 __ Ldr(scratch1, MemOperand(scratch1)); | |
2057 __ Str(map, FieldMemOperand(scratch1, Cell::kValueOffset)); | |
2058 } else { | |
2059 __ StoreRoot(function, Heap::kInstanceofCacheFunctionRootIndex); | |
2060 __ StoreRoot(map, Heap::kInstanceofCacheMapRootIndex); | |
2061 } | |
2062 | |
2063 Label return_true, return_result; | |
2064 { | |
2065 // Loop through the prototype chain looking for the function prototype. | |
2066 Register chain_map = x1; | |
2067 Register chain_prototype = x14; | |
2068 Register null_value = x15; | |
2069 Label loop; | |
2070 __ Ldr(chain_prototype, FieldMemOperand(map, Map::kPrototypeOffset)); | |
2071 __ LoadRoot(null_value, Heap::kNullValueRootIndex); | |
2072 // Speculatively set a result. | |
2073 __ Mov(result, res_false); | |
2074 | |
2075 __ Bind(&loop); | |
2076 | |
2077 // If the chain prototype is the object prototype, return true. | |
2078 __ Cmp(chain_prototype, prototype); | |
2079 __ B(eq, &return_true); | |
2080 | |
2081 // If the chain prototype is null, we've reached the end of the chain, so | |
2082 // return false. | |
2083 __ Cmp(chain_prototype, null_value); | |
2084 __ B(eq, &return_result); | |
2085 | |
2086 // Otherwise, load the next prototype in the chain, and loop. | |
2087 __ Ldr(chain_map, FieldMemOperand(chain_prototype, HeapObject::kMapOffset)); | |
2088 __ Ldr(chain_prototype, FieldMemOperand(chain_map, Map::kPrototypeOffset)); | |
2089 __ B(&loop); | |
2090 } | |
2091 | |
2092 // Return sequence when no arguments are on the stack. | |
2093 // We cannot fall through to here. | |
2094 __ Bind(&return_true); | |
2095 __ Mov(result, res_true); | |
2096 __ Bind(&return_result); | |
2097 if (HasCallSiteInlineCheck()) { | |
2098 ASSERT(ReturnTrueFalseObject()); | |
2099 __ Add(map_check_site, map_check_site, kDeltaToLoadBoolResult); | |
2100 __ GetRelocatedValueLocation(map_check_site, scratch2); | |
2101 __ Str(result, MemOperand(scratch2)); | |
2102 } else { | |
2103 __ StoreRoot(result, Heap::kInstanceofCacheAnswerRootIndex); | |
2104 } | |
2105 __ Ret(); | |
2106 | |
2107 Label object_not_null, object_not_null_or_smi; | |
2108 | |
2109 __ Bind(¬_js_object); | |
2110 Register object_type = x14; | |
2111 // x0 result result return register (uninit) | |
2112 // x10 function pointer to function | |
2113 // x11 object pointer to object | |
2114 // x14 object_type type of object (uninit) | |
2115 | |
2116 // Before null, smi and string checks, check that the rhs is a function. | |
2117 // For a non-function rhs, an exception must be thrown. | |
2118 __ JumpIfSmi(function, &slow); | |
2119 __ JumpIfNotObjectType( | |
2120 function, scratch1, object_type, JS_FUNCTION_TYPE, &slow); | |
2121 | |
2122 __ Mov(result, res_false); | |
2123 | |
2124 // Null is not instance of anything. | |
2125 __ Cmp(object, Operand(masm->isolate()->factory()->null_value())); | |
2126 __ B(ne, &object_not_null); | |
2127 __ Ret(); | |
2128 | |
2129 __ Bind(&object_not_null); | |
2130 // Smi values are not instances of anything. | |
2131 __ JumpIfNotSmi(object, &object_not_null_or_smi); | |
2132 __ Ret(); | |
2133 | |
2134 __ Bind(&object_not_null_or_smi); | |
2135 // String values are not instances of anything. | |
2136 __ IsObjectJSStringType(object, scratch2, &slow); | |
2137 __ Ret(); | |
2138 | |
2139 // Slow-case. Tail call builtin. | |
2140 __ Bind(&slow); | |
2141 { | |
2142 FrameScope scope(masm, StackFrame::INTERNAL); | |
2143 // Arguments have either been passed into registers or have been previously | |
2144 // popped. We need to push them before calling builtin. | |
2145 __ Push(object, function); | |
2146 __ InvokeBuiltin(Builtins::INSTANCE_OF, CALL_FUNCTION); | |
2147 } | |
2148 if (ReturnTrueFalseObject()) { | |
2149 // Reload true/false because they were clobbered in the builtin call. | |
2150 __ LoadTrueFalseRoots(res_true, res_false); | |
2151 __ Cmp(result, 0); | |
2152 __ Csel(result, res_true, res_false, eq); | |
2153 } | |
2154 __ Ret(); | |
2155 } | |
2156 | |
2157 | |
2158 Register InstanceofStub::left() { | |
2159 // Object to check (instanceof lhs). | |
2160 return x11; | |
2161 } | |
2162 | |
2163 | |
2164 Register InstanceofStub::right() { | |
2165 // Constructor function (instanceof rhs). | |
2166 return x10; | |
2167 } | |
2168 | |
2169 | |
2170 void ArgumentsAccessStub::GenerateReadElement(MacroAssembler* masm) { | |
2171 Register arg_count = x0; | |
2172 Register key = x1; | |
2173 | |
2174 // The displacement is the offset of the last parameter (if any) relative | |
2175 // to the frame pointer. | |
2176 static const int kDisplacement = | |
2177 StandardFrameConstants::kCallerSPOffset - kPointerSize; | |
2178 | |
2179 // Check that the key is a smi. | |
2180 Label slow; | |
2181 __ JumpIfNotSmi(key, &slow); | |
2182 | |
2183 // Check if the calling frame is an arguments adaptor frame. | |
2184 Register local_fp = x11; | |
2185 Register caller_fp = x11; | |
2186 Register caller_ctx = x12; | |
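// Note: local_fp deliberately aliases caller_fp (both are x11). The Csel
// below reads fp and caller_fp before writing local_fp, so the aliasing is
// safe and saves a scratch register.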
2187 Label skip_adaptor; | |
2188 __ Ldr(caller_fp, MemOperand(fp, StandardFrameConstants::kCallerFPOffset)); | |
2189 __ Ldr(caller_ctx, MemOperand(caller_fp, | |
2190 StandardFrameConstants::kContextOffset)); | |
2191 __ Cmp(caller_ctx, Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR)); | |
2192 __ Csel(local_fp, fp, caller_fp, ne); | |
2193 __ B(ne, &skip_adaptor); | |
2194 | |
2195 // Load the actual arguments limit found in the arguments adaptor frame. | |
2196 __ Ldr(arg_count, MemOperand(caller_fp, | |
2197 ArgumentsAdaptorFrameConstants::kLengthOffset)); | |
2198 __ Bind(&skip_adaptor); | |
2199 | |
2200 // Check index against formal parameters count limit. Use unsigned comparison | |
2201 // to get negative check for free: branch if key < 0 or key >= arg_count. | |
2202 __ Cmp(key, arg_count); | |
2203 __ B(hs, &slow); | |
2204 | |
2205 // Read the argument from the stack and return it. | |
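// The three instructions below compute the load address as
//   local_fp + kDisplacement + (arg_count - key) * kPointerSize
//     == local_fp + kCallerSPOffset + (arg_count - key - 1) * kPointerSize,
// so arguments are addressed upwards from the caller's stack pointer, with
// higher keys at lower addresses.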
2206 __ Sub(x10, arg_count, key); | |
2207 __ Add(x10, local_fp, Operand::UntagSmiAndScale(x10, kPointerSizeLog2)); | |
2208 __ Ldr(x0, MemOperand(x10, kDisplacement)); | |
2209 __ Ret(); | |
2210 | |
2211 // Slow case: handle non-smi or out-of-bounds access to arguments by calling | |
2212 // the runtime system. | |
2213 __ Bind(&slow); | |
2214 __ Push(key); | |
2215 __ TailCallRuntime(Runtime::kGetArgumentsProperty, 1, 1); | |
2216 } | |
2217 | |
2218 | |
2219 void ArgumentsAccessStub::GenerateNewSloppySlow(MacroAssembler* masm) { | |
2220 // Stack layout on entry. | |
2221 // jssp[0]: number of parameters (tagged) | |
2222 // jssp[8]: address of receiver argument | |
2223 // jssp[16]: function | |
2224 | |
2225 // Check if the calling frame is an arguments adaptor frame. | |
2226 Label runtime; | |
2227 Register caller_fp = x10; | |
2228 __ Ldr(caller_fp, MemOperand(fp, StandardFrameConstants::kCallerFPOffset)); | |
2229 // Load and untag the context. | |
2230 STATIC_ASSERT((kSmiShift / kBitsPerByte) == 4); | |
2231 __ Ldr(w11, MemOperand(caller_fp, StandardFrameConstants::kContextOffset + | |
2232 (kSmiShift / kBitsPerByte))); | |
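// On 64-bit V8, a smi keeps its 32-bit payload in the upper half of the
// word (kSmiShift == 32), so loading only the high 32 bits of the slot --
// hence the extra (kSmiShift / kBitsPerByte) byte offset and the W-sized
// destination -- yields the untagged value directly, with no separate
// SmiUntag step.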
2233 __ Cmp(w11, StackFrame::ARGUMENTS_ADAPTOR); | |
2234 __ B(ne, &runtime); | |
2235 | |
2236 // Patch the arguments.length and parameters pointer in the current frame. | |
2237 __ Ldr(x11, MemOperand(caller_fp, | |
2238 ArgumentsAdaptorFrameConstants::kLengthOffset)); | |
2239 __ Poke(x11, 0 * kXRegSize); | |
2240 __ Add(x10, caller_fp, Operand::UntagSmiAndScale(x11, kPointerSizeLog2)); | |
2241 __ Add(x10, x10, StandardFrameConstants::kCallerSPOffset); | |
2242 __ Poke(x10, 1 * kXRegSize); | |
2243 | |
2244 __ Bind(&runtime); | |
2245 __ TailCallRuntime(Runtime::kNewArgumentsFast, 3, 1); | |
2246 } | |
2247 | |
2248 | |
2249 void ArgumentsAccessStub::GenerateNewSloppyFast(MacroAssembler* masm) { | |
2250 // Stack layout on entry. | |
2251 // jssp[0]: number of parameters (tagged) | |
2252 // jssp[8]: address of receiver argument | |
2253 // jssp[16]: function | |
2254 // | |
2255 // Returns pointer to result object in x0. | |
2256 | |
2257 // Note: arg_count_smi is an alias of param_count_smi. | |
2258 Register arg_count_smi = x3; | |
2259 Register param_count_smi = x3; | |
2260 Register param_count = x7; | |
2261 Register recv_arg = x14; | |
2262 Register function = x4; | |
2263 __ Pop(param_count_smi, recv_arg, function); | |
2264 __ SmiUntag(param_count, param_count_smi); | |
2265 | |
2266 // Check if the calling frame is an arguments adaptor frame. | |
2267 Register caller_fp = x11; | |
2268 Register caller_ctx = x12; | |
2269 Label runtime; | |
2270 Label adaptor_frame, try_allocate; | |
2271 __ Ldr(caller_fp, MemOperand(fp, StandardFrameConstants::kCallerFPOffset)); | |
2272 __ Ldr(caller_ctx, MemOperand(caller_fp, | |
2273 StandardFrameConstants::kContextOffset)); | |
2274 __ Cmp(caller_ctx, Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR)); | |
2275 __ B(eq, &adaptor_frame); | |
2276 | |
2277 // No adaptor, parameter count = argument count. | |
2278 | |
2279 // x1 mapped_params number of mapped params, min(params, args) (uninit) | |
2280 // x2 arg_count number of function arguments (uninit) | |
2281 // x3 arg_count_smi number of function arguments (smi) | |
2282 // x4 function function pointer | |
2283 // x7 param_count number of function parameters | |
2284 // x11 caller_fp caller's frame pointer | |
2285 // x14 recv_arg pointer to receiver arguments | |
2286 | |
2287 Register arg_count = x2; | |
2288 __ Mov(arg_count, param_count); | |
2289 __ B(&try_allocate); | |
2290 | |
2291 // We have an adaptor frame. Patch the parameters pointer. | |
2292 __ Bind(&adaptor_frame); | |
2293 __ Ldr(arg_count_smi, | |
2294 MemOperand(caller_fp, | |
2295 ArgumentsAdaptorFrameConstants::kLengthOffset)); | |
2296 __ SmiUntag(arg_count, arg_count_smi); | |
2297 __ Add(x10, caller_fp, Operand(arg_count, LSL, kPointerSizeLog2)); | |
2298 __ Add(recv_arg, x10, StandardFrameConstants::kCallerSPOffset); | |
2299 | |
2300 // Compute the mapped parameter count = min(param_count, arg_count) | |
2301 Register mapped_params = x1; | |
2302 __ Cmp(param_count, arg_count); | |
2303 __ Csel(mapped_params, param_count, arg_count, lt); | |
2304 | |
2305 __ Bind(&try_allocate); | |
2306 | |
2307 // x0 alloc_obj pointer to allocated objects: param map, backing | |
2308 // store, arguments (uninit) | |
2309 // x1 mapped_params number of mapped parameters, min(params, args) | |
2310 // x2 arg_count number of function arguments | |
2311 // x3 arg_count_smi number of function arguments (smi) | |
2312 // x4 function function pointer | |
2313 // x7 param_count number of function parameters | |
2314 // x10 size size of objects to allocate (uninit) | |
2315 // x14 recv_arg pointer to receiver arguments | |
2316 | |
2317 // Compute the size of backing store, parameter map, and arguments object. | |
2318 // 1. Parameter map, has two extra words containing context and backing | |
2319 // store. | |
2320 const int kParameterMapHeaderSize = | |
2321 FixedArray::kHeaderSize + 2 * kPointerSize; | |
2322 | |
2323 // Calculate the parameter map size, assuming it exists. | |
2324 Register size = x10; | |
2325 __ Mov(size, Operand(mapped_params, LSL, kPointerSizeLog2)); | |
2326 __ Add(size, size, kParameterMapHeaderSize); | |
2327 | |
2328 // If there are no mapped parameters, set the running size total to zero. | |
2329 // Otherwise, use the parameter map size calculated earlier. | |
2330 __ Cmp(mapped_params, 0); | |
2331 __ CzeroX(size, eq); | |
2332 | |
2333 // 2. Add the size of the backing store and arguments object. | |
2334 __ Add(size, size, Operand(arg_count, LSL, kPointerSizeLog2)); | |
2335 __ Add(size, size, | |
2336 FixedArray::kHeaderSize + Heap::kSloppyArgumentsObjectSize); | |
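// Restating the computation above in one expression:
//   size = (mapped_params == 0
//               ? 0
//               : mapped_params * kPointerSize + kParameterMapHeaderSize)
//          + arg_count * kPointerSize       // backing store elements
//          + FixedArray::kHeaderSize        // backing store header
//          + Heap::kSloppyArgumentsObjectSize;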
2337 | |
2338 // Do the allocation of all three objects in one go. Assign this to x0, as it | |
2339 // will be returned to the caller. | |
2340 Register alloc_obj = x0; | |
2341 __ Allocate(size, alloc_obj, x11, x12, &runtime, TAG_OBJECT); | |
2342 | |
2343 // Get the arguments boilerplate from the current (global) context. | |
2344 | |
2345 // x0 alloc_obj pointer to allocated objects (param map, backing | |
2346 // store, arguments) | |
2347 // x1 mapped_params number of mapped parameters, min(params, args) | |
2348 // x2 arg_count number of function arguments | |
2349 // x3 arg_count_smi number of function arguments (smi) | |
2350 // x4 function function pointer | |
2351 // x7 param_count number of function parameters | |
2352 // x11 args_offset offset to args (or aliased args) boilerplate (uninit) | |
2353 // x14 recv_arg pointer to receiver arguments | |
2354 | |
2355 Register global_object = x10; | |
2356 Register global_ctx = x10; | |
2357 Register args_offset = x11; | |
2358 Register aliased_args_offset = x10; | |
2359 __ Ldr(global_object, GlobalObjectMemOperand()); | |
2360 __ Ldr(global_ctx, FieldMemOperand(global_object, | |
2361 GlobalObject::kNativeContextOffset)); | |
2362 | |
2363 __ Ldr(args_offset, | |
2364 ContextMemOperand(global_ctx, | |
2365 Context::SLOPPY_ARGUMENTS_BOILERPLATE_INDEX)); | |
2366 __ Ldr(aliased_args_offset, | |
2367 ContextMemOperand(global_ctx, | |
2368 Context::ALIASED_ARGUMENTS_BOILERPLATE_INDEX)); | |
2369 __ Cmp(mapped_params, 0); | |
2370 __ CmovX(args_offset, aliased_args_offset, ne); | |
2371 | |
2372 // Copy the JS object part. | |
2373 __ CopyFields(alloc_obj, args_offset, CPURegList(x10, x12, x13), | |
2374 JSObject::kHeaderSize / kPointerSize); | |
2375 | |
2376 // Set up the callee in-object property. | |
2377 STATIC_ASSERT(Heap::kArgumentsCalleeIndex == 1); | |
2378 const int kCalleeOffset = JSObject::kHeaderSize + | |
2379 Heap::kArgumentsCalleeIndex * kPointerSize; | |
2380 __ Str(function, FieldMemOperand(alloc_obj, kCalleeOffset)); | |
2381 | |
2382 // Use the length and set that as an in-object property. | |
2383 STATIC_ASSERT(Heap::kArgumentsLengthIndex == 0); | |
2384 const int kLengthOffset = JSObject::kHeaderSize + | |
2385 Heap::kArgumentsLengthIndex * kPointerSize; | |
2386 __ Str(arg_count_smi, FieldMemOperand(alloc_obj, kLengthOffset)); | |
2387 | |
2388 // Set up the elements pointer in the allocated arguments object. | |
2389 // If we allocated a parameter map, "elements" will point there, otherwise | |
2390 // it will point to the backing store. | |
2391 | |
2392 // x0 alloc_obj pointer to allocated objects (param map, backing | |
2393 // store, arguments) | |
2394 // x1 mapped_params number of mapped parameters, min(params, args) | |
2395 // x2 arg_count number of function arguments | |
2396 // x3 arg_count_smi number of function arguments (smi) | |
2397 // x4 function function pointer | |
2398 // x5 elements pointer to parameter map or backing store (uninit) | |
2399 // x6 backing_store pointer to backing store (uninit) | |
2400 // x7 param_count number of function parameters | |
2401 // x14 recv_arg pointer to receiver arguments | |
2402 | |
2403 Register elements = x5; | |
2404 __ Add(elements, alloc_obj, Heap::kSloppyArgumentsObjectSize); | |
2405 __ Str(elements, FieldMemOperand(alloc_obj, JSObject::kElementsOffset)); | |
2406 | |
2407 // Initialize parameter map. If there are no mapped arguments, we're done. | |
2408 Label skip_parameter_map; | |
2409 __ Cmp(mapped_params, 0); | |
2410 // Set up backing store address, because it is needed later for filling in | |
2411 // the unmapped arguments. | |
2412 Register backing_store = x6; | |
2413 __ CmovX(backing_store, elements, eq); | |
2414 __ B(eq, &skip_parameter_map); | |
2415 | |
2416 __ LoadRoot(x10, Heap::kSloppyArgumentsElementsMapRootIndex); | |
2417 __ Str(x10, FieldMemOperand(elements, FixedArray::kMapOffset)); | |
2418 __ Add(x10, mapped_params, 2); | |
2419 __ SmiTag(x10); | |
2420 __ Str(x10, FieldMemOperand(elements, FixedArray::kLengthOffset)); | |
2421 __ Str(cp, FieldMemOperand(elements, | |
2422 FixedArray::kHeaderSize + 0 * kPointerSize)); | |
2423 __ Add(x10, elements, Operand(mapped_params, LSL, kPointerSizeLog2)); | |
2424 __ Add(x10, x10, kParameterMapHeaderSize); | |
2425 __ Str(x10, FieldMemOperand(elements, | |
2426 FixedArray::kHeaderSize + 1 * kPointerSize)); | |
2427 | |
2428 // Copy the parameter slots and the holes in the arguments. | |
2429 // We need to fill in mapped_parameter_count slots. Then index the context, | |
2430 // where parameters are stored in reverse order, at: | |
2431 // | |
2432 // MIN_CONTEXT_SLOTS .. MIN_CONTEXT_SLOTS + parameter_count - 1 | |
2433 // | |
2434 // The mapped parameter thus needs to get indices: | |
2435 // | |
2436 // MIN_CONTEXT_SLOTS + parameter_count - 1 .. | |
2437 // MIN_CONTEXT_SLOTS + parameter_count - mapped_parameter_count | |
2438 // | |
2439 // We loop from right to left. | |
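// For example (illustrative only), with param_count == 3 and
// mapped_params == 2, the two iterations store:
//   parameter map slot 1: index MIN_CONTEXT_SLOTS + 1, and a hole in slot 1
//                         of the backing store;
//   parameter map slot 0: index MIN_CONTEXT_SLOTS + 2, and a hole in slot 0.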
2440 | |
2441 // x0 alloc_obj pointer to allocated objects (param map, backing | |
2442 // store, arguments) | |
2443 // x1 mapped_params number of mapped parameters, min(params, args) | |
2444 // x2 arg_count number of function arguments | |
2445 // x3 arg_count_smi number of function arguments (smi) | |
2446 // x4 function function pointer | |
2447 // x5 elements pointer to parameter map or backing store (uninit) | |
2448 // x6 backing_store pointer to backing store (uninit) | |
2449 // x7 param_count number of function parameters | |
2450 // x11 loop_count parameter loop counter (uninit) | |
2451 // x12 index parameter index (smi, uninit) | |
2452 // x13 the_hole hole value (uninit) | |
2453 // x14 recv_arg pointer to receiver arguments | |
2454 | |
2455 Register loop_count = x11; | |
2456 Register index = x12; | |
2457 Register the_hole = x13; | |
2458 Label parameters_loop, parameters_test; | |
2459 __ Mov(loop_count, mapped_params); | |
2460 __ Add(index, param_count, static_cast<int>(Context::MIN_CONTEXT_SLOTS)); | |
2461 __ Sub(index, index, mapped_params); | |
2462 __ SmiTag(index); | |
2463 __ LoadRoot(the_hole, Heap::kTheHoleValueRootIndex); | |
2464 __ Add(backing_store, elements, Operand(loop_count, LSL, kPointerSizeLog2)); | |
2465 __ Add(backing_store, backing_store, kParameterMapHeaderSize); | |
2466 | |
2467 __ B(¶meters_test); | |
2468 | |
2469 __ Bind(¶meters_loop); | |
2470 __ Sub(loop_count, loop_count, 1); | |
2471 __ Mov(x10, Operand(loop_count, LSL, kPointerSizeLog2)); | |
2472 __ Add(x10, x10, kParameterMapHeaderSize - kHeapObjectTag); | |
2473 __ Str(index, MemOperand(elements, x10)); | |
2474 __ Sub(x10, x10, kParameterMapHeaderSize - FixedArray::kHeaderSize); | |
2475 __ Str(the_hole, MemOperand(backing_store, x10)); | |
2476 __ Add(index, index, Smi::FromInt(1)); | |
2477 __ Bind(¶meters_test); | |
2478 __ Cbnz(loop_count, ¶meters_loop); | |
2479 | |
2480 __ Bind(&skip_parameter_map); | |
2481 // Copy arguments header and remaining slots (if there are any). | |
2482 __ LoadRoot(x10, Heap::kFixedArrayMapRootIndex); | |
2483 __ Str(x10, FieldMemOperand(backing_store, FixedArray::kMapOffset)); | |
2484 __ Str(arg_count_smi, FieldMemOperand(backing_store, | |
2485 FixedArray::kLengthOffset)); | |
2486 | |
2487 // x0 alloc_obj pointer to allocated objects (param map, backing | |
2488 // store, arguments) | |
2489 // x1 mapped_params number of mapped parameters, min(params, args) | |
2490 // x2 arg_count number of function arguments | |
2491 // x4 function function pointer | |
2492 // x3 arg_count_smi number of function arguments (smi) | |
2493 // x6 backing_store pointer to backing store (uninit) | |
2494 // x14 recv_arg pointer to receiver arguments | |
2495 | |
2496 Label arguments_loop, arguments_test; | |
2497 __ Mov(x10, mapped_params); | |
2498 __ Sub(recv_arg, recv_arg, Operand(x10, LSL, kPointerSizeLog2)); | |
2499 __ B(&arguments_test); | |
2500 | |
2501 __ Bind(&arguments_loop); | |
2502 __ Sub(recv_arg, recv_arg, kPointerSize); | |
2503 __ Ldr(x11, MemOperand(recv_arg)); | |
2504 __ Add(x12, backing_store, Operand(x10, LSL, kPointerSizeLog2)); | |
2505 __ Str(x11, FieldMemOperand(x12, FixedArray::kHeaderSize)); | |
2506 __ Add(x10, x10, 1); | |
2507 | |
2508 __ Bind(&arguments_test); | |
2509 __ Cmp(x10, arg_count); | |
2510 __ B(lt, &arguments_loop); | |
2511 | |
2512 __ Ret(); | |
2513 | |
2514 // Do the runtime call to allocate the arguments object. | |
2515 __ Bind(&runtime); | |
2516 __ Push(function, recv_arg, arg_count_smi); | |
2517 __ TailCallRuntime(Runtime::kNewArgumentsFast, 3, 1); | |
2518 } | |
2519 | |
2520 | |
2521 void ArgumentsAccessStub::GenerateNewStrict(MacroAssembler* masm) { | |
2522 // Stack layout on entry. | |
2523 // jssp[0]: number of parameters (tagged) | |
2524 // jssp[8]: address of receiver argument | |
2525 // jssp[16]: function | |
2526 // | |
2527 // Returns pointer to result object in x0. | |
2528 | |
2529 // Get the stub arguments from the frame, and make an untagged copy of the | |
2530 // parameter count. | |
2531 Register param_count_smi = x1; | |
2532 Register params = x2; | |
2533 Register function = x3; | |
2534 Register param_count = x13; | |
2535 __ Pop(param_count_smi, params, function); | |
2536 __ SmiUntag(param_count, param_count_smi); | |
2537 | |
2538 // Test if arguments adaptor needed. | |
2539 Register caller_fp = x11; | |
2540 Register caller_ctx = x12; | |
2541 Label try_allocate, runtime; | |
2542 __ Ldr(caller_fp, MemOperand(fp, StandardFrameConstants::kCallerFPOffset)); | |
2543 __ Ldr(caller_ctx, MemOperand(caller_fp, | |
2544 StandardFrameConstants::kContextOffset)); | |
2545 __ Cmp(caller_ctx, Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR)); | |
2546 __ B(ne, &try_allocate); | |
2547 | |
2548 // x1 param_count_smi number of parameters passed to function (smi) | |
2549 // x2 params pointer to parameters | |
2550 // x3 function function pointer | |
2551 // x11 caller_fp caller's frame pointer | |
2552 // x13 param_count number of parameters passed to function | |
2553 | |
2554 // Patch the argument length and parameters pointer. | |
2555 __ Ldr(param_count_smi, | |
2556 MemOperand(caller_fp, | |
2557 ArgumentsAdaptorFrameConstants::kLengthOffset)); | |
2558 __ SmiUntag(param_count, param_count_smi); | |
2559 __ Add(x10, caller_fp, Operand(param_count, LSL, kPointerSizeLog2)); | |
2560 __ Add(params, x10, StandardFrameConstants::kCallerSPOffset); | |
2561 | |
2562 // Try the new space allocation. Start out with computing the size of the | |
2563 // arguments object and the elements array in words. | |
2564 Register size = x10; | |
2565 __ Bind(&try_allocate); | |
2566 __ Add(size, param_count, FixedArray::kHeaderSize / kPointerSize); | |
2567 __ Cmp(param_count, 0); | |
2568 __ CzeroX(size, eq); | |
2569 __ Add(size, size, Heap::kStrictArgumentsObjectSize / kPointerSize); | |
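// Restated, the size in words is:
//   size = (param_count == 0
//               ? 0
//               : param_count + FixedArray::kHeaderSize / kPointerSize)
//          + Heap::kStrictArgumentsObjectSize / kPointerSize;
// The SIZE_IN_WORDS flag passed to Allocate below scales this to bytes.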
2570 | |
2571 // Do the allocation of both objects in one go. Assign this to x0, as it will | |
2572 // be returned to the caller. | |
2573 Register alloc_obj = x0; | |
2574 __ Allocate(size, alloc_obj, x11, x12, &runtime, | |
2575 static_cast<AllocationFlags>(TAG_OBJECT | SIZE_IN_WORDS)); | |
2576 | |
2577 // Get the arguments boilerplate from the current (native) context. | |
2578 Register global_object = x10; | |
2579 Register global_ctx = x10; | |
2580 Register args_offset = x4; | |
2581 __ Ldr(global_object, GlobalObjectMemOperand()); | |
2582 __ Ldr(global_ctx, FieldMemOperand(global_object, | |
2583 GlobalObject::kNativeContextOffset)); | |
2584 __ Ldr(args_offset, | |
2585 ContextMemOperand(global_ctx, | |
2586 Context::STRICT_ARGUMENTS_BOILERPLATE_INDEX)); | |
2587 | |
2588 // x0 alloc_obj pointer to allocated objects: parameter array and | |
2589 // arguments object | |
2590 // x1 param_count_smi number of parameters passed to function (smi) | |
2591 // x2 params pointer to parameters | |
2592 // x3 function function pointer | |
2593 // x4 args_offset offset to arguments boilerplate | |
2594 // x13 param_count number of parameters passed to function | |
2595 | |
2596 // Copy the JS object part. | |
2597 __ CopyFields(alloc_obj, args_offset, CPURegList(x5, x6, x7), | |
2598 JSObject::kHeaderSize / kPointerSize); | |
2599 | |
2600 // Set the smi-tagged length as an in-object property. | |
2601 STATIC_ASSERT(Heap::kArgumentsLengthIndex == 0); | |
2602 const int kLengthOffset = JSObject::kHeaderSize + | |
2603 Heap::kArgumentsLengthIndex * kPointerSize; | |
2604 __ Str(param_count_smi, FieldMemOperand(alloc_obj, kLengthOffset)); | |
2605 | |
2606 // If there are no actual arguments, we're done. | |
2607 Label done; | |
2608 __ Cbz(param_count, &done); | |
2609 | |
2610 // Set up the elements pointer in the allocated arguments object and | |
2611 // initialize the header in the elements fixed array. | |
2612 Register elements = x5; | |
2613 __ Add(elements, alloc_obj, Heap::kStrictArgumentsObjectSize); | |
2614 __ Str(elements, FieldMemOperand(alloc_obj, JSObject::kElementsOffset)); | |
2615 __ LoadRoot(x10, Heap::kFixedArrayMapRootIndex); | |
2616 __ Str(x10, FieldMemOperand(elements, FixedArray::kMapOffset)); | |
2617 __ Str(param_count_smi, FieldMemOperand(elements, FixedArray::kLengthOffset)); | |
2618 | |
2619 // x0 alloc_obj pointer to allocated objects: parameter array and | |
2620 // arguments object | |
2621 // x1 param_count_smi number of parameters passed to function (smi) | |
2622 // x2 params pointer to parameters | |
2623 // x3 function function pointer | |
2624 // x4 array pointer to array slot (uninit) | |
2625 // x5 elements pointer to elements array of alloc_obj | |
2626 // x13 param_count number of parameters passed to function | |
2627 | |
2628 // Copy the fixed array slots. | |
2629 Label loop; | |
2630 Register array = x4; | |
2631 // Set up pointer to first array slot. | |
2632 __ Add(array, elements, FixedArray::kHeaderSize - kHeapObjectTag); | |
2633 | |
2634 __ Bind(&loop); | |
2635 // Pre-decrement the parameters pointer by kPointerSize on each iteration. | |
2636 // Pre-decrement in order to skip receiver. | |
2637 __ Ldr(x10, MemOperand(params, -kPointerSize, PreIndex)); | |
2638 // Post-increment elements by kPointerSize on each iteration. | |
2639 __ Str(x10, MemOperand(array, kPointerSize, PostIndex)); | |
2640 __ Sub(param_count, param_count, 1); | |
2641 __ Cbnz(param_count, &loop); | |
2642 | |
2643 // Return from stub. | |
2644 __ Bind(&done); | |
2645 __ Ret(); | |
2646 | |
2647 // Do the runtime call to allocate the arguments object. | |
2648 __ Bind(&runtime); | |
2649 __ Push(function, params, param_count_smi); | |
2650 __ TailCallRuntime(Runtime::kNewStrictArgumentsFast, 3, 1); | |
2651 } | |
2652 | |
2653 | |
2654 void RegExpExecStub::Generate(MacroAssembler* masm) { | |
2655 #ifdef V8_INTERPRETED_REGEXP | |
2656 __ TailCallRuntime(Runtime::kRegExpExec, 4, 1); | |
2657 #else // V8_INTERPRETED_REGEXP | |
2658 | |
2659 // Stack frame on entry. | |
2660 // jssp[0]: last_match_info (expected JSArray) | |
2661 // jssp[8]: previous index | |
2662 // jssp[16]: subject string | |
2663 // jssp[24]: JSRegExp object | |
2664 Label runtime; | |
2665 | |
2666 // Use of registers for this function. | |
2667 | |
2668 // Variable registers: | |
2669 // x10-x13 used as scratch registers | |
2670 // w0 string_type type of subject string | |
2671 // x2 jsstring_length subject string length | |
2672 // x3 jsregexp_object JSRegExp object | |
2673 // w4 string_encoding ASCII or UC16 | |
2674 // w5 sliced_string_offset if the string is a SlicedString | |
2675 // offset to the underlying string | |
2676 // w6 string_representation groups attributes of the string: | |
2677 // - is a string | |
2678 // - type of the string | |
2679 // - is a short external string | |
2680 Register string_type = w0; | |
2681 Register jsstring_length = x2; | |
2682 Register jsregexp_object = x3; | |
2683 Register string_encoding = w4; | |
2684 Register sliced_string_offset = w5; | |
2685 Register string_representation = w6; | |
2686 | |
2687 // These are in callee save registers and will be preserved by the call | |
2688 // to the native RegExp code, as this code is called using the normal | |
2689 // C calling convention. When calling directly from generated code the | |
2690 // native RegExp code will not do a GC and therefore the contents of | |
2691 // these registers are safe to use after the call. | |
2692 | |
2693 // x19 subject subject string | |
2694 // x20 regexp_data RegExp data (FixedArray) | |
2695 // x21 last_match_info_elements info relative to the last match | |
2696 // (FixedArray) | |
2697 // x22 code_object generated regexp code | |
2698 Register subject = x19; | |
2699 Register regexp_data = x20; | |
2700 Register last_match_info_elements = x21; | |
2701 Register code_object = x22; | |
2702 | |
2703 // TODO(jbramley): Is it necessary to preserve these? I don't think ARM does. | |
2704 CPURegList used_callee_saved_registers(subject, | |
2705 regexp_data, | |
2706 last_match_info_elements, | |
2707 code_object); | |
2708 __ PushCPURegList(used_callee_saved_registers); | |
2709 | |
2710 // Stack frame. | |
2711 // jssp[0] : x19 | |
2712 // jssp[8] : x20 | |
2713 // jssp[16]: x21 | |
2714 // jssp[24]: x22 | |
2715 // jssp[32]: last_match_info (JSArray) | |
2716 // jssp[40]: previous index | |
2717 // jssp[48]: subject string | |
2718 // jssp[56]: JSRegExp object | |
2719 | |
2720 const int kLastMatchInfoOffset = 4 * kPointerSize; | |
2721 const int kPreviousIndexOffset = 5 * kPointerSize; | |
2722 const int kSubjectOffset = 6 * kPointerSize; | |
2723 const int kJSRegExpOffset = 7 * kPointerSize; | |
2724 | |
2725 // Ensure that a RegExp stack is allocated. | |
2726 Isolate* isolate = masm->isolate(); | |
2727 ExternalReference address_of_regexp_stack_memory_address = | |
2728 ExternalReference::address_of_regexp_stack_memory_address(isolate); | |
2729 ExternalReference address_of_regexp_stack_memory_size = | |
2730 ExternalReference::address_of_regexp_stack_memory_size(isolate); | |
2731 __ Mov(x10, address_of_regexp_stack_memory_size); | |
2732 __ Ldr(x10, MemOperand(x10)); | |
2733 __ Cbz(x10, &runtime); | |
2734 | |
2735 // Check that the first argument is a JSRegExp object. | |
2736 ASSERT(jssp.Is(__ StackPointer())); | |
2737 __ Peek(jsregexp_object, kJSRegExpOffset); | |
2738 __ JumpIfSmi(jsregexp_object, &runtime); | |
2739 __ JumpIfNotObjectType(jsregexp_object, x10, x10, JS_REGEXP_TYPE, &runtime); | |
2740 | |
2741 // Check that the RegExp has been compiled (data contains a fixed array). | |
2742 __ Ldr(regexp_data, FieldMemOperand(jsregexp_object, JSRegExp::kDataOffset)); | |
2743 if (FLAG_debug_code) { | |
2744 STATIC_ASSERT(kSmiTag == 0); | |
2745 __ Tst(regexp_data, kSmiTagMask); | |
2746 __ Check(ne, kUnexpectedTypeForRegExpDataFixedArrayExpected); | |
2747 __ CompareObjectType(regexp_data, x10, x10, FIXED_ARRAY_TYPE); | |
2748 __ Check(eq, kUnexpectedTypeForRegExpDataFixedArrayExpected); | |
2749 } | |
2750 | |
2751 // Check the type of the RegExp. Only continue if type is JSRegExp::IRREGEXP. | |
2752 __ Ldr(x10, FieldMemOperand(regexp_data, JSRegExp::kDataTagOffset)); | |
2753 __ Cmp(x10, Smi::FromInt(JSRegExp::IRREGEXP)); | |
2754 __ B(ne, &runtime); | |
2755 | |
2756   // Check that the number of captures fits in the static offsets vector buffer. | |
2757   // We always have at least one capture for the whole match, plus additional | |
2758 // ones due to capturing parentheses. A capture takes 2 registers. | |
2759 // The number of capture registers then is (number_of_captures + 1) * 2. | |
2760 __ Ldrsw(x10, | |
2761 UntagSmiFieldMemOperand(regexp_data, | |
2762 JSRegExp::kIrregexpCaptureCountOffset)); | |
2763 // Check (number_of_captures + 1) * 2 <= offsets vector size | |
2764 // number_of_captures * 2 <= offsets vector size - 2 | |
2765 STATIC_ASSERT(Isolate::kJSRegexpStaticOffsetsVectorSize >= 2); | |
2766 __ Add(x10, x10, x10); | |
2767 __ Cmp(x10, Isolate::kJSRegexpStaticOffsetsVectorSize - 2); | |
2768 __ B(hi, &runtime); | |
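// Worked example (a sketch, not generated code): for /(\d+)-(\d+)/ there are
// two capturing parentheses, so number_of_captures == 2 and the match needs
// (2 + 1) * 2 == 6 capture registers. The check above compares the doubled
// count (x10 == 4) against kJSRegexpStaticOffsetsVectorSize - 2, which is
// equivalent to (number_of_captures + 1) * 2 <= vector size.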
2769 | |
2770 // Initialize offset for possibly sliced string. | |
2771 __ Mov(sliced_string_offset, 0); | |
2772 | |
2773 ASSERT(jssp.Is(__ StackPointer())); | |
2774 __ Peek(subject, kSubjectOffset); | |
2775 __ JumpIfSmi(subject, &runtime); | |
2776 | |
2777 __ Ldr(x10, FieldMemOperand(subject, HeapObject::kMapOffset)); | |
2778 __ Ldrb(string_type, FieldMemOperand(x10, Map::kInstanceTypeOffset)); | |
2779 | |
2780 __ Ldr(jsstring_length, FieldMemOperand(subject, String::kLengthOffset)); | |
2781 | |
2782 // Handle subject string according to its encoding and representation: | |
2783 // (1) Sequential string? If yes, go to (5). | |
2784 // (2) Anything but sequential or cons? If yes, go to (6). | |
2785 // (3) Cons string. If the string is flat, replace subject with first string. | |
2786   //     Otherwise bail out. | |
2787 // (4) Is subject external? If yes, go to (7). | |
2788 // (5) Sequential string. Load regexp code according to encoding. | |
2789 // (E) Carry on. | |
2790 /// [...] | |
2791 | |
2792 // Deferred code at the end of the stub: | |
2793 // (6) Not a long external string? If yes, go to (8). | |
2794 // (7) External string. Make it, offset-wise, look like a sequential string. | |
2795 // Go to (5). | |
2796 // (8) Short external string or not a string? If yes, bail out to runtime. | |
2797 // (9) Sliced string. Replace subject with parent. Go to (4). | |
2798 | |
2799 Label check_underlying; // (4) | |
2800 Label seq_string; // (5) | |
2801 Label not_seq_nor_cons; // (6) | |
2802 Label external_string; // (7) | |
2803 Label not_long_external; // (8) | |
2804 | |
2805 // (1) Sequential string? If yes, go to (5). | |
2806 __ And(string_representation, | |
2807 string_type, | |
2808 kIsNotStringMask | | |
2809 kStringRepresentationMask | | |
2810 kShortExternalStringMask); | |
2811 // We depend on the fact that Strings of type | |
2812 // SeqString and not ShortExternalString are defined | |
2813 // by the following pattern: | |
2814 // string_type: 0XX0 XX00 | |
2815 // ^ ^ ^^ | |
2816 // | | || | |
2817 // | | is a SeqString | |
2818 // | is not a short external String | |
2819 // is a String | |
2820 STATIC_ASSERT((kStringTag | kSeqStringTag) == 0); | |
2821 STATIC_ASSERT(kShortExternalStringTag != 0); | |
2822 __ Cbz(string_representation, &seq_string); // Go to (5). | |
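// Example of the mask in action, using the tag layout asserted and sketched
// above: a sequential one-byte string has representation bits 00, the "short
// external" bit clear and the "is not a string" bit clear, so ANDing its
// instance type with the three masks yields 0 and we branch to (5). Any cons,
// sliced, external or non-string value leaves at least one bit set.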
2823 | |
2824 // (2) Anything but sequential or cons? If yes, go to (6). | |
2825 STATIC_ASSERT(kConsStringTag < kExternalStringTag); | |
2826 STATIC_ASSERT(kSlicedStringTag > kExternalStringTag); | |
2827 STATIC_ASSERT(kIsNotStringMask > kExternalStringTag); | |
2828 STATIC_ASSERT(kShortExternalStringTag > kExternalStringTag); | |
2829 __ Cmp(string_representation, kExternalStringTag); | |
2830 __ B(ge, ¬_seq_nor_cons); // Go to (6). | |
2831 | |
2832 // (3) Cons string. Check that it's flat. | |
2833 __ Ldr(x10, FieldMemOperand(subject, ConsString::kSecondOffset)); | |
2834 __ JumpIfNotRoot(x10, Heap::kempty_stringRootIndex, &runtime); | |
2835 // Replace subject with first string. | |
2836 __ Ldr(subject, FieldMemOperand(subject, ConsString::kFirstOffset)); | |
2837 | |
2838 // (4) Is subject external? If yes, go to (7). | |
2839 __ Bind(&check_underlying); | |
2840 // Reload the string type. | |
2841 __ Ldr(x10, FieldMemOperand(subject, HeapObject::kMapOffset)); | |
2842 __ Ldrb(string_type, FieldMemOperand(x10, Map::kInstanceTypeOffset)); | |
2843 STATIC_ASSERT(kSeqStringTag == 0); | |
2844 // The underlying external string is never a short external string. | |
2845 STATIC_CHECK(ExternalString::kMaxShortLength < ConsString::kMinLength); | |
2846 STATIC_CHECK(ExternalString::kMaxShortLength < SlicedString::kMinLength); | |
2847 __ TestAndBranchIfAnySet(string_type.X(), | |
2848 kStringRepresentationMask, | |
2849 &external_string); // Go to (7). | |
2850 | |
2851 // (5) Sequential string. Load regexp code according to encoding. | |
2852 __ Bind(&seq_string); | |
2853 | |
2854   // Check that the third argument is a non-negative smi less than the subject | |
2855   // string length. A negative value will be greater (unsigned comparison). | |
2856 ASSERT(jssp.Is(__ StackPointer())); | |
2857 __ Peek(x10, kPreviousIndexOffset); | |
2858 __ JumpIfNotSmi(x10, &runtime); | |
2859 __ Cmp(jsstring_length, x10); | |
2860 __ B(ls, &runtime); | |
2861 | |
2862 // Argument 2 (x1): We need to load argument 2 (the previous index) into x1 | |
2863 // before entering the exit frame. | |
2864 __ SmiUntag(x1, x10); | |
2865 | |
2866 // The third bit determines the string encoding in string_type. | |
2867 STATIC_ASSERT(kOneByteStringTag == 0x04); | |
2868 STATIC_ASSERT(kTwoByteStringTag == 0x00); | |
2869 STATIC_ASSERT(kStringEncodingMask == 0x04); | |
2870 | |
2871 // Find the code object based on the assumptions above. | |
2872   // kDataAsciiCodeOffset and kDataUC16CodeOffset are adjacent; add an offset | |
2873   // of kPointerSize to reach the latter. | |
2874 ASSERT_EQ(JSRegExp::kDataAsciiCodeOffset + kPointerSize, | |
2875 JSRegExp::kDataUC16CodeOffset); | |
2876 __ Mov(x10, kPointerSize); | |
2877 // We will need the encoding later: ASCII = 0x04 | |
2878 // UC16 = 0x00 | |
2879 __ Ands(string_encoding, string_type, kStringEncodingMask); | |
2880 __ CzeroX(x10, ne); | |
2881 __ Add(x10, regexp_data, x10); | |
2882 __ Ldr(code_object, FieldMemOperand(x10, JSRegExp::kDataAsciiCodeOffset)); | |
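// Branchless selection, step by step: Ands sets the flags from
// string_type & kStringEncodingMask. For ASCII the result is 0x04 (ne), so
// CzeroX clears x10 and the load reads from kDataAsciiCodeOffset. For UC16
// the result is 0 (eq), x10 keeps kPointerSize, and the same load reaches
// the adjacent kDataUC16CodeOffset.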
2883 | |
2884 // (E) Carry on. String handling is done. | |
2885 | |
2886 // Check that the irregexp code has been generated for the actual string | |
2887   // encoding. If it has, the field contains a code object; otherwise it | |
2888   // contains a smi (code flushing support). | |
2889 __ JumpIfSmi(code_object, &runtime); | |
2890 | |
2891 // All checks done. Now push arguments for native regexp code. | |
2892 __ IncrementCounter(isolate->counters()->regexp_entry_native(), 1, | |
2893 x10, | |
2894 x11); | |
2895 | |
2896 // Isolates: note we add an additional parameter here (isolate pointer). | |
2897 __ EnterExitFrame(false, x10, 1); | |
2898 ASSERT(csp.Is(__ StackPointer())); | |
2899 | |
2900   // We have 9 arguments to pass to the regexp code, therefore we have to pass | |
2901   // one on the stack and the rest in registers. | |
2902 | |
2903 // Note that the placement of the argument on the stack isn't standard | |
2904 // AAPCS64: | |
2905 // csp[0]: Space for the return address placed by DirectCEntryStub. | |
2906 // csp[8]: Argument 9, the current isolate address. | |
2907 | |
2908 __ Mov(x10, ExternalReference::isolate_address(isolate)); | |
2909 __ Poke(x10, kPointerSize); | |
2910 | |
2911 Register length = w11; | |
2912 Register previous_index_in_bytes = w12; | |
2913 Register start = x13; | |
2914 | |
2915 // Load start of the subject string. | |
2916 __ Add(start, subject, SeqString::kHeaderSize - kHeapObjectTag); | |
2917   // Load the length of the original subject string from the previous stack | |
2918   // frame. We have to use fp, which points exactly two pointer sizes below | |
2919   // the previous sp. (Creating a new stack frame pushes the previous fp onto | |
2920   // the stack and decrements sp by 2 * kPointerSize.) | |
2921 __ Ldr(subject, MemOperand(fp, kSubjectOffset + 2 * kPointerSize)); | |
2922 __ Ldr(length, UntagSmiFieldMemOperand(subject, String::kLengthOffset)); | |
2923 | |
2924 // Handle UC16 encoding, two bytes make one character. | |
2925 // string_encoding: if ASCII: 0x04 | |
2926 // if UC16: 0x00 | |
2927 STATIC_ASSERT(kStringEncodingMask == 0x04); | |
2928 __ Ubfx(string_encoding, string_encoding, 2, 1); | |
2929 __ Eor(string_encoding, string_encoding, 1); | |
2930 // string_encoding: if ASCII: 0 | |
2931 // if UC16: 1 | |
2932 | |
2933 // Convert string positions from characters to bytes. | |
2934 // Previous index is in x1. | |
2935 __ Lsl(previous_index_in_bytes, w1, string_encoding); | |
2936 __ Lsl(length, length, string_encoding); | |
2937 __ Lsl(sliced_string_offset, sliced_string_offset, string_encoding); | |
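// After the Ubfx/Eor above, string_encoding is log2(bytes per character):
// 0 for ASCII, 1 for UC16. E.g. a previous index of 3 characters in a UC16
// string becomes 3 << 1 == 6 bytes; in an ASCII string it stays 3.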
2938 | |
2939 // Argument 1 (x0): Subject string. | |
2940 __ Mov(x0, subject); | |
2941 | |
2942 // Argument 2 (x1): Previous index, already there. | |
2943 | |
2944 // Argument 3 (x2): Get the start of input. | |
2945 // Start of input = start of string + previous index + substring offset | |
2946   //                  (0 if the string is not sliced). | |
2948 __ Add(w10, previous_index_in_bytes, sliced_string_offset); | |
2949 __ Add(x2, start, Operand(w10, UXTW)); | |
2950 | |
2951 // Argument 4 (x3): | |
2952 // End of input = start of input + (length of input - previous index) | |
2953 __ Sub(w10, length, previous_index_in_bytes); | |
2954 __ Add(x3, x2, Operand(w10, UXTW)); | |
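// Worked example with hypothetical values: for start == 0x1000, an ASCII
// string of length 10, previous index 2 and no slice offset, argument 3 is
// 0x1000 + 2 == 0x1002 and argument 4 is 0x1002 + (10 - 2) == 0x100a, one
// past the last byte to scan.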
2955 | |
2956 // Argument 5 (x4): static offsets vector buffer. | |
2957 __ Mov(x4, ExternalReference::address_of_static_offsets_vector(isolate)); | |
2958 | |
2959 // Argument 6 (x5): Set the number of capture registers to zero to force | |
2960 // global regexps to behave as non-global. This stub is not used for global | |
2961 // regexps. | |
2962 __ Mov(x5, 0); | |
2963 | |
2964 // Argument 7 (x6): Start (high end) of backtracking stack memory area. | |
2965 __ Mov(x10, address_of_regexp_stack_memory_address); | |
2966 __ Ldr(x10, MemOperand(x10)); | |
2967 __ Mov(x11, address_of_regexp_stack_memory_size); | |
2968 __ Ldr(x11, MemOperand(x11)); | |
2969 __ Add(x6, x10, x11); | |
2970 | |
2971 // Argument 8 (x7): Indicate that this is a direct call from JavaScript. | |
2972 __ Mov(x7, 1); | |
2973 | |
2974 // Locate the code entry and call it. | |
2975 __ Add(code_object, code_object, Code::kHeaderSize - kHeapObjectTag); | |
2976 DirectCEntryStub stub; | |
2977 stub.GenerateCall(masm, code_object); | |
2978 | |
2979 __ LeaveExitFrame(false, x10, true); | |
2980 | |
2981 // The generated regexp code returns an int32 in w0. | |
2982 Label failure, exception; | |
2983 __ CompareAndBranch(w0, NativeRegExpMacroAssembler::FAILURE, eq, &failure); | |
2984 __ CompareAndBranch(w0, | |
2985 NativeRegExpMacroAssembler::EXCEPTION, | |
2986 eq, | |
2987 &exception); | |
2988 __ CompareAndBranch(w0, NativeRegExpMacroAssembler::RETRY, eq, &runtime); | |
2989 | |
2990 // Success: process the result from the native regexp code. | |
2991 Register number_of_capture_registers = x12; | |
2992 | |
2993 // Calculate number of capture registers (number_of_captures + 1) * 2 | |
2994 // and store it in the last match info. | |
2995 __ Ldrsw(x10, | |
2996 UntagSmiFieldMemOperand(regexp_data, | |
2997 JSRegExp::kIrregexpCaptureCountOffset)); | |
2998 __ Add(x10, x10, x10); | |
2999 __ Add(number_of_capture_registers, x10, 2); | |
3000 | |
3001   // Check that the fourth argument is a JSArray object. | |
3002 ASSERT(jssp.Is(__ StackPointer())); | |
3003 __ Peek(x10, kLastMatchInfoOffset); | |
3004 __ JumpIfSmi(x10, &runtime); | |
3005 __ JumpIfNotObjectType(x10, x11, x11, JS_ARRAY_TYPE, &runtime); | |
3006 | |
3007 // Check that the JSArray is the fast case. | |
3008 __ Ldr(last_match_info_elements, | |
3009 FieldMemOperand(x10, JSArray::kElementsOffset)); | |
3010 __ Ldr(x10, | |
3011 FieldMemOperand(last_match_info_elements, HeapObject::kMapOffset)); | |
3012 __ JumpIfNotRoot(x10, Heap::kFixedArrayMapRootIndex, &runtime); | |
3013 | |
3014 // Check that the last match info has space for the capture registers and the | |
3015 // additional information (overhead). | |
3016 // (number_of_captures + 1) * 2 + overhead <= last match info size | |
3017 // (number_of_captures * 2) + 2 + overhead <= last match info size | |
3018 // number_of_capture_registers + overhead <= last match info size | |
3019 __ Ldrsw(x10, | |
3020 UntagSmiFieldMemOperand(last_match_info_elements, | |
3021 FixedArray::kLengthOffset)); | |
3022 __ Add(x11, number_of_capture_registers, RegExpImpl::kLastMatchOverhead); | |
3023 __ Cmp(x11, x10); | |
3024 __ B(gt, &runtime); | |
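// Example, assuming RegExpImpl::kLastMatchOverhead counts the capture-count,
// last-subject and last-input slots stored below (i.e. 3): a regexp with one
// capturing group has number_of_capture_registers == 4, so the elements
// array must have a length of at least 4 + 3 == 7.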
3025 | |
3026 // Store the capture count. | |
3027 __ SmiTag(x10, number_of_capture_registers); | |
3028 __ Str(x10, | |
3029 FieldMemOperand(last_match_info_elements, | |
3030 RegExpImpl::kLastCaptureCountOffset)); | |
3031 // Store last subject and last input. | |
3032 __ Str(subject, | |
3033 FieldMemOperand(last_match_info_elements, | |
3034 RegExpImpl::kLastSubjectOffset)); | |
3035 // Use x10 as the subject string in order to only need | |
3036 // one RecordWriteStub. | |
3037 __ Mov(x10, subject); | |
3038 __ RecordWriteField(last_match_info_elements, | |
3039 RegExpImpl::kLastSubjectOffset, | |
3040 x10, | |
3041 x11, | |
3042 kLRHasNotBeenSaved, | |
3043 kDontSaveFPRegs); | |
3044 __ Str(subject, | |
3045 FieldMemOperand(last_match_info_elements, | |
3046 RegExpImpl::kLastInputOffset)); | |
3047 __ Mov(x10, subject); | |
3048 __ RecordWriteField(last_match_info_elements, | |
3049 RegExpImpl::kLastInputOffset, | |
3050 x10, | |
3051 x11, | |
3052 kLRHasNotBeenSaved, | |
3053 kDontSaveFPRegs); | |
3054 | |
3055 Register last_match_offsets = x13; | |
3056 Register offsets_vector_index = x14; | |
3057 Register current_offset = x15; | |
3058 | |
3059 // Get the static offsets vector filled by the native regexp code | |
3060 // and fill the last match info. | |
3061 ExternalReference address_of_static_offsets_vector = | |
3062 ExternalReference::address_of_static_offsets_vector(isolate); | |
3063 __ Mov(offsets_vector_index, address_of_static_offsets_vector); | |
3064 | |
3065 Label next_capture, done; | |
3066 // Capture register counter starts from number of capture registers and | |
3067 // iterates down to zero (inclusive). | |
3068 __ Add(last_match_offsets, | |
3069 last_match_info_elements, | |
3070 RegExpImpl::kFirstCaptureOffset - kHeapObjectTag); | |
3071 __ Bind(&next_capture); | |
3072 __ Subs(number_of_capture_registers, number_of_capture_registers, 2); | |
3073 __ B(mi, &done); | |
3074   // Read two 32-bit values from the static offsets vector buffer into | |
3075   // an X register. | |
3076 __ Ldr(current_offset, | |
3077 MemOperand(offsets_vector_index, kWRegSize * 2, PostIndex)); | |
3078 // Store the smi values in the last match info. | |
3079 __ SmiTag(x10, current_offset); | |
3080   // Clearing the bottom 32 bits gives us a Smi. | |
3081 STATIC_ASSERT(kSmiShift == 32); | |
3082 __ And(x11, current_offset, ~kWRegMask); | |
3083 __ Stp(x10, | |
3084 x11, | |
3085 MemOperand(last_match_offsets, kXRegSize * 2, PostIndex)); | |
3086 __ B(&next_capture); | |
3087 __ Bind(&done); | |
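// Example of the packed store in the loop above: if the vector holds the
// offsets 3 and 7, the little-endian Ldr reads x = 0x00000007'00000003.
// SmiTag shifts the low word up to give Smi(3), and masking out the low
// 32 bits leaves 0x00000007'00000000, which is already Smi(7) under the
// kSmiShift == 32 encoding asserted above.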
3088 | |
3089 // Return last match info. | |
3090 __ Peek(x0, kLastMatchInfoOffset); | |
3091 __ PopCPURegList(used_callee_saved_registers); | |
3092 // Drop the 4 arguments of the stub from the stack. | |
3093 __ Drop(4); | |
3094 __ Ret(); | |
3095 | |
3096 __ Bind(&exception); | |
3097 Register exception_value = x0; | |
3098   // A stack overflow (on the backtrack stack) may have occurred | |
3099 // in the RegExp code but no exception has been created yet. | |
3100 // If there is no pending exception, handle that in the runtime system. | |
3101 __ Mov(x10, Operand(isolate->factory()->the_hole_value())); | |
3102 __ Mov(x11, | |
3103 Operand(ExternalReference(Isolate::kPendingExceptionAddress, | |
3104 isolate))); | |
3105 __ Ldr(exception_value, MemOperand(x11)); | |
3106 __ Cmp(x10, exception_value); | |
3107 __ B(eq, &runtime); | |
3108 | |
3109 __ Str(x10, MemOperand(x11)); // Clear pending exception. | |
3110 | |
3111 // Check if the exception is a termination. If so, throw as uncatchable. | |
3112 Label termination_exception; | |
3113 __ JumpIfRoot(exception_value, | |
3114 Heap::kTerminationExceptionRootIndex, | |
3115 &termination_exception); | |
3116 | |
3117 __ Throw(exception_value, x10, x11, x12, x13); | |
3118 | |
3119 __ Bind(&termination_exception); | |
3120 __ ThrowUncatchable(exception_value, x10, x11, x12, x13); | |
3121 | |
3122 __ Bind(&failure); | |
3123 __ Mov(x0, Operand(masm->isolate()->factory()->null_value())); | |
3124 __ PopCPURegList(used_callee_saved_registers); | |
3125 // Drop the 4 arguments of the stub from the stack. | |
3126 __ Drop(4); | |
3127 __ Ret(); | |
3128 | |
3129 __ Bind(&runtime); | |
3130 __ PopCPURegList(used_callee_saved_registers); | |
3131 __ TailCallRuntime(Runtime::kRegExpExec, 4, 1); | |
3132 | |
3133 // Deferred code for string handling. | |
3134 // (6) Not a long external string? If yes, go to (8). | |
3135 __ Bind(¬_seq_nor_cons); | |
3136 // Compare flags are still set. | |
3137 __ B(ne, ¬_long_external); // Go to (8). | |
3138 | |
3139 // (7) External string. Make it, offset-wise, look like a sequential string. | |
3140 __ Bind(&external_string); | |
3141 if (masm->emit_debug_code()) { | |
3142 // Assert that we do not have a cons or slice (indirect strings) here. | |
3143 // Sequential strings have already been ruled out. | |
3144 __ Ldr(x10, FieldMemOperand(subject, HeapObject::kMapOffset)); | |
3145 __ Ldrb(x10, FieldMemOperand(x10, Map::kInstanceTypeOffset)); | |
3146 __ Tst(x10, kIsIndirectStringMask); | |
3147 __ Check(eq, kExternalStringExpectedButNotFound); | |
3148 __ And(x10, x10, kStringRepresentationMask); | |
3149 __ Cmp(x10, 0); | |
3150 __ Check(ne, kExternalStringExpectedButNotFound); | |
3151 } | |
3152 __ Ldr(subject, | |
3153 FieldMemOperand(subject, ExternalString::kResourceDataOffset)); | |
3154 // Move the pointer so that offset-wise, it looks like a sequential string. | |
3155 STATIC_ASSERT(SeqTwoByteString::kHeaderSize == SeqOneByteString::kHeaderSize); | |
3156 __ Sub(subject, subject, SeqTwoByteString::kHeaderSize - kHeapObjectTag); | |
3157 __ B(&seq_string); // Go to (5). | |
3158 | |
3159 // (8) If this is a short external string or not a string, bail out to | |
3160 // runtime. | |
3161 __ Bind(¬_long_external); | |
3162 STATIC_ASSERT(kShortExternalStringTag != 0); | |
3163 __ TestAndBranchIfAnySet(string_representation, | |
3164 kShortExternalStringMask | kIsNotStringMask, | |
3165 &runtime); | |
3166 | |
3167 // (9) Sliced string. Replace subject with parent. | |
3168 __ Ldr(sliced_string_offset, | |
3169 UntagSmiFieldMemOperand(subject, SlicedString::kOffsetOffset)); | |
3170 __ Ldr(subject, FieldMemOperand(subject, SlicedString::kParentOffset)); | |
3171 __ B(&check_underlying); // Go to (4). | |
3172 #endif | |
3173 } | |
3174 | |
3175 | |
3176 static void GenerateRecordCallTarget(MacroAssembler* masm, | |
3177 Register argc, | |
3178 Register function, | |
3179 Register feedback_vector, | |
3180 Register index, | |
3181 Register scratch1, | |
3182 Register scratch2) { | |
3183 ASM_LOCATION("GenerateRecordCallTarget"); | |
3184 ASSERT(!AreAliased(scratch1, scratch2, | |
3185 argc, function, feedback_vector, index)); | |
3186 // Cache the called function in a feedback vector slot. Cache states are | |
3187 // uninitialized, monomorphic (indicated by a JSFunction), and megamorphic. | |
3188 // argc : number of arguments to the construct function | |
3189 // function : the function to call | |
3190 // feedback_vector : the feedback vector | |
3191 // index : slot in feedback vector (smi) | |
3192 Label initialize, done, miss, megamorphic, not_array_function; | |
3193 | |
3194 ASSERT_EQ(*TypeFeedbackInfo::MegamorphicSentinel(masm->isolate()), | |
3195 masm->isolate()->heap()->megamorphic_symbol()); | |
3196 ASSERT_EQ(*TypeFeedbackInfo::UninitializedSentinel(masm->isolate()), | |
3197 masm->isolate()->heap()->uninitialized_symbol()); | |
3198 | |
3199 // Load the cache state. | |
3200 __ Add(scratch1, feedback_vector, | |
3201 Operand::UntagSmiAndScale(index, kPointerSizeLog2)); | |
3202 __ Ldr(scratch1, FieldMemOperand(scratch1, FixedArray::kHeaderSize)); | |
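// Slot addressing example: for slot smi 2, UntagSmiAndScale yields
// 2 << kPointerSizeLog2 == 16 bytes, and FieldMemOperand adds
// FixedArray::kHeaderSize - kHeapObjectTag, so scratch1 ends up holding
// feedback_vector->get(2).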
3203 | |
3204 // A monomorphic cache hit or an already megamorphic state: invoke the | |
3205 // function without changing the state. | |
3206 __ Cmp(scratch1, function); | |
3207 __ B(eq, &done); | |
3208 | |
3209 if (!FLAG_pretenuring_call_new) { | |
3210 // If we came here, we need to see if we are the array function. | |
3211     // If we didn't have a matching function, and we didn't find the megamorphic | |
3212     // sentinel, then the slot holds either some other function or an | |
3213     // AllocationSite. Do a map check on the object in the scratch1 register. | |
3214 __ Ldr(scratch2, FieldMemOperand(scratch1, AllocationSite::kMapOffset)); | |
3215 __ JumpIfNotRoot(scratch2, Heap::kAllocationSiteMapRootIndex, &miss); | |
3216 | |
3217 // Make sure the function is the Array() function | |
3218 __ LoadGlobalFunction(Context::ARRAY_FUNCTION_INDEX, scratch1); | |
3219 __ Cmp(function, scratch1); | |
3220 __ B(ne, &megamorphic); | |
3221 __ B(&done); | |
3222 } | |
3223 | |
3224 __ Bind(&miss); | |
3225 | |
3226   // A monomorphic miss (i.e., here the cache is not uninitialized) goes | |
3227   // megamorphic. | |
3228 __ JumpIfRoot(scratch1, Heap::kUninitializedSymbolRootIndex, &initialize); | |
3229   // MegamorphicSentinel is an immortal immovable object (the megamorphic | |
3230   // symbol) so no write-barrier is needed. | |
3231 __ Bind(&megamorphic); | |
3232 __ Add(scratch1, feedback_vector, | |
3233 Operand::UntagSmiAndScale(index, kPointerSizeLog2)); | |
3234 __ LoadRoot(scratch2, Heap::kMegamorphicSymbolRootIndex); | |
3235 __ Str(scratch2, FieldMemOperand(scratch1, FixedArray::kHeaderSize)); | |
3236 __ B(&done); | |
3237 | |
3238   // An uninitialized cache is patched with the function, or with an | |
3239   // AllocationSite (encoding the ElementsKind) if function is the Array constructor. | |
3240 __ Bind(&initialize); | |
3241 | |
3242 if (!FLAG_pretenuring_call_new) { | |
3243 // Make sure the function is the Array() function | |
3244 __ LoadGlobalFunction(Context::ARRAY_FUNCTION_INDEX, scratch1); | |
3245 __ Cmp(function, scratch1); | |
3246 __ B(ne, ¬_array_function); | |
3247 | |
3248     // The target function is the Array constructor. Create an | |
3249     // AllocationSite if we don't already have it, and store it in the | |
3250     // slot. | |
3251 { | |
3252 FrameScope scope(masm, StackFrame::INTERNAL); | |
3253 CreateAllocationSiteStub create_stub; | |
3254 | |
3255 // Arguments register must be smi-tagged to call out. | |
3256 __ SmiTag(argc); | |
3257 __ Push(argc, function, feedback_vector, index); | |
3258 | |
3259       // CreateAllocationSiteStub expects the feedback vector in x2 and the slot | |
3260 // index in x3. | |
3261 ASSERT(feedback_vector.Is(x2) && index.Is(x3)); | |
3262 __ CallStub(&create_stub); | |
3263 | |
3264 __ Pop(index, feedback_vector, function, argc); | |
3265 __ SmiUntag(argc); | |
3266 } | |
3267 __ B(&done); | |
3268 | |
3269 __ Bind(¬_array_function); | |
3270 } | |
3271 | |
3272 // An uninitialized cache is patched with the function. | |
3273 | |
3274 __ Add(scratch1, feedback_vector, | |
3275 Operand::UntagSmiAndScale(index, kPointerSizeLog2)); | |
3276 __ Add(scratch1, scratch1, FixedArray::kHeaderSize - kHeapObjectTag); | |
3277 __ Str(function, MemOperand(scratch1, 0)); | |
3278 | |
3279 __ Push(function); | |
3280 __ RecordWrite(feedback_vector, scratch1, function, kLRHasNotBeenSaved, | |
3281 kDontSaveFPRegs, EMIT_REMEMBERED_SET, OMIT_SMI_CHECK); | |
3282 __ Pop(function); | |
3283 | |
3284 __ Bind(&done); | |
3285 } | |
3286 | |
3287 | |
3288 void CallFunctionStub::Generate(MacroAssembler* masm) { | |
3289 ASM_LOCATION("CallFunctionStub::Generate"); | |
3290   // x1 : the function to call | |
3291 // x2 : feedback vector | |
3292 // x3 : slot in feedback vector (smi) (if x2 is not the megamorphic symbol) | |
3293 Register function = x1; | |
3294 Register cache_cell = x2; | |
3295 Register slot = x3; | |
3296 Register type = x4; | |
3297 Label slow, non_function, wrap, cont; | |
3298 | |
3299 // TODO(jbramley): This function has a lot of unnamed registers. Name them, | |
3300 // and tidy things up a bit. | |
3301 | |
3302 if (NeedsChecks()) { | |
3303 // Check that the function is really a JavaScript function. | |
3304 __ JumpIfSmi(function, &non_function); | |
3305 | |
3306     // Go to the slow case if we do not have a function. | |
3307 __ JumpIfNotObjectType(function, x10, type, JS_FUNCTION_TYPE, &slow); | |
3308 | |
3309 if (RecordCallTarget()) { | |
3310 GenerateRecordCallTarget(masm, x0, function, cache_cell, slot, x4, x5); | |
3311       // Type information was updated. Because we may call Array, which | |
3312       // expects either undefined or an AllocationSite in x2, we need | |
3313       // to set x2 to undefined. | |
3314 __ LoadRoot(cache_cell, Heap::kUndefinedValueRootIndex); | |
3315 } | |
3316 } | |
3317 | |
3318 // Fast-case: Invoke the function now. | |
3319   // x1 : pushed function | |
3320 ParameterCount actual(argc_); | |
3321 | |
3322 if (CallAsMethod()) { | |
3323 if (NeedsChecks()) { | |
3324 // Do not transform the receiver for strict mode functions. | |
3325 __ Ldr(x3, FieldMemOperand(x1, JSFunction::kSharedFunctionInfoOffset)); | |
3326 __ Ldr(w4, FieldMemOperand(x3, SharedFunctionInfo::kCompilerHintsOffset)); | |
3327 __ Tbnz(w4, SharedFunctionInfo::kStrictModeFunction, &cont); | |
3328 | |
3329       // Do not transform the receiver for natives (compiler hints already in w4). | |
3330 __ Tbnz(w4, SharedFunctionInfo::kNative, &cont); | |
3331 } | |
3332 | |
3333 // Compute the receiver in sloppy mode. | |
3334 __ Peek(x3, argc_ * kPointerSize); | |
3335 | |
3336 if (NeedsChecks()) { | |
3337 __ JumpIfSmi(x3, &wrap); | |
3338 __ JumpIfObjectType(x3, x10, type, FIRST_SPEC_OBJECT_TYPE, &wrap, lt); | |
3339 } else { | |
3340 __ B(&wrap); | |
3341 } | |
3342 | |
3343 __ Bind(&cont); | |
3344 } | |
3345 __ InvokeFunction(function, | |
3346 actual, | |
3347 JUMP_FUNCTION, | |
3348 NullCallWrapper()); | |
3349 | |
3350 if (NeedsChecks()) { | |
3351 // Slow-case: Non-function called. | |
3352 __ Bind(&slow); | |
3353 if (RecordCallTarget()) { | |
3354 // If there is a call target cache, mark it megamorphic in the | |
3355 // non-function case. MegamorphicSentinel is an immortal immovable object | |
3356 // (megamorphic symbol) so no write barrier is needed. | |
3357 ASSERT_EQ(*TypeFeedbackInfo::MegamorphicSentinel(masm->isolate()), | |
3358 masm->isolate()->heap()->megamorphic_symbol()); | |
3359 __ Add(x12, cache_cell, Operand::UntagSmiAndScale(slot, | |
3360 kPointerSizeLog2)); | |
3361 __ LoadRoot(x11, Heap::kMegamorphicSymbolRootIndex); | |
3362 __ Str(x11, FieldMemOperand(x12, FixedArray::kHeaderSize)); | |
3363 } | |
3364 // Check for function proxy. | |
3365     // x4 (type) : function type. | |
3366 __ CompareAndBranch(type, JS_FUNCTION_PROXY_TYPE, ne, &non_function); | |
3367 __ Push(function); // put proxy as additional argument | |
3368 __ Mov(x0, argc_ + 1); | |
3369 __ Mov(x2, 0); | |
3370 __ GetBuiltinFunction(x1, Builtins::CALL_FUNCTION_PROXY); | |
3371 { | |
3372 Handle<Code> adaptor = | |
3373 masm->isolate()->builtins()->ArgumentsAdaptorTrampoline(); | |
3374 __ Jump(adaptor, RelocInfo::CODE_TARGET); | |
3375 } | |
3376 | |
3377 // CALL_NON_FUNCTION expects the non-function callee as receiver (instead | |
3378 // of the original receiver from the call site). | |
3379 __ Bind(&non_function); | |
3380 __ Poke(function, argc_ * kXRegSize); | |
3381 __ Mov(x0, argc_); // Set up the number of arguments. | |
3382 __ Mov(x2, 0); | |
3383 __ GetBuiltinFunction(function, Builtins::CALL_NON_FUNCTION); | |
3384 __ Jump(masm->isolate()->builtins()->ArgumentsAdaptorTrampoline(), | |
3385 RelocInfo::CODE_TARGET); | |
3386 } | |
3387 | |
3388 if (CallAsMethod()) { | |
3389 __ Bind(&wrap); | |
3390 // Wrap the receiver and patch it back onto the stack. | |
3391 { FrameScope frame_scope(masm, StackFrame::INTERNAL); | |
3392 __ Push(x1, x3); | |
3393 __ InvokeBuiltin(Builtins::TO_OBJECT, CALL_FUNCTION); | |
3394 __ Pop(x1); | |
3395 } | |
3396 __ Poke(x0, argc_ * kPointerSize); | |
3397 __ B(&cont); | |
3398 } | |
3399 } | |
3400 | |
3401 | |
3402 void CallConstructStub::Generate(MacroAssembler* masm) { | |
3403 ASM_LOCATION("CallConstructStub::Generate"); | |
3404 // x0 : number of arguments | |
3405 // x1 : the function to call | |
3406 // x2 : feedback vector | |
3407   // x3 : slot in feedback vector (smi) (if x2 is not the megamorphic symbol) | |
3408 Register function = x1; | |
3409 Label slow, non_function_call; | |
3410 | |
3411 // Check that the function is not a smi. | |
3412 __ JumpIfSmi(function, &non_function_call); | |
3413 // Check that the function is a JSFunction. | |
3414 Register object_type = x10; | |
3415 __ JumpIfNotObjectType(function, object_type, object_type, JS_FUNCTION_TYPE, | |
3416 &slow); | |
3417 | |
3418 if (RecordCallTarget()) { | |
3419 GenerateRecordCallTarget(masm, x0, function, x2, x3, x4, x5); | |
3420 | |
3421 __ Add(x5, x2, Operand::UntagSmiAndScale(x3, kPointerSizeLog2)); | |
3422 if (FLAG_pretenuring_call_new) { | |
3423 // Put the AllocationSite from the feedback vector into x2. | |
3424 // By adding kPointerSize we encode that we know the AllocationSite | |
3425 // entry is at the feedback vector slot given by x3 + 1. | |
3426 __ Ldr(x2, FieldMemOperand(x5, FixedArray::kHeaderSize + kPointerSize)); | |
3427 } else { | |
3428 Label feedback_register_initialized; | |
3429 // Put the AllocationSite from the feedback vector into x2, or undefined. | |
3430 __ Ldr(x2, FieldMemOperand(x5, FixedArray::kHeaderSize)); | |
3431 __ Ldr(x5, FieldMemOperand(x2, AllocationSite::kMapOffset)); | |
3432 __ JumpIfRoot(x5, Heap::kAllocationSiteMapRootIndex, | |
3433 &feedback_register_initialized); | |
3434 __ LoadRoot(x2, Heap::kUndefinedValueRootIndex); | |
3435       __ Bind(&feedback_register_initialized); | |
3436 } | |
3437 | |
3438 __ AssertUndefinedOrAllocationSite(x2, x5); | |
3439 } | |
3440 | |
3441 // Jump to the function-specific construct stub. | |
3442 Register jump_reg = x4; | |
3443 Register shared_func_info = jump_reg; | |
3444 Register cons_stub = jump_reg; | |
3445 Register cons_stub_code = jump_reg; | |
3446 __ Ldr(shared_func_info, | |
3447 FieldMemOperand(function, JSFunction::kSharedFunctionInfoOffset)); | |
3448 __ Ldr(cons_stub, | |
3449 FieldMemOperand(shared_func_info, | |
3450 SharedFunctionInfo::kConstructStubOffset)); | |
3451 __ Add(cons_stub_code, cons_stub, Code::kHeaderSize - kHeapObjectTag); | |
3452 __ Br(cons_stub_code); | |
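// The entry point is the first instruction past the Code object's header;
// subtracting kHeapObjectTag compensates for the tagged cons_stub pointer,
// mirroring the Code::kHeaderSize - kHeapObjectTag adjustment used for the
// regexp code object earlier in this file.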
3453 | |
3454 Label do_call; | |
3455 __ Bind(&slow); | |
3456 __ Cmp(object_type, JS_FUNCTION_PROXY_TYPE); | |
3457 __ B(ne, &non_function_call); | |
3458 __ GetBuiltinFunction(x1, Builtins::CALL_FUNCTION_PROXY_AS_CONSTRUCTOR); | |
3459 __ B(&do_call); | |
3460 | |
3461 __ Bind(&non_function_call); | |
3462 __ GetBuiltinFunction(x1, Builtins::CALL_NON_FUNCTION_AS_CONSTRUCTOR); | |
3463 | |
3464 __ Bind(&do_call); | |
3465 // Set expected number of arguments to zero (not changing x0). | |
3466 __ Mov(x2, 0); | |
3467 __ Jump(masm->isolate()->builtins()->ArgumentsAdaptorTrampoline(), | |
3468 RelocInfo::CODE_TARGET); | |
3469 } | |
3470 | |
3471 | |
3472 void StringCharCodeAtGenerator::GenerateFast(MacroAssembler* masm) { | |
3473   // If the receiver is a smi, trigger the non-string case. | |
3474 __ JumpIfSmi(object_, receiver_not_string_); | |
3475 | |
3476 // Fetch the instance type of the receiver into result register. | |
3477 __ Ldr(result_, FieldMemOperand(object_, HeapObject::kMapOffset)); | |
3478 __ Ldrb(result_, FieldMemOperand(result_, Map::kInstanceTypeOffset)); | |
3479 | |
3480   // If the receiver is not a string, trigger the non-string case. | |
3481 __ TestAndBranchIfAnySet(result_, kIsNotStringMask, receiver_not_string_); | |
3482 | |
3483   // If the index is not a smi, trigger the non-smi case. | |
3484 __ JumpIfNotSmi(index_, &index_not_smi_); | |
3485 | |
3486 __ Bind(&got_smi_index_); | |
3487 // Check for index out of range. | |
3488 __ Ldrsw(result_, UntagSmiFieldMemOperand(object_, String::kLengthOffset)); | |
3489 __ Cmp(result_, Operand::UntagSmi(index_)); | |
3490 __ B(ls, index_out_of_range_); | |
3491 | |
3492 __ SmiUntag(index_); | |
3493 | |
3494 StringCharLoadGenerator::Generate(masm, | |
3495 object_, | |
3496 index_.W(), | |
3497 result_, | |
3498 &call_runtime_); | |
3499 __ SmiTag(result_); | |
3500 __ Bind(&exit_); | |
3501 } | |
3502 | |
3503 | |
3504 void StringCharCodeAtGenerator::GenerateSlow( | |
3505 MacroAssembler* masm, | |
3506 const RuntimeCallHelper& call_helper) { | |
3507 __ Abort(kUnexpectedFallthroughToCharCodeAtSlowCase); | |
3508 | |
3509 __ Bind(&index_not_smi_); | |
3510 // If index is a heap number, try converting it to an integer. | |
3511 __ CheckMap(index_, | |
3512 result_, | |
3513 Heap::kHeapNumberMapRootIndex, | |
3514 index_not_number_, | |
3515 DONT_DO_SMI_CHECK); | |
3516 call_helper.BeforeCall(masm); | |
3517 // Save object_ on the stack and pass index_ as argument for runtime call. | |
3518 __ Push(object_, index_); | |
3519 if (index_flags_ == STRING_INDEX_IS_NUMBER) { | |
3520 __ CallRuntime(Runtime::kNumberToIntegerMapMinusZero, 1); | |
3521 } else { | |
3522 ASSERT(index_flags_ == STRING_INDEX_IS_ARRAY_INDEX); | |
3523 // NumberToSmi discards numbers that are not exact integers. | |
3524 __ CallRuntime(Runtime::kNumberToSmi, 1); | |
3525 } | |
3526 // Save the conversion result before the pop instructions below | |
3527 // have a chance to overwrite it. | |
3528 __ Mov(index_, x0); | |
3529 __ Pop(object_); | |
3530 // Reload the instance type. | |
3531 __ Ldr(result_, FieldMemOperand(object_, HeapObject::kMapOffset)); | |
3532 __ Ldrb(result_, FieldMemOperand(result_, Map::kInstanceTypeOffset)); | |
3533 call_helper.AfterCall(masm); | |
3534 | |
3535 // If index is still not a smi, it must be out of range. | |
3536 __ JumpIfNotSmi(index_, index_out_of_range_); | |
3537 // Otherwise, return to the fast path. | |
3538 __ B(&got_smi_index_); | |
3539 | |
3540 // Call runtime. We get here when the receiver is a string and the | |
3541   // index is a number, but the code for getting the actual character | |
3542 // is too complex (e.g., when the string needs to be flattened). | |
3543 __ Bind(&call_runtime_); | |
3544 call_helper.BeforeCall(masm); | |
3545 __ SmiTag(index_); | |
3546 __ Push(object_, index_); | |
3547 __ CallRuntime(Runtime::kStringCharCodeAt, 2); | |
3548 __ Mov(result_, x0); | |
3549 call_helper.AfterCall(masm); | |
3550 __ B(&exit_); | |
3551 | |
3552 __ Abort(kUnexpectedFallthroughFromCharCodeAtSlowCase); | |
3553 } | |
3554 | |
3555 | |
3556 void StringCharFromCodeGenerator::GenerateFast(MacroAssembler* masm) { | |
3557 __ JumpIfNotSmi(code_, &slow_case_); | |
3558 __ Cmp(code_, Smi::FromInt(String::kMaxOneByteCharCode)); | |
3559 __ B(hi, &slow_case_); | |
3560 | |
3561 __ LoadRoot(result_, Heap::kSingleCharacterStringCacheRootIndex); | |
3562   // At this point the code register contains the smi-tagged ASCII char code. | |
3563 STATIC_ASSERT(kSmiShift > kPointerSizeLog2); | |
3564 __ Add(result_, result_, Operand(code_, LSR, kSmiShift - kPointerSizeLog2)); | |
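// code_ holds a smi (char code << kSmiShift). Shifting right by
// kSmiShift - kPointerSizeLog2 turns it into char code * kPointerSize in
// one step: e.g. Smi(65) >> (32 - 3) == 65 * 8, the byte offset of cache
// entry 65.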
3565 __ Ldr(result_, FieldMemOperand(result_, FixedArray::kHeaderSize)); | |
3566 __ JumpIfRoot(result_, Heap::kUndefinedValueRootIndex, &slow_case_); | |
3567 __ Bind(&exit_); | |
3568 } | |
3569 | |
3570 | |
3571 void StringCharFromCodeGenerator::GenerateSlow( | |
3572 MacroAssembler* masm, | |
3573 const RuntimeCallHelper& call_helper) { | |
3574 __ Abort(kUnexpectedFallthroughToCharFromCodeSlowCase); | |
3575 | |
3576 __ Bind(&slow_case_); | |
3577 call_helper.BeforeCall(masm); | |
3578 __ Push(code_); | |
3579 __ CallRuntime(Runtime::kCharFromCode, 1); | |
3580 __ Mov(result_, x0); | |
3581 call_helper.AfterCall(masm); | |
3582 __ B(&exit_); | |
3583 | |
3584 __ Abort(kUnexpectedFallthroughFromCharFromCodeSlowCase); | |
3585 } | |
3586 | |
3587 | |
3588 void ICCompareStub::GenerateSmis(MacroAssembler* masm) { | |
3589   // Inputs are in x1 (lhs) and x0 (rhs). | |
3590 ASSERT(state_ == CompareIC::SMI); | |
3591 ASM_LOCATION("ICCompareStub[Smis]"); | |
3592 Label miss; | |
3593 // Bail out (to 'miss') unless both x0 and x1 are smis. | |
3594 __ JumpIfEitherNotSmi(x0, x1, &miss); | |
3595 | |
3596 if (GetCondition() == eq) { | |
3597 // For equality we do not care about the sign of the result. | |
3598 __ Sub(x0, x0, x1); | |
3599 } else { | |
3600 // Untag before subtracting to avoid handling overflow. | |
3601 __ SmiUntag(x1); | |
3602 __ Sub(x0, x1, Operand::UntagSmi(x0)); | |
3603 } | |
3604 __ Ret(); | |
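// Example: for lhs == Smi(2), rhs == Smi(5) and a relational compare, the
// untagged subtraction yields 2 - 5 == -3, whose sign gives LESS; for an
// equality compare the raw smi difference is zero iff the operands are
// equal, which is all the caller inspects.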
3605 | |
3606 __ Bind(&miss); | |
3607 GenerateMiss(masm); | |
3608 } | |
3609 | |
3610 | |
3611 void ICCompareStub::GenerateNumbers(MacroAssembler* masm) { | |
3612 ASSERT(state_ == CompareIC::NUMBER); | |
3613 ASM_LOCATION("ICCompareStub[HeapNumbers]"); | |
3614 | |
3615 Label unordered, maybe_undefined1, maybe_undefined2; | |
3616 Label miss, handle_lhs, values_in_d_regs; | |
3617 Label untag_rhs, untag_lhs; | |
3618 | |
3619 Register result = x0; | |
3620 Register rhs = x0; | |
3621 Register lhs = x1; | |
3622 FPRegister rhs_d = d0; | |
3623 FPRegister lhs_d = d1; | |
3624 | |
3625 if (left_ == CompareIC::SMI) { | |
3626 __ JumpIfNotSmi(lhs, &miss); | |
3627 } | |
3628 if (right_ == CompareIC::SMI) { | |
3629 __ JumpIfNotSmi(rhs, &miss); | |
3630 } | |
3631 | |
3632 __ SmiUntagToDouble(rhs_d, rhs, kSpeculativeUntag); | |
3633 __ SmiUntagToDouble(lhs_d, lhs, kSpeculativeUntag); | |
3634 | |
3635 // Load rhs if it's a heap number. | |
3636 __ JumpIfSmi(rhs, &handle_lhs); | |
3637 __ CheckMap(rhs, x10, Heap::kHeapNumberMapRootIndex, &maybe_undefined1, | |
3638 DONT_DO_SMI_CHECK); | |
3639 __ Ldr(rhs_d, FieldMemOperand(rhs, HeapNumber::kValueOffset)); | |
3640 | |
3641 // Load lhs if it's a heap number. | |
3642 __ Bind(&handle_lhs); | |
3643 __ JumpIfSmi(lhs, &values_in_d_regs); | |
3644 __ CheckMap(lhs, x10, Heap::kHeapNumberMapRootIndex, &maybe_undefined2, | |
3645 DONT_DO_SMI_CHECK); | |
3646 __ Ldr(lhs_d, FieldMemOperand(lhs, HeapNumber::kValueOffset)); | |
3647 | |
3648 __ Bind(&values_in_d_regs); | |
3649 __ Fcmp(lhs_d, rhs_d); | |
3650 __ B(vs, &unordered); // Overflow flag set if either is NaN. | |
3651 STATIC_ASSERT((LESS == -1) && (EQUAL == 0) && (GREATER == 1)); | |
3652 __ Cset(result, gt); // gt => 1, otherwise (lt, eq) => 0 (EQUAL). | |
3653 __ Csinv(result, result, xzr, ge); // lt => -1, gt => 1, eq => 0. | |
3654 __ Ret(); | |
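// Mapping example: lhs_d < rhs_d leaves gt and ge both false, so Cset
// writes 0 and Csinv overwrites it with ~xzr == -1 (LESS). Equal operands
// give gt false / ge true, so the 0 from Cset survives (EQUAL); lhs_d >
// rhs_d gives 1 from Cset, kept by Csinv (GREATER).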
3655 | |
3656 __ Bind(&unordered); | |
3657 ICCompareStub stub(op_, CompareIC::GENERIC, CompareIC::GENERIC, | |
3658 CompareIC::GENERIC); | |
3659 __ Jump(stub.GetCode(masm->isolate()), RelocInfo::CODE_TARGET); | |
3660 | |
3661 __ Bind(&maybe_undefined1); | |
3662 if (Token::IsOrderedRelationalCompareOp(op_)) { | |
3663 __ JumpIfNotRoot(rhs, Heap::kUndefinedValueRootIndex, &miss); | |
3664 __ JumpIfSmi(lhs, &unordered); | |
3665 __ JumpIfNotObjectType(lhs, x10, x10, HEAP_NUMBER_TYPE, &maybe_undefined2); | |
3666 __ B(&unordered); | |
3667 } | |
3668 | |
3669 __ Bind(&maybe_undefined2); | |
3670 if (Token::IsOrderedRelationalCompareOp(op_)) { | |
3671 __ JumpIfRoot(lhs, Heap::kUndefinedValueRootIndex, &unordered); | |
3672 } | |
3673 | |
3674 __ Bind(&miss); | |
3675 GenerateMiss(masm); | |
3676 } | |
3677 | |
3678 | |
3679 void ICCompareStub::GenerateInternalizedStrings(MacroAssembler* masm) { | |
3680 ASSERT(state_ == CompareIC::INTERNALIZED_STRING); | |
3681 ASM_LOCATION("ICCompareStub[InternalizedStrings]"); | |
3682 Label miss; | |
3683 | |
3684 Register result = x0; | |
3685 Register rhs = x0; | |
3686 Register lhs = x1; | |
3687 | |
3688 // Check that both operands are heap objects. | |
3689 __ JumpIfEitherSmi(lhs, rhs, &miss); | |
3690 | |
3691 // Check that both operands are internalized strings. | |
3692 Register rhs_map = x10; | |
3693 Register lhs_map = x11; | |
3694 Register rhs_type = x10; | |
3695 Register lhs_type = x11; | |
3696 __ Ldr(lhs_map, FieldMemOperand(lhs, HeapObject::kMapOffset)); | |
3697 __ Ldr(rhs_map, FieldMemOperand(rhs, HeapObject::kMapOffset)); | |
3698 __ Ldrb(lhs_type, FieldMemOperand(lhs_map, Map::kInstanceTypeOffset)); | |
3699 __ Ldrb(rhs_type, FieldMemOperand(rhs_map, Map::kInstanceTypeOffset)); | |
3700 | |
3701 STATIC_ASSERT((kInternalizedTag == 0) && (kStringTag == 0)); | |
3702 __ Orr(x12, lhs_type, rhs_type); | |
3703 __ TestAndBranchIfAnySet( | |
3704 x12, kIsNotStringMask | kIsNotInternalizedMask, &miss); | |
3705 | |
3706 // Internalized strings are compared by identity. | |
3707 STATIC_ASSERT(EQUAL == 0); | |
3708 __ Cmp(lhs, rhs); | |
3709 __ Cset(result, ne); | |
3710 __ Ret(); | |
3711 | |
3712 __ Bind(&miss); | |
3713 GenerateMiss(masm); | |
3714 } | |
3715 | |
3716 | |
3717 void ICCompareStub::GenerateUniqueNames(MacroAssembler* masm) { | |
3718 ASSERT(state_ == CompareIC::UNIQUE_NAME); | |
3719 ASM_LOCATION("ICCompareStub[UniqueNames]"); | |
3720 ASSERT(GetCondition() == eq); | |
3721 Label miss; | |
3722 | |
3723 Register result = x0; | |
3724 Register rhs = x0; | |
3725 Register lhs = x1; | |
3726 | |
3727 Register lhs_instance_type = w2; | |
3728 Register rhs_instance_type = w3; | |
3729 | |
3730 // Check that both operands are heap objects. | |
3731 __ JumpIfEitherSmi(lhs, rhs, &miss); | |
3732 | |
3733 // Check that both operands are unique names. This leaves the instance | |
3734   // types loaded in lhs_instance_type and rhs_instance_type. | |
3735 __ Ldr(x10, FieldMemOperand(lhs, HeapObject::kMapOffset)); | |
3736 __ Ldr(x11, FieldMemOperand(rhs, HeapObject::kMapOffset)); | |
3737 __ Ldrb(lhs_instance_type, FieldMemOperand(x10, Map::kInstanceTypeOffset)); | |
3738 __ Ldrb(rhs_instance_type, FieldMemOperand(x11, Map::kInstanceTypeOffset)); | |
3739 | |
3740 // To avoid a miss, each instance type should be either SYMBOL_TYPE or it | |
3741 // should have kInternalizedTag set. | |
3742 __ JumpIfNotUniqueName(lhs_instance_type, &miss); | |
3743 __ JumpIfNotUniqueName(rhs_instance_type, &miss); | |
3744 | |
3745 // Unique names are compared by identity. | |
3746 STATIC_ASSERT(EQUAL == 0); | |
3747 __ Cmp(lhs, rhs); | |
3748 __ Cset(result, ne); | |
3749 __ Ret(); | |
3750 | |
3751 __ Bind(&miss); | |
3752 GenerateMiss(masm); | |
3753 } | |
3754 | |
3755 | |
3756 void ICCompareStub::GenerateStrings(MacroAssembler* masm) { | |
3757 ASSERT(state_ == CompareIC::STRING); | |
3758 ASM_LOCATION("ICCompareStub[Strings]"); | |
3759 | |
3760 Label miss; | |
3761 | |
3762 bool equality = Token::IsEqualityOp(op_); | |
3763 | |
3764 Register result = x0; | |
3765 Register rhs = x0; | |
3766 Register lhs = x1; | |
3767 | |
3768 // Check that both operands are heap objects. | |
3769 __ JumpIfEitherSmi(rhs, lhs, &miss); | |
3770 | |
3771 // Check that both operands are strings. | |
3772 Register rhs_map = x10; | |
3773 Register lhs_map = x11; | |
3774 Register rhs_type = x10; | |
3775 Register lhs_type = x11; | |
3776 __ Ldr(lhs_map, FieldMemOperand(lhs, HeapObject::kMapOffset)); | |
3777 __ Ldr(rhs_map, FieldMemOperand(rhs, HeapObject::kMapOffset)); | |
3778 __ Ldrb(lhs_type, FieldMemOperand(lhs_map, Map::kInstanceTypeOffset)); | |
3779 __ Ldrb(rhs_type, FieldMemOperand(rhs_map, Map::kInstanceTypeOffset)); | |
3780 STATIC_ASSERT(kNotStringTag != 0); | |
3781 __ Orr(x12, lhs_type, rhs_type); | |
3782 __ Tbnz(x12, MaskToBit(kIsNotStringMask), &miss); | |
3783 | |
3784 // Fast check for identical strings. | |
3785 Label not_equal; | |
3786 __ Cmp(lhs, rhs); | |
3787 __ B(ne, ¬_equal); | |
3788 __ Mov(result, EQUAL); | |
3789 __ Ret(); | |
3790 | |
3791 __ Bind(¬_equal); | |
3792   // Handle non-identical strings. | |
3793 | |
3794 // Check that both strings are internalized strings. If they are, we're done | |
3795 // because we already know they are not identical. We know they are both | |
3796 // strings. | |
3797 if (equality) { | |
3798 ASSERT(GetCondition() == eq); | |
3799 STATIC_ASSERT(kInternalizedTag == 0); | |
3800 Label not_internalized_strings; | |
3801 __ Orr(x12, lhs_type, rhs_type); | |
3802 __ TestAndBranchIfAnySet( | |
3803 x12, kIsNotInternalizedMask, ¬_internalized_strings); | |
3804 // Result is in rhs (x0), and not EQUAL, as rhs is not a smi. | |
3805 __ Ret(); | |
3806 __ Bind(¬_internalized_strings); | |
3807 } | |
3808 | |
3809 // Check that both strings are sequential ASCII. | |
3810 Label runtime; | |
3811 __ JumpIfBothInstanceTypesAreNotSequentialAscii( | |
3812 lhs_type, rhs_type, x12, x13, &runtime); | |
3813 | |
3814 // Compare flat ASCII strings. Returns when done. | |
3815 if (equality) { | |
3816 StringCompareStub::GenerateFlatAsciiStringEquals( | |
3817 masm, lhs, rhs, x10, x11, x12); | |
3818 } else { | |
3819 StringCompareStub::GenerateCompareFlatAsciiStrings( | |
3820 masm, lhs, rhs, x10, x11, x12, x13); | |
3821 } | |
3822 | |
3823 // Handle more complex cases in runtime. | |
3824 __ Bind(&runtime); | |
3825 __ Push(lhs, rhs); | |
3826 if (equality) { | |
3827 __ TailCallRuntime(Runtime::kStringEquals, 2, 1); | |
3828 } else { | |
3829 __ TailCallRuntime(Runtime::kStringCompare, 2, 1); | |
3830 } | |
3831 | |
3832 __ Bind(&miss); | |
3833 GenerateMiss(masm); | |
3834 } | |
3835 | |
3836 | |
3837 void ICCompareStub::GenerateObjects(MacroAssembler* masm) { | |
3838 ASSERT(state_ == CompareIC::OBJECT); | |
3839 ASM_LOCATION("ICCompareStub[Objects]"); | |
3840 | |
3841 Label miss; | |
3842 | |
3843 Register result = x0; | |
3844 Register rhs = x0; | |
3845 Register lhs = x1; | |
3846 | |
3847 __ JumpIfEitherSmi(rhs, lhs, &miss); | |
3848 | |
3849 __ JumpIfNotObjectType(rhs, x10, x10, JS_OBJECT_TYPE, &miss); | |
3850 __ JumpIfNotObjectType(lhs, x10, x10, JS_OBJECT_TYPE, &miss); | |
3851 | |
3852 ASSERT(GetCondition() == eq); | |
3853 __ Sub(result, rhs, lhs); | |
3854 __ Ret(); | |
3855 | |
3856 __ Bind(&miss); | |
3857 GenerateMiss(masm); | |
3858 } | |
3859 | |
3860 | |
3861 void ICCompareStub::GenerateKnownObjects(MacroAssembler* masm) { | |
3862 ASM_LOCATION("ICCompareStub[KnownObjects]"); | |
3863 | |
3864 Label miss; | |
3865 | |
3866 Register result = x0; | |
3867 Register rhs = x0; | |
3868 Register lhs = x1; | |
3869 | |
3870 __ JumpIfEitherSmi(rhs, lhs, &miss); | |
3871 | |
3872 Register rhs_map = x10; | |
3873 Register lhs_map = x11; | |
3874 __ Ldr(rhs_map, FieldMemOperand(rhs, HeapObject::kMapOffset)); | |
3875 __ Ldr(lhs_map, FieldMemOperand(lhs, HeapObject::kMapOffset)); | |
3876 __ Cmp(rhs_map, Operand(known_map_)); | |
3877 __ B(ne, &miss); | |
3878 __ Cmp(lhs_map, Operand(known_map_)); | |
3879 __ B(ne, &miss); | |
3880 | |
3881 __ Sub(result, rhs, lhs); | |
3882 __ Ret(); | |
3883 | |
3884 __ Bind(&miss); | |
3885 GenerateMiss(masm); | |
3886 } | |
3887 | |
3888 | |
3889 // This method handles the case where a compare stub had the wrong | |
3890 // implementation. It calls a miss handler, which rewrites the stub. All other | |
3891 // ICCompareStub::Generate* methods should fall back into this one if their | |
3892 // operands were not the expected types. | |
3893 void ICCompareStub::GenerateMiss(MacroAssembler* masm) { | |
3894 ASM_LOCATION("ICCompareStub[Miss]"); | |
3895 | |
3896 Register stub_entry = x11; | |
3897 { | |
3898 ExternalReference miss = | |
3899 ExternalReference(IC_Utility(IC::kCompareIC_Miss), masm->isolate()); | |
3900 | |
3901 FrameScope scope(masm, StackFrame::INTERNAL); | |
3902 Register op = x10; | |
3903 Register left = x1; | |
3904 Register right = x0; | |
3905 // Preserve some caller-saved registers. | |
3906 __ Push(x1, x0, lr); | |
3907 // Push the arguments. | |
3908 __ Mov(op, Smi::FromInt(op_)); | |
3909 __ Push(left, right, op); | |
3910 | |
3911 // Call the miss handler. This also pops the arguments. | |
3912 __ CallExternalReference(miss, 3); | |
3913 | |
3914 // Compute the entry point of the rewritten stub. | |
3915 __ Add(stub_entry, x0, Code::kHeaderSize - kHeapObjectTag); | |
3916 // Restore caller-saved registers. | |
3917 __ Pop(lr, x0, x1); | |
3918 } | |
3919 | |
3920 // Tail-call to the new stub. | |
3921 __ Jump(stub_entry); | |
3922 } | |
3923 | |
3924 | |
3925 void StringHelper::GenerateHashInit(MacroAssembler* masm, | |
3926 Register hash, | |
3927 Register character) { | |
3928 ASSERT(!AreAliased(hash, character)); | |
3929 | |
3930   // hash = seed + character + ((seed + character) << 10); | |
3931 __ LoadRoot(hash, Heap::kHashSeedRootIndex); | |
3932 // Untag smi seed and add the character. | |
3933 __ Add(hash, character, Operand(hash, LSR, kSmiShift)); | |
3934 | |
3935 // Compute hashes modulo 2^32 using a 32-bit W register. | |
3936 Register hash_w = hash.W(); | |
3937 | |
3938 // hash += hash << 10; | |
3939 __ Add(hash_w, hash_w, Operand(hash_w, LSL, 10)); | |
3940 // hash ^= hash >> 6; | |
3941 __ Eor(hash_w, hash_w, Operand(hash_w, LSR, 6)); | |
3942 } | |
3943 | |
3944 | |
3945 void StringHelper::GenerateHashAddCharacter(MacroAssembler* masm, | |
3946 Register hash, | |
3947 Register character) { | |
3948 ASSERT(!AreAliased(hash, character)); | |
3949 | |
3950 // hash += character; | |
3951 __ Add(hash, hash, character); | |
3952 | |
3953 // Compute hashes modulo 2^32 using a 32-bit W register. | |
3954 Register hash_w = hash.W(); | |
3955 | |
3956 // hash += hash << 10; | |
3957 __ Add(hash_w, hash_w, Operand(hash_w, LSL, 10)); | |
3958 // hash ^= hash >> 6; | |
3959 __ Eor(hash_w, hash_w, Operand(hash_w, LSR, 6)); | |
3960 } | |
3961 | |
3962 | |
3963 void StringHelper::GenerateHashGetHash(MacroAssembler* masm, | |
3964 Register hash, | |
3965 Register scratch) { | |
3966 // Compute hashes modulo 2^32 using a 32-bit W register. | |
3967 Register hash_w = hash.W(); | |
3968 Register scratch_w = scratch.W(); | |
3969 ASSERT(!AreAliased(hash_w, scratch_w)); | |
3970 | |
3971 // hash += hash << 3; | |
3972 __ Add(hash_w, hash_w, Operand(hash_w, LSL, 3)); | |
3973 // hash ^= hash >> 11; | |
3974 __ Eor(hash_w, hash_w, Operand(hash_w, LSR, 11)); | |
3975 // hash += hash << 15; | |
3976 __ Add(hash_w, hash_w, Operand(hash_w, LSL, 15)); | |
3977 | |
3978 __ Ands(hash_w, hash_w, String::kHashBitMask); | |
3979 | |
3980 // if (hash == 0) hash = 27; | |
3981 __ Mov(scratch_w, StringHasher::kZeroHash); | |
3982 __ Csel(hash_w, scratch_w, hash_w, eq); | |
3983 } | |
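// A minimal sketch (illustrative only, not generated code) of the hash the
// three helpers above compute together, using only the constants referenced
// in the code:
//
//   uint32_t hash = seed;                 // GenerateHashInit, untagged seed
//   for (each character c) {
//     hash += c;                          // ...HashInit / ...AddCharacter
//     hash += hash << 10;
//     hash ^= hash >> 6;
//   }
//   hash += hash << 3;                    // GenerateHashGetHash
//   hash ^= hash >> 11;
//   hash += hash << 15;
//   hash &= String::kHashBitMask;
//   if (hash == 0) hash = StringHasher::kZeroHash;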
3984 | |
3985 | |
3986 void SubStringStub::Generate(MacroAssembler* masm) { | |
3987 ASM_LOCATION("SubStringStub::Generate"); | |
3988 Label runtime; | |
3989 | |
3990 // Stack frame on entry. | |
3991 // lr: return address | |
3992 // jssp[0]: substring "to" offset | |
3993 // jssp[8]: substring "from" offset | |
3994 // jssp[16]: pointer to string object | |
3995 | |
3996 // This stub is called from the native-call %_SubString(...), so | |
3997 // nothing can be assumed about the arguments. It is tested that: | |
3998 // "string" is a sequential string, | |
3999 // both "from" and "to" are smis, and | |
4000   // 0 <= from <= to <= string.length (in debug mode). | |
4001 // If any of these assumptions fail, we call the runtime system. | |
4002 | |
4003 static const int kToOffset = 0 * kPointerSize; | |
4004 static const int kFromOffset = 1 * kPointerSize; | |
4005 static const int kStringOffset = 2 * kPointerSize; | |
4006 | |
4007 Register to = x0; | |
4008 Register from = x15; | |
4009 Register input_string = x10; | |
4010 Register input_length = x11; | |
4011 Register input_type = x12; | |
4012 Register result_string = x0; | |
4013 Register result_length = x1; | |
4014 Register temp = x3; | |
4015 | |
4016 __ Peek(to, kToOffset); | |
4017 __ Peek(from, kFromOffset); | |
4018 | |
4019 // Check that both from and to are smis. If not, jump to runtime. | |
4020 __ JumpIfEitherNotSmi(from, to, &runtime); | |
4021 __ SmiUntag(from); | |
4022 __ SmiUntag(to); | |
4023 | |
4024 // Calculate difference between from and to. If to < from, branch to runtime. | |
4025 __ Subs(result_length, to, from); | |
4026 __ B(mi, &runtime); | |
4027 | |
4028   // Check that from is non-negative. | |
4029 __ Tbnz(from, kWSignBit, &runtime); | |
4030 | |
4031 // Make sure first argument is a string. | |
4032 __ Peek(input_string, kStringOffset); | |
4033 __ JumpIfSmi(input_string, &runtime); | |
4034 __ IsObjectJSStringType(input_string, input_type, &runtime); | |
4035 | |
4036 Label single_char; | |
4037 __ Cmp(result_length, 1); | |
4038 __ B(eq, &single_char); | |
4039 | |
4040 // Short-cut for the case of trivial substring. | |
4041 Label return_x0; | |
4042 __ Ldrsw(input_length, | |
4043 UntagSmiFieldMemOperand(input_string, String::kLengthOffset)); | |
4044 | |
4045 __ Cmp(result_length, input_length); | |
4046 __ CmovX(x0, input_string, eq); | |
4047 // Return original string. | |
4048 __ B(eq, &return_x0); | |
4049 | |
4050 // Longer than original string's length or negative: unsafe arguments. | |
4051 __ B(hi, &runtime); | |
4052 | |
4053 // Shorter than original string's length: an actual substring. | |
4054 | |
4055 // x0 to substring end character offset | |
4056 // x1 result_length length of substring result | |
4057 // x10 input_string pointer to input string object | |
4058 // x10 unpacked_string pointer to unpacked string object | |
4059 // x11 input_length length of input string | |
4060 // x12 input_type instance type of input string | |
4061 // x15 from substring start character offset | |
4062 | |
4063 // Deal with different string types: update the index if necessary and put | |
4064 // the underlying string into register unpacked_string. | |
4065 Label underlying_unpacked, sliced_string, seq_or_external_string; | |
4066 Label update_instance_type; | |
4067 // If the string is not indirect, it can only be sequential or external. | |
4068 STATIC_ASSERT(kIsIndirectStringMask == (kSlicedStringTag & kConsStringTag)); | |
4069 STATIC_ASSERT(kIsIndirectStringMask != 0); | |
4070 | |
4071 // Test for string types, and branch/fall through to appropriate unpacking | |
4072 // code. | |
4073 __ Tst(input_type, kIsIndirectStringMask); | |
4074 __ B(eq, &seq_or_external_string); | |
4075 __ Tst(input_type, kSlicedNotConsMask); | |
4076 __ B(ne, &sliced_string); | |
4077 | |
4078 Register unpacked_string = input_string; | |
4079 | |
4080 // Cons string. Check whether it is flat, then fetch first part. | |
4081 __ Ldr(temp, FieldMemOperand(input_string, ConsString::kSecondOffset)); | |
4082 __ JumpIfNotRoot(temp, Heap::kempty_stringRootIndex, &runtime); | |
4083 __ Ldr(unpacked_string, | |
4084 FieldMemOperand(input_string, ConsString::kFirstOffset)); | |
4085 __ B(&update_instance_type); | |
4086 | |
4087 __ Bind(&sliced_string); | |
4088 // Sliced string. Fetch parent and correct start index by offset. | |
4089 __ Ldrsw(temp, | |
4090 UntagSmiFieldMemOperand(input_string, SlicedString::kOffsetOffset)); | |
4091 __ Add(from, from, temp); | |
4092 __ Ldr(unpacked_string, | |
4093 FieldMemOperand(input_string, SlicedString::kParentOffset)); | |
4094 | |
4095 __ Bind(&update_instance_type); | |
4096 __ Ldr(temp, FieldMemOperand(unpacked_string, HeapObject::kMapOffset)); | |
4097 __ Ldrb(input_type, FieldMemOperand(temp, Map::kInstanceTypeOffset)); | |
4098   // Control must now go to &underlying_unpacked. Since no code is generated | |
4099   // before it, we fall through instead of generating a useless branch. | |
4100 | |
4101 __ Bind(&seq_or_external_string); | |
4102 // Sequential or external string. Registers unpacked_string and input_string | |
4103 // alias, so there's nothing to do here. | |
4104 // Note that if code is added here, the above code must be updated. | |
4105 | |
4106 // x0 result_string pointer to result string object (uninit) | |
4107 // x1 result_length length of substring result | |
4108 // x10 unpacked_string pointer to unpacked string object | |
4109 // x11 input_length length of input string | |
4110 // x12 input_type instance type of input string | |
4111 // x15 from substring start character offset | |
4112 __ Bind(&underlying_unpacked); | |
4113 | |
4114 if (FLAG_string_slices) { | |
4115 Label copy_routine; | |
4116 __ Cmp(result_length, SlicedString::kMinLength); | |
4117 // Short slice. Copy instead of slicing. | |
4118 __ B(lt, ©_routine); | |
4119 // Allocate new sliced string. At this point we do not reload the instance | |
4120 // type including the string encoding because we simply rely on the info | |
4121 // provided by the original string. It does not matter if the original | |
4122     // string's encoding is wrong because we always have to recheck the | |
4123     // encoding of the newly created string's parent anyway, due to | |
4123     // externalized strings. | |
4124 Label two_byte_slice, set_slice_header; | |
4125 STATIC_ASSERT((kStringEncodingMask & kOneByteStringTag) != 0); | |
4126 STATIC_ASSERT((kStringEncodingMask & kTwoByteStringTag) == 0); | |
4127 __ Tbz(input_type, MaskToBit(kStringEncodingMask), &two_byte_slice); | |
4128 __ AllocateAsciiSlicedString(result_string, result_length, x3, x4, | |
4129 &runtime); | |
4130 __ B(&set_slice_header); | |
4131 | |
4132 __ Bind(&two_byte_slice); | |
4133 __ AllocateTwoByteSlicedString(result_string, result_length, x3, x4, | |
4134 &runtime); | |
4135 | |
4136 __ Bind(&set_slice_header); | |
4137 __ SmiTag(from); | |
4138 __ Str(from, FieldMemOperand(result_string, SlicedString::kOffsetOffset)); | |
4139 __ Str(unpacked_string, | |
4140 FieldMemOperand(result_string, SlicedString::kParentOffset)); | |
4141 __ B(&return_x0); | |
4142 | |
4143 __ Bind(©_routine); | |
4144 } | |
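  // Illustrative note: a sliced string avoids copying by pointing into its
  // parent. Conceptually (a sketch using the SlicedString fields stored
  // above, not V8's actual class layout):
  //
  //   struct SlicedString {
  //     String* parent;  // the unpacked underlying string
  //     int offset;      // substring start index within the parent
  //     int length;      // the substring length
  //   };
  //
  // Slicing only pays off for results of at least SlicedString::kMinLength
  // characters; shorter results fall through to the copying code below.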
4145 | |
4146 // x0 result_string pointer to result string object (uninit) | |
4147 // x1 result_length length of substring result | |
4148 // x10 unpacked_string pointer to unpacked string object | |
4149 // x11 input_length length of input string | |
4150 // x12 input_type instance type of input string | |
4151 // x13 unpacked_char0 pointer to first char of unpacked string (uninit) | |
4152 // x13 substring_char0 pointer to first char of substring (uninit) | |
4153 // x14 result_char0 pointer to first char of result (uninit) | |
4154 // x15 from substring start character offset | |
4155 Register unpacked_char0 = x13; | |
4156 Register substring_char0 = x13; | |
4157 Register result_char0 = x14; | |
4158 Label two_byte_sequential, sequential_string, allocate_result; | |
4159 STATIC_ASSERT(kExternalStringTag != 0); | |
4160 STATIC_ASSERT(kSeqStringTag == 0); | |
4161 | |
4162 __ Tst(input_type, kExternalStringTag); | |
4163 __ B(eq, &sequential_string); | |
4164 | |
4165 __ Tst(input_type, kShortExternalStringTag); | |
4166 __ B(ne, &runtime); | |
4167 __ Ldr(unpacked_char0, | |
4168 FieldMemOperand(unpacked_string, ExternalString::kResourceDataOffset)); | |
4169 // unpacked_char0 points to the first character of the underlying string. | |
4170 __ B(&allocate_result); | |
4171 | |
4172 __ Bind(&sequential_string); | |
4173 // Locate first character of underlying subject string. | |
4174 STATIC_ASSERT(SeqTwoByteString::kHeaderSize == SeqOneByteString::kHeaderSize); | |
4175 __ Add(unpacked_char0, unpacked_string, | |
4176 SeqOneByteString::kHeaderSize - kHeapObjectTag); | |
4177 | |
4178 __ Bind(&allocate_result); | |
4179   // Allocate the result, dispatching on the string encoding first. | |
4180 STATIC_ASSERT((kOneByteStringTag & kStringEncodingMask) != 0); | |
4181 __ Tbz(input_type, MaskToBit(kStringEncodingMask), &two_byte_sequential); | |
4182 | |
4183 // Allocate and copy the resulting ASCII string. | |
4184 __ AllocateAsciiString(result_string, result_length, x3, x4, x5, &runtime); | |
4185 | |
4186 // Locate first character of substring to copy. | |
4187 __ Add(substring_char0, unpacked_char0, from); | |
4188 | |
4189 // Locate first character of result. | |
4190 __ Add(result_char0, result_string, | |
4191 SeqOneByteString::kHeaderSize - kHeapObjectTag); | |
4192 | |
4193 STATIC_ASSERT((SeqOneByteString::kHeaderSize & kObjectAlignmentMask) == 0); | |
4194 __ CopyBytes(result_char0, substring_char0, result_length, x3, kCopyLong); | |
4195 __ B(&return_x0); | |
4196 | |
4197 // Allocate and copy the resulting two-byte string. | |
4198 __ Bind(&two_byte_sequential); | |
4199 __ AllocateTwoByteString(result_string, result_length, x3, x4, x5, &runtime); | |
4200 | |
4201 // Locate first character of substring to copy. | |
4202 __ Add(substring_char0, unpacked_char0, Operand(from, LSL, 1)); | |
4203 | |
4204 // Locate first character of result. | |
4205 __ Add(result_char0, result_string, | |
4206 SeqTwoByteString::kHeaderSize - kHeapObjectTag); | |
4207 | |
4208 STATIC_ASSERT((SeqTwoByteString::kHeaderSize & kObjectAlignmentMask) == 0); | |
4209   __ Add(result_length, result_length, result_length);  // Chars to bytes. | |
4210 __ CopyBytes(result_char0, substring_char0, result_length, x3, kCopyLong); | |
4211 | |
4212 __ Bind(&return_x0); | |
4213 Counters* counters = masm->isolate()->counters(); | |
4214 __ IncrementCounter(counters->sub_string_native(), 1, x3, x4); | |
4215 __ Drop(3); | |
4216 __ Ret(); | |
4217 | |
4218 __ Bind(&runtime); | |
4219 __ TailCallRuntime(Runtime::kSubString, 3, 1); | |
4220 | |
4221   __ Bind(&single_char); | |
4222 // x1: result_length | |
4223 // x10: input_string | |
4224 // x12: input_type | |
4225 // x15: from (untagged) | |
4226 __ SmiTag(from); | |
4227 StringCharAtGenerator generator( | |
4228 input_string, from, result_length, x0, | |
4229 &runtime, &runtime, &runtime, STRING_INDEX_IS_NUMBER); | |
4230 generator.GenerateFast(masm); | |
4231 __ Drop(3); | |
4232 __ Ret(); | |
4233 generator.SkipSlow(masm, &runtime); | |
4234 } | |
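// Illustrative summary of the unpacking performed by the stub above, as a
// rough C-style sketch (only flat cons strings are handled inline; anything
// else bails out to the runtime):
//
//   if (IsConsString(s)) {
//     if (s->second != empty_string) goto runtime;  // not flattened
//     s = s->first;
//   } else if (IsSlicedString(s)) {
//     from += s->offset;
//     s = s->parent;
//   }
//   // s is now sequential or external; the result is then either a new
//   // slice of s or a fresh copy of the requested character range.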
4235 | |
4236 | |
4237 void StringCompareStub::GenerateFlatAsciiStringEquals(MacroAssembler* masm, | |
4238 Register left, | |
4239 Register right, | |
4240 Register scratch1, | |
4241 Register scratch2, | |
4242 Register scratch3) { | |
4243 ASSERT(!AreAliased(left, right, scratch1, scratch2, scratch3)); | |
4244 Register result = x0; | |
4245 Register left_length = scratch1; | |
4246 Register right_length = scratch2; | |
4247 | |
4248 // Compare lengths. If lengths differ, strings can't be equal. Lengths are | |
4249 // smis, and don't need to be untagged. | |
4250 Label strings_not_equal, check_zero_length; | |
4251 __ Ldr(left_length, FieldMemOperand(left, String::kLengthOffset)); | |
4252 __ Ldr(right_length, FieldMemOperand(right, String::kLengthOffset)); | |
4253 __ Cmp(left_length, right_length); | |
4254 __ B(eq, &check_zero_length); | |
4255 | |
4256 __ Bind(&strings_not_equal); | |
4257 __ Mov(result, Smi::FromInt(NOT_EQUAL)); | |
4258 __ Ret(); | |
4259 | |
4260   // Check if the length is zero. If so, the strings must be equal (and empty). | |
4261 Label compare_chars; | |
4262 __ Bind(&check_zero_length); | |
4263 STATIC_ASSERT(kSmiTag == 0); | |
4264 __ Cbnz(left_length, &compare_chars); | |
4265 __ Mov(result, Smi::FromInt(EQUAL)); | |
4266 __ Ret(); | |
4267 | |
4268 // Compare characters. Falls through if all characters are equal. | |
4269 __ Bind(&compare_chars); | |
4270 GenerateAsciiCharsCompareLoop(masm, left, right, left_length, scratch2, | |
4271 scratch3, &strings_not_equal); | |
4272 | |
4273 // Characters in strings are equal. | |
4274 __ Mov(result, Smi::FromInt(EQUAL)); | |
4275 __ Ret(); | |
4276 } | |
4277 | |
4278 | |
4279 void StringCompareStub::GenerateCompareFlatAsciiStrings(MacroAssembler* masm, | |
4280 Register left, | |
4281 Register right, | |
4282 Register scratch1, | |
4283 Register scratch2, | |
4284 Register scratch3, | |
4285 Register scratch4) { | |
4286 ASSERT(!AreAliased(left, right, scratch1, scratch2, scratch3, scratch4)); | |
4287 Label result_not_equal, compare_lengths; | |
4288 | |
4289 // Find minimum length and length difference. | |
4290 Register length_delta = scratch3; | |
4291 __ Ldr(scratch1, FieldMemOperand(left, String::kLengthOffset)); | |
4292 __ Ldr(scratch2, FieldMemOperand(right, String::kLengthOffset)); | |
4293 __ Subs(length_delta, scratch1, scratch2); | |
4294 | |
4295 Register min_length = scratch1; | |
4296 __ Csel(min_length, scratch2, scratch1, gt); | |
4297 __ Cbz(min_length, &compare_lengths); | |
4298 | |
4299 // Compare loop. | |
4300 GenerateAsciiCharsCompareLoop(masm, | |
4301 left, right, min_length, scratch2, scratch4, | |
4302 &result_not_equal); | |
4303 | |
4304 // Compare lengths - strings up to min-length are equal. | |
4305 __ Bind(&compare_lengths); | |
4306 | |
4307 ASSERT(Smi::FromInt(EQUAL) == static_cast<Smi*>(0)); | |
4308 | |
4309 // Use length_delta as result if it's zero. | |
4310 Register result = x0; | |
4311 __ Subs(result, length_delta, 0); | |
4312 | |
4313 __ Bind(&result_not_equal); | |
4314 Register greater = x10; | |
4315 Register less = x11; | |
4316 __ Mov(greater, Smi::FromInt(GREATER)); | |
4317 __ Mov(less, Smi::FromInt(LESS)); | |
4318 __ CmovX(result, greater, gt); | |
4319 __ CmovX(result, less, lt); | |
4320 __ Ret(); | |
4321 } | |
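// Illustrative note: the two conditional moves above select the final result
// from the flags left either by the character compare loop or by the 'Subs'
// on the length delta. Roughly:
//
//   result = Smi(EQUAL);                  // length delta was zero
//   if (flags are gt) result = Smi(GREATER);
//   if (flags are lt) result = Smi(LESS);
//   return result;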
4322 | |
4323 | |
4324 void StringCompareStub::GenerateAsciiCharsCompareLoop( | |
4325 MacroAssembler* masm, | |
4326 Register left, | |
4327 Register right, | |
4328 Register length, | |
4329 Register scratch1, | |
4330 Register scratch2, | |
4331 Label* chars_not_equal) { | |
4332 ASSERT(!AreAliased(left, right, length, scratch1, scratch2)); | |
4333 | |
4334 // Change index to run from -length to -1 by adding length to string | |
4335 // start. This means that loop ends when index reaches zero, which | |
4336 // doesn't need an additional compare. | |
4337 __ SmiUntag(length); | |
4338 __ Add(scratch1, length, SeqOneByteString::kHeaderSize - kHeapObjectTag); | |
4339 __ Add(left, left, scratch1); | |
4340 __ Add(right, right, scratch1); | |
4341 | |
4342 Register index = length; | |
4343 __ Neg(index, length); // index = -length; | |
4344 | |
4345 // Compare loop | |
4346 Label loop; | |
4347 __ Bind(&loop); | |
4348 __ Ldrb(scratch1, MemOperand(left, index)); | |
4349 __ Ldrb(scratch2, MemOperand(right, index)); | |
4350 __ Cmp(scratch1, scratch2); | |
4351 __ B(ne, chars_not_equal); | |
4352 __ Add(index, index, 1); | |
4353 __ Cbnz(index, &loop); | |
4354 } | |
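// A minimal C sketch of the generated loop (illustrative only, assuming
// one-byte strings and an untagged length). Both pointers are advanced past
// the last character so that one negative index can step both strings, and
// the loop terminates when the index reaches zero, without a separate
// bounds compare:
//
//   const uint8_t* left_end = left_chars + length;
//   const uint8_t* right_end = right_chars + length;
//   for (int64_t index = -length; index != 0; index++) {
//     if (left_end[index] != right_end[index]) goto chars_not_equal;
//   }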
4355 | |
4356 | |
4357 void StringCompareStub::Generate(MacroAssembler* masm) { | |
4358 Label runtime; | |
4359 | |
4360 Counters* counters = masm->isolate()->counters(); | |
4361 | |
4362 // Stack frame on entry. | |
4363 // sp[0]: right string | |
4364 // sp[8]: left string | |
4365 Register right = x10; | |
4366 Register left = x11; | |
4367 Register result = x0; | |
4368 __ Pop(right, left); | |
4369 | |
4370 Label not_same; | |
4371 __ Subs(result, right, left); | |
4372 __ B(ne, ¬_same); | |
4373 STATIC_ASSERT(EQUAL == 0); | |
4374 __ IncrementCounter(counters->string_compare_native(), 1, x3, x4); | |
4375 __ Ret(); | |
4376 | |
4377 __ Bind(¬_same); | |
4378 | |
4379 // Check that both objects are sequential ASCII strings. | |
4380 __ JumpIfEitherIsNotSequentialAsciiStrings(left, right, x12, x13, &runtime); | |
4381 | |
4382 // Compare flat ASCII strings natively. Remove arguments from stack first, | |
4383 // as this function will generate a return. | |
4384 __ IncrementCounter(counters->string_compare_native(), 1, x3, x4); | |
4385 GenerateCompareFlatAsciiStrings(masm, left, right, x12, x13, x14, x15); | |
4386 | |
4387 __ Bind(&runtime); | |
4388 | |
4389 // Push arguments back on to the stack. | |
4390 // sp[0] = right string | |
4391 // sp[8] = left string. | |
4392 __ Push(left, right); | |
4393 | |
4394 // Call the runtime. | |
4395 // Returns -1 (less), 0 (equal), or 1 (greater) tagged as a small integer. | |
4396 __ TailCallRuntime(Runtime::kStringCompare, 2, 1); | |
4397 } | |
4398 | |
4399 | |
4400 void ArrayPushStub::Generate(MacroAssembler* masm) { | |
4401 Register receiver = x0; | |
4402 | |
4403 int argc = arguments_count(); | |
4404 | |
4405 if (argc == 0) { | |
4406 // Nothing to do, just return the length. | |
4407 __ Ldr(x0, FieldMemOperand(receiver, JSArray::kLengthOffset)); | |
4408 __ Drop(argc + 1); | |
4409 __ Ret(); | |
4410 return; | |
4411 } | |
4412 | |
4413 Isolate* isolate = masm->isolate(); | |
4414 | |
4415 if (argc != 1) { | |
4416 __ TailCallExternalReference( | |
4417 ExternalReference(Builtins::c_ArrayPush, isolate), argc + 1, 1); | |
4418 return; | |
4419 } | |
4420 | |
4421 Label call_builtin, attempt_to_grow_elements, with_write_barrier; | |
4422 | |
4423 Register elements_length = x8; | |
4424 Register length = x7; | |
4425 Register elements = x6; | |
4426 Register end_elements = x5; | |
4427 Register value = x4; | |
4428 // Get the elements array of the object. | |
4429 __ Ldr(elements, FieldMemOperand(receiver, JSArray::kElementsOffset)); | |
4430 | |
4431 if (IsFastSmiOrObjectElementsKind(elements_kind())) { | |
4432 // Check that the elements are in fast mode and writable. | |
4433 __ CheckMap(elements, | |
4434 x10, | |
4435 Heap::kFixedArrayMapRootIndex, | |
4436 &call_builtin, | |
4437 DONT_DO_SMI_CHECK); | |
4438 } | |
4439 | |
4440   // Get the array's length and calculate the new length. | |
4441 __ Ldr(length, FieldMemOperand(receiver, JSArray::kLengthOffset)); | |
4442 STATIC_ASSERT(kSmiTag == 0); | |
4443 __ Add(length, length, Smi::FromInt(argc)); | |
4444 | |
4445   // Check whether the new length still fits in the existing backing store. | |
4446 __ Ldr(elements_length, | |
4447 FieldMemOperand(elements, FixedArray::kLengthOffset)); | |
4448 __ Cmp(length, elements_length); | |
4449 | |
4450 const int kEndElementsOffset = | |
4451 FixedArray::kHeaderSize - kHeapObjectTag - argc * kPointerSize; | |
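  // Worked example (assuming the usual 64-bit layout: kPointerSize == 8,
  // kHeapObjectTag == 1, FixedArray::kHeaderSize == 16) with argc == 1:
  //
  //   kEndElementsOffset = 16 - 1 - 1 * 8 = 7
  //
  // Combined with end_elements = elements + new_length * 8, a store at
  // [end_elements + 7] lands exactly on the slot of the newly pushed element.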
4452 | |
4453 if (IsFastSmiOrObjectElementsKind(elements_kind())) { | |
4454 __ B(gt, &attempt_to_grow_elements); | |
4455 | |
4456 // Check if value is a smi. | |
4457 __ Peek(value, (argc - 1) * kPointerSize); | |
4458 __ JumpIfNotSmi(value, &with_write_barrier); | |
4459 | |
4460 // Store the value. | |
4461     // We may need a register containing the element's address below, so let | |
4462     // the pre-indexed store write that address back into end_elements. | |
4463 __ Add(end_elements, elements, | |
4464 Operand::UntagSmiAndScale(length, kPointerSizeLog2)); | |
4465 __ Str(value, MemOperand(end_elements, kEndElementsOffset, PreIndex)); | |
4466 } else { | |
4467 __ B(gt, &call_builtin); | |
4468 | |
4469 __ Peek(value, (argc - 1) * kPointerSize); | |
4470 __ StoreNumberToDoubleElements(value, length, elements, x10, d0, d1, | |
4471 &call_builtin, argc * kDoubleSize); | |
4472 } | |
4473 | |
4474 // Save new length. | |
4475 __ Str(length, FieldMemOperand(receiver, JSArray::kLengthOffset)); | |
4476 | |
4477 // Return length. | |
4478 __ Drop(argc + 1); | |
4479 __ Mov(x0, length); | |
4480 __ Ret(); | |
4481 | |
4482 if (IsFastDoubleElementsKind(elements_kind())) { | |
4483 __ Bind(&call_builtin); | |
4484 __ TailCallExternalReference( | |
4485 ExternalReference(Builtins::c_ArrayPush, isolate), argc + 1, 1); | |
4486 return; | |
4487 } | |
4488 | |
4489 __ Bind(&with_write_barrier); | |
4490 | |
4491 if (IsFastSmiElementsKind(elements_kind())) { | |
4492 if (FLAG_trace_elements_transitions) { | |
4493 __ B(&call_builtin); | |
4494 } | |
4495 | |
4496 __ Ldr(x10, FieldMemOperand(value, HeapObject::kMapOffset)); | |
4497 __ JumpIfHeapNumber(x10, &call_builtin); | |
4498 | |
4499 ElementsKind target_kind = IsHoleyElementsKind(elements_kind()) | |
4500 ? FAST_HOLEY_ELEMENTS : FAST_ELEMENTS; | |
4501 __ Ldr(x10, GlobalObjectMemOperand()); | |
4502 __ Ldr(x10, FieldMemOperand(x10, GlobalObject::kNativeContextOffset)); | |
4503 __ Ldr(x10, ContextMemOperand(x10, Context::JS_ARRAY_MAPS_INDEX)); | |
4504 const int header_size = FixedArrayBase::kHeaderSize; | |
4505 // Verify that the object can be transitioned in place. | |
4506 const int origin_offset = header_size + elements_kind() * kPointerSize; | |
4507     __ Ldr(x11, FieldMemOperand(receiver, origin_offset)); | |
4508     __ Ldr(x12, FieldMemOperand(x10, HeapObject::kMapOffset)); | |
4509     __ Cmp(x11, x12); | |
4510 __ B(ne, &call_builtin); | |
4511 | |
4512 const int target_offset = header_size + target_kind * kPointerSize; | |
4513 __ Ldr(x10, FieldMemOperand(x10, target_offset)); | |
4514 __ Mov(x11, receiver); | |
4515 ElementsTransitionGenerator::GenerateMapChangeElementsTransition( | |
4516 masm, DONT_TRACK_ALLOCATION_SITE, NULL); | |
4517 } | |
4518 | |
4519 // Save new length. | |
4520 __ Str(length, FieldMemOperand(receiver, JSArray::kLengthOffset)); | |
4521 | |
4522 // Store the value. | |
4523   // We may need a register containing the element's address below, so let | |
4524   // the pre-indexed store write that address back into end_elements. | |
4525 __ Add(end_elements, elements, | |
4526 Operand::UntagSmiAndScale(length, kPointerSizeLog2)); | |
4527 __ Str(value, MemOperand(end_elements, kEndElementsOffset, PreIndex)); | |
4528 | |
4529 __ RecordWrite(elements, | |
4530 end_elements, | |
4531 value, | |
4532 kLRHasNotBeenSaved, | |
4533 kDontSaveFPRegs, | |
4534 EMIT_REMEMBERED_SET, | |
4535 OMIT_SMI_CHECK); | |
4536 __ Drop(argc + 1); | |
4537 __ Mov(x0, length); | |
4538 __ Ret(); | |
4539 | |
4540 __ Bind(&attempt_to_grow_elements); | |
4541 | |
4542 if (!FLAG_inline_new) { | |
4543 __ B(&call_builtin); | |
4544 } | |
4545 | |
4546 Register argument = x2; | |
4547 __ Peek(argument, (argc - 1) * kPointerSize); | |
4548   // Growing Smi-only elements requires special handling in case | |
4549 // the new element is non-Smi. For now, delegate to the builtin. | |
4550 if (IsFastSmiElementsKind(elements_kind())) { | |
4551 __ JumpIfNotSmi(argument, &call_builtin); | |
4552 } | |
4553 | |
4554 // We could be lucky and the elements array could be at the top of new-space. | |
4555 // In this case we can just grow it in place by moving the allocation pointer | |
4556 // up. | |
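  // A rough sketch of the fast path below (illustrative only, assuming 8-byte
  // pointers and kAllocationDelta == 4):
  //
  //   if (elements_end != *new_space_top) goto call_builtin;  // not on top
  //   if (*new_space_top + 4 * 8 > *new_space_limit) goto call_builtin;
  //   *new_space_top += 4 * 8;     // grow in place by bumping the top pointer
  //   elements_end[0] = argument;  // store the pushed element
  //   // ... the remaining new slots are filled with the hole value.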
4557 ExternalReference new_space_allocation_top = | |
4558 ExternalReference::new_space_allocation_top_address(isolate); | |
4559 ExternalReference new_space_allocation_limit = | |
4560 ExternalReference::new_space_allocation_limit_address(isolate); | |
4561 | |
4562 const int kAllocationDelta = 4; | |
4563 ASSERT(kAllocationDelta >= argc); | |
4564 Register allocation_top_addr = x5; | |
4565 Register allocation_top = x9; | |
4566 // Load top and check if it is the end of elements. | |
4567 __ Add(end_elements, elements, | |
4568 Operand::UntagSmiAndScale(length, kPointerSizeLog2)); | |
4569 __ Add(end_elements, end_elements, kEndElementsOffset); | |
4570 __ Mov(allocation_top_addr, new_space_allocation_top); | |
4571 __ Ldr(allocation_top, MemOperand(allocation_top_addr)); | |
4572 __ Cmp(end_elements, allocation_top); | |
4573 __ B(ne, &call_builtin); | |
4574 | |
4575 __ Mov(x10, new_space_allocation_limit); | |
4576 __ Ldr(x10, MemOperand(x10)); | |
4577 __ Add(allocation_top, allocation_top, kAllocationDelta * kPointerSize); | |
4578 __ Cmp(allocation_top, x10); | |
4579 __ B(hi, &call_builtin); | |
4580 | |
4581 // We fit and could grow elements. | |
4582 // Update new_space_allocation_top. | |
4583 __ Str(allocation_top, MemOperand(allocation_top_addr)); | |
4584 // Push the argument. | |
4585 __ Str(argument, MemOperand(end_elements)); | |
4586 // Fill the rest with holes. | |
4587 __ LoadRoot(x10, Heap::kTheHoleValueRootIndex); | |
4588 ASSERT(kAllocationDelta == 4); | |
4589 __ Stp(x10, x10, MemOperand(end_elements, 1 * kPointerSize)); | |
4590 __ Stp(x10, x10, MemOperand(end_elements, 3 * kPointerSize)); | |
4591 | |
4592 // Update elements' and array's sizes. | |
4593 __ Str(length, FieldMemOperand(receiver, JSArray::kLengthOffset)); | |
4594 __ Add(elements_length, elements_length, Smi::FromInt(kAllocationDelta)); | |
4595 __ Str(elements_length, | |
4596 FieldMemOperand(elements, FixedArray::kLengthOffset)); | |
4597 | |
4598 // Elements are in new space, so write barrier is not required. | |
4599 __ Drop(argc + 1); | |
4600 __ Mov(x0, length); | |
4601 __ Ret(); | |
4602 | |
4603 __ Bind(&call_builtin); | |
4604 __ TailCallExternalReference( | |
4605 ExternalReference(Builtins::c_ArrayPush, isolate), argc + 1, 1); | |
4606 } | |
4607 | |
4608 | |
4609 void BinaryOpICWithAllocationSiteStub::Generate(MacroAssembler* masm) { | |
4610 // ----------- S t a t e ------------- | |
4611 // -- x1 : left | |
4612 // -- x0 : right | |
4613 // -- lr : return address | |
4614 // ----------------------------------- | |
4615 Isolate* isolate = masm->isolate(); | |
4616 | |
4617 // Load x2 with the allocation site. We stick an undefined dummy value here | |
4618 // and replace it with the real allocation site later when we instantiate this | |
4619 // stub in BinaryOpICWithAllocationSiteStub::GetCodeCopyFromTemplate(). | |
4620 __ LoadObject(x2, handle(isolate->heap()->undefined_value())); | |
4621 | |
4622 // Make sure that we actually patched the allocation site. | |
4623 if (FLAG_debug_code) { | |
4624 __ AssertNotSmi(x2, kExpectedAllocationSite); | |
4625 __ Ldr(x10, FieldMemOperand(x2, HeapObject::kMapOffset)); | |
4626 __ AssertRegisterIsRoot(x10, Heap::kAllocationSiteMapRootIndex, | |
4627 kExpectedAllocationSite); | |
4628 } | |
4629 | |
4630 // Tail call into the stub that handles binary operations with allocation | |
4631 // sites. | |
4632 BinaryOpWithAllocationSiteStub stub(state_); | |
4633 __ TailCallStub(&stub); | |
4634 } | |
4635 | |
4636 | |
4637 bool CodeStub::CanUseFPRegisters() { | |
4638 // FP registers always available on A64. | |
4639 return true; | |
4640 } | |
4641 | |
4642 | |
4643 void RecordWriteStub::GenerateIncremental(MacroAssembler* masm, Mode mode) { | |
4644   // We need some extra registers for this stub. They have been allocated, | |
4645   // but we need to save them before using them. | |
4646 regs_.Save(masm); | |
4647 | |
4648 if (remembered_set_action_ == EMIT_REMEMBERED_SET) { | |
4649 Label dont_need_remembered_set; | |
4650 | |
4651 Register value = regs_.scratch0(); | |
4652 __ Ldr(value, MemOperand(regs_.address())); | |
4653 __ JumpIfNotInNewSpace(value, &dont_need_remembered_set); | |
4654 | |
4655 __ CheckPageFlagSet(regs_.object(), | |
4656 value, | |
4657 1 << MemoryChunk::SCAN_ON_SCAVENGE, | |
4658 &dont_need_remembered_set); | |
4659 | |
4660 // First notify the incremental marker if necessary, then update the | |
4661 // remembered set. | |
4662 CheckNeedsToInformIncrementalMarker( | |
4663 masm, kUpdateRememberedSetOnNoNeedToInformIncrementalMarker, mode); | |
4664 InformIncrementalMarker(masm); | |
4665 regs_.Restore(masm); // Restore the extra scratch registers we used. | |
4666 | |
4667 __ RememberedSetHelper(object_, | |
4668 address_, | |
4669 value_, // scratch1 | |
4670 save_fp_regs_mode_, | |
4671 MacroAssembler::kReturnAtEnd); | |
4672 | |
4673 __ Bind(&dont_need_remembered_set); | |
4674 } | |
4675 | |
4676 CheckNeedsToInformIncrementalMarker( | |
4677 masm, kReturnOnNoNeedToInformIncrementalMarker, mode); | |
4678 InformIncrementalMarker(masm); | |
4679 regs_.Restore(masm); // Restore the extra scratch registers we used. | |
4680 __ Ret(); | |
4681 } | |
4682 | |
4683 | |
4684 void RecordWriteStub::InformIncrementalMarker(MacroAssembler* masm) { | |
4685 regs_.SaveCallerSaveRegisters(masm, save_fp_regs_mode_); | |
4686 Register address = | |
4687 x0.Is(regs_.address()) ? regs_.scratch0() : regs_.address(); | |
4688 ASSERT(!address.Is(regs_.object())); | |
4689 ASSERT(!address.Is(x0)); | |
4690 __ Mov(address, regs_.address()); | |
4691 __ Mov(x0, regs_.object()); | |
4692 __ Mov(x1, address); | |
4693 __ Mov(x2, ExternalReference::isolate_address(masm->isolate())); | |
4694 | |
4695 AllowExternalCallThatCantCauseGC scope(masm); | |
4696 ExternalReference function = | |
4697 ExternalReference::incremental_marking_record_write_function( | |
4698 masm->isolate()); | |
4699 __ CallCFunction(function, 3, 0); | |
4700 | |
4701 regs_.RestoreCallerSaveRegisters(masm, save_fp_regs_mode_); | |
4702 } | |
4703 | |
4704 | |
4705 void RecordWriteStub::CheckNeedsToInformIncrementalMarker( | |
4706 MacroAssembler* masm, | |
4707 OnNoNeedToInformIncrementalMarker on_no_need, | |
4708 Mode mode) { | |
4709 Label on_black; | |
4710 Label need_incremental; | |
4711 Label need_incremental_pop_scratch; | |
4712 | |
4713 Register mem_chunk = regs_.scratch0(); | |
4714 Register counter = regs_.scratch1(); | |
4715 __ Bic(mem_chunk, regs_.object(), Page::kPageAlignmentMask); | |
4716 __ Ldr(counter, | |
4717 MemOperand(mem_chunk, MemoryChunk::kWriteBarrierCounterOffset)); | |
4718 __ Subs(counter, counter, 1); | |
4719 __ Str(counter, | |
4720 MemOperand(mem_chunk, MemoryChunk::kWriteBarrierCounterOffset)); | |
4721 __ B(mi, &need_incremental); | |
4722 | |
4723 // If the object is not black we don't have to inform the incremental marker. | |
4724 __ JumpIfBlack(regs_.object(), regs_.scratch0(), regs_.scratch1(), &on_black); | |
4725 | |
4726 regs_.Restore(masm); // Restore the extra scratch registers we used. | |
4727 if (on_no_need == kUpdateRememberedSetOnNoNeedToInformIncrementalMarker) { | |
4728 __ RememberedSetHelper(object_, | |
4729 address_, | |
4730 value_, // scratch1 | |
4731 save_fp_regs_mode_, | |
4732 MacroAssembler::kReturnAtEnd); | |
4733 } else { | |
4734 __ Ret(); | |
4735 } | |
4736 | |
4737 __ Bind(&on_black); | |
4738 // Get the value from the slot. | |
4739 Register value = regs_.scratch0(); | |
4740 __ Ldr(value, MemOperand(regs_.address())); | |
4741 | |
4742 if (mode == INCREMENTAL_COMPACTION) { | |
4743 Label ensure_not_white; | |
4744 | |
4745 __ CheckPageFlagClear(value, | |
4746 regs_.scratch1(), | |
4747 MemoryChunk::kEvacuationCandidateMask, | |
4748 &ensure_not_white); | |
4749 | |
4750 __ CheckPageFlagClear(regs_.object(), | |
4751 regs_.scratch1(), | |
4752 MemoryChunk::kSkipEvacuationSlotsRecordingMask, | |
4753 &need_incremental); | |
4754 | |
4755 __ Bind(&ensure_not_white); | |
4756 } | |
4757 | |
4758 // We need extra registers for this, so we push the object and the address | |
4759 // register temporarily. | |
4760 __ Push(regs_.address(), regs_.object()); | |
4761 __ EnsureNotWhite(value, | |
4762 regs_.scratch1(), // Scratch. | |
4763 regs_.object(), // Scratch. | |
4764 regs_.address(), // Scratch. | |
4765 regs_.scratch2(), // Scratch. | |
4766 &need_incremental_pop_scratch); | |
4767 __ Pop(regs_.object(), regs_.address()); | |
4768 | |
4769 regs_.Restore(masm); // Restore the extra scratch registers we used. | |
4770 if (on_no_need == kUpdateRememberedSetOnNoNeedToInformIncrementalMarker) { | |
4771 __ RememberedSetHelper(object_, | |
4772 address_, | |
4773 value_, // scratch1 | |
4774 save_fp_regs_mode_, | |
4775 MacroAssembler::kReturnAtEnd); | |
4776 } else { | |
4777 __ Ret(); | |
4778 } | |
4779 | |
4780 __ Bind(&need_incremental_pop_scratch); | |
4781 __ Pop(regs_.object(), regs_.address()); | |
4782 | |
4783 __ Bind(&need_incremental); | |
4784 // Fall through when we need to inform the incremental marker. | |
4785 } | |
4786 | |
4787 | |
4788 void RecordWriteStub::Generate(MacroAssembler* masm) { | |
4789 Label skip_to_incremental_noncompacting; | |
4790 Label skip_to_incremental_compacting; | |
4791 | |
4792   // We patch the first two instructions back and forth between nops and | |
4793   // real branches when we start and stop incremental heap marking. | |
4794   // Initially the stub is expected to be in STORE_BUFFER_ONLY mode, so two | |
4795   // nops are generated. | |
4796 // See RecordWriteStub::Patch for details. | |
4797 { | |
4798 InstructionAccurateScope scope(masm, 2); | |
4799 __ adr(xzr, &skip_to_incremental_noncompacting); | |
4800 __ adr(xzr, &skip_to_incremental_compacting); | |
4801 } | |
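  // Note: 'adr xzr, <label>' discards its result (the destination is the zero
  // register), so it executes as a nop while still encoding the label's
  // offset, which is what the patching relies on. Roughly (the exact patching
  // is in RecordWriteStub::Patch):
  //
  //   STORE_BUFFER_ONLY:       nop; nop;   // fall through to the helper below
  //   INCREMENTAL:             b skip_to_incremental_noncompacting; nop;
  //   INCREMENTAL_COMPACTION:  nop; b skip_to_incremental_compacting;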
4802 | |
4803 if (remembered_set_action_ == EMIT_REMEMBERED_SET) { | |
4804 __ RememberedSetHelper(object_, | |
4805 address_, | |
4806 value_, // scratch1 | |
4807 save_fp_regs_mode_, | |
4808 MacroAssembler::kReturnAtEnd); | |
4809 } | |
4810 __ Ret(); | |
4811 | |
4812 __ Bind(&skip_to_incremental_noncompacting); | |
4813 GenerateIncremental(masm, INCREMENTAL); | |
4814 | |
4815 __ Bind(&skip_to_incremental_compacting); | |
4816 GenerateIncremental(masm, INCREMENTAL_COMPACTION); | |
4817 } | |
4818 | |
4819 | |
4820 void StoreArrayLiteralElementStub::Generate(MacroAssembler* masm) { | |
4821 // x0 value element value to store | |
4822 // x3 index_smi element index as smi | |
4823 // sp[0] array_index_smi array literal index in function as smi | |
4824 // sp[1] array array literal | |
4825 | |
4826 Register value = x0; | |
4827 Register index_smi = x3; | |
4828 | |
4829 Register array = x1; | |
4830 Register array_map = x2; | |
4831 Register array_index_smi = x4; | |
4832 __ PeekPair(array_index_smi, array, 0); | |
4833 __ Ldr(array_map, FieldMemOperand(array, JSObject::kMapOffset)); | |
4834 | |
4835 Label double_elements, smi_element, fast_elements, slow_elements; | |
4836 Register bitfield2 = x10; | |
4837 __ Ldrb(bitfield2, FieldMemOperand(array_map, Map::kBitField2Offset)); | |
4838 | |
4839 // Jump if array's ElementsKind is not FAST*_SMI_ELEMENTS, FAST_ELEMENTS or | |
4840 // FAST_HOLEY_ELEMENTS. | |
4841 STATIC_ASSERT(FAST_SMI_ELEMENTS == 0); | |
4842 STATIC_ASSERT(FAST_HOLEY_SMI_ELEMENTS == 1); | |
4843 STATIC_ASSERT(FAST_ELEMENTS == 2); | |
4844 STATIC_ASSERT(FAST_HOLEY_ELEMENTS == 3); | |
4845 __ Cmp(bitfield2, Map::kMaximumBitField2FastHoleyElementValue); | |
4846 __ B(hi, &double_elements); | |
4847 | |
4848 __ JumpIfSmi(value, &smi_element); | |
4849 | |
4850 // Jump if array's ElementsKind is not FAST_ELEMENTS or FAST_HOLEY_ELEMENTS. | |
4851 __ Tbnz(bitfield2, MaskToBit(FAST_ELEMENTS << Map::kElementsKindShift), | |
4852 &fast_elements); | |
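  // With the kind values asserted above, the dispatch so far reads roughly as
  // follows (a sketch; 'kind' is the ElementsKind field within bitfield2):
  //
  //   if (kind > FAST_HOLEY_ELEMENTS) goto double_elements;  // kinds 4 and up
  //   if (IsSmi(value)) goto smi_element;
  //   if (kind & FAST_ELEMENTS) goto fast_elements;          // kinds 2 and 3
  //   goto slow_elements;  // smi-only array receiving a non-smi value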
4853 | |
4854 // Store into the array literal requires an elements transition. Call into | |
4855 // the runtime. | |
4856 __ Bind(&slow_elements); | |
4857 __ Push(array, index_smi, value); | |
4858 __ Ldr(x10, MemOperand(fp, JavaScriptFrameConstants::kFunctionOffset)); | |
4859 __ Ldr(x11, FieldMemOperand(x10, JSFunction::kLiteralsOffset)); | |
4860 __ Push(x11, array_index_smi); | |
4861 __ TailCallRuntime(Runtime::kStoreArrayLiteralElement, 5, 1); | |
4862 | |
4863 // Array literal has ElementsKind of FAST_*_ELEMENTS and value is an object. | |
4864 __ Bind(&fast_elements); | |
4865 __ Ldr(x10, FieldMemOperand(array, JSObject::kElementsOffset)); | |
4866 __ Add(x11, x10, Operand::UntagSmiAndScale(index_smi, kPointerSizeLog2)); | |
4867 __ Add(x11, x11, FixedArray::kHeaderSize - kHeapObjectTag); | |
4868 __ Str(value, MemOperand(x11)); | |
4869 // Update the write barrier for the array store. | |
4870 __ RecordWrite(x10, x11, value, kLRHasNotBeenSaved, kDontSaveFPRegs, | |
4871 EMIT_REMEMBERED_SET, OMIT_SMI_CHECK); | |
4872 __ Ret(); | |
4873 | |
4874 // Array literal has ElementsKind of FAST_*_SMI_ELEMENTS or FAST_*_ELEMENTS, | |
4875 // and value is Smi. | |
4876 __ Bind(&smi_element); | |
4877 __ Ldr(x10, FieldMemOperand(array, JSObject::kElementsOffset)); | |
4878 __ Add(x11, x10, Operand::UntagSmiAndScale(index_smi, kPointerSizeLog2)); | |
4879 __ Str(value, FieldMemOperand(x11, FixedArray::kHeaderSize)); | |
4880 __ Ret(); | |
4881 | |
4882 __ Bind(&double_elements); | |
4883 __ Ldr(x10, FieldMemOperand(array, JSObject::kElementsOffset)); | |
4884 __ StoreNumberToDoubleElements(value, index_smi, x10, x11, d0, d1, | |
4885 &slow_elements); | |
4886 __ Ret(); | |
4887 } | |
4888 | |
4889 | |
4890 void StubFailureTrampolineStub::Generate(MacroAssembler* masm) { | |
4891 CEntryStub ces(1, fp_registers_ ? kSaveFPRegs : kDontSaveFPRegs); | |
4892 __ Call(ces.GetCode(masm->isolate()), RelocInfo::CODE_TARGET); | |
4893 int parameter_count_offset = | |
4894 StubFailureTrampolineFrame::kCallerStackParameterCountFrameOffset; | |
4895 __ Ldr(x1, MemOperand(fp, parameter_count_offset)); | |
4896 if (function_mode_ == JS_FUNCTION_STUB_MODE) { | |
4897 __ Add(x1, x1, 1); | |
4898 } | |
4899 masm->LeaveFrame(StackFrame::STUB_FAILURE_TRAMPOLINE); | |
4900 __ Drop(x1); | |
4901 // Return to IC Miss stub, continuation still on stack. | |
4902 __ Ret(); | |
4903 } | |
4904 | |
4905 | |
4906 void ProfileEntryHookStub::MaybeCallEntryHook(MacroAssembler* masm) { | |
4907 if (masm->isolate()->function_entry_hook() != NULL) { | |
4908 // TODO(all): This needs to be reliably consistent with | |
4909 // kReturnAddressDistanceFromFunctionStart in ::Generate. | |
4910 Assembler::BlockPoolsScope no_pools(masm); | |
4911 ProfileEntryHookStub stub; | |
4912 __ Push(lr); | |
4913 __ CallStub(&stub); | |
4914 __ Pop(lr); | |
4915 } | |
4916 } | |
4917 | |
4918 | |
4919 void ProfileEntryHookStub::Generate(MacroAssembler* masm) { | |
4920 MacroAssembler::NoUseRealAbortsScope no_use_real_aborts(masm); | |
4921 // The entry hook is a "BumpSystemStackPointer" instruction (sub), followed by | |
4922 // a "Push lr" instruction, followed by a call. | |
4923 static const int kReturnAddressDistanceFromFunctionStart = | |
4924 Assembler::kCallSizeWithRelocation + (2 * kInstructionSize); | |
4925 | |
4926 // Save all kCallerSaved registers (including lr), since this can be called | |
4927 // from anywhere. | |
4928 // TODO(jbramley): What about FP registers? | |
4929 __ PushCPURegList(kCallerSaved); | |
4930 ASSERT(kCallerSaved.IncludesAliasOf(lr)); | |
4931 const int kNumSavedRegs = kCallerSaved.Count(); | |
4932 | |
4933 // Compute the function's address as the first argument. | |
4934 __ Sub(x0, lr, kReturnAddressDistanceFromFunctionStart); | |
4935 | |
4936 #if V8_HOST_ARCH_A64 | |
4937 uintptr_t entry_hook = | |
4938 reinterpret_cast<uintptr_t>(masm->isolate()->function_entry_hook()); | |
4939 __ Mov(x10, entry_hook); | |
4940 #else | |
4941   // Under the simulator, the entry hook must be indirected through a | |
4942   // trampoline function at a known address. | |
4943 ApiFunction dispatcher(FUNCTION_ADDR(EntryHookTrampoline)); | |
4944 __ Mov(x10, Operand(ExternalReference(&dispatcher, | |
4945 ExternalReference::BUILTIN_CALL, | |
4946 masm->isolate()))); | |
4947   // The trampoline additionally takes an isolate as a third parameter. | |
4948 __ Mov(x2, ExternalReference::isolate_address(masm->isolate())); | |
4949 #endif | |
4950 | |
4951 // The caller's return address is above the saved temporaries. | |
4952 // Grab its location for the second argument to the hook. | |
4953 __ Add(x1, __ StackPointer(), kNumSavedRegs * kPointerSize); | |
4954 | |
4955 { | |
4956 // Create a dummy frame, as CallCFunction requires this. | |
4957 FrameScope frame(masm, StackFrame::MANUAL); | |
4958 __ CallCFunction(x10, 2, 0); | |
4959 } | |
4960 | |
4961 __ PopCPURegList(kCallerSaved); | |
4962 __ Ret(); | |
4963 } | |
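// A sketch of the C call made above, with the signature implied by the
// argument registers (this assumes the hook's parameters; x2 carries the
// isolate only under the simulator):
//
//   entry_hook(function_start,    // x0: lr minus the fixed call-sequence
//                                 //     length computed above
//              &return_address);  // x1: the caller's return address slot,
//                                 //     just above the saved registers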
4964 | |
4965 | |
4966 void DirectCEntryStub::Generate(MacroAssembler* masm) { | |
4967 // When calling into C++ code the stack pointer must be csp. | |
4968 // Therefore this code must use csp for peek/poke operations when the | |
4969 // stub is generated. When the stub is called | |
4970 // (via DirectCEntryStub::GenerateCall), the caller must set up an ExitFrame | |
4971 // and configure the stack pointer *before* making the call. | |
4972 const Register old_stack_pointer = __ StackPointer(); | |
4973 __ SetStackPointer(csp); | |
4974 | |
4975 // Put return address on the stack (accessible to GC through exit frame pc). | |
4976 __ Poke(lr, 0); | |
4977 // Call the C++ function. | |
4978 __ Blr(x10); | |
4979 // Return to calling code. | |
4980 __ Peek(lr, 0); | |
4981 __ Ret(); | |
4982 | |
4983 __ SetStackPointer(old_stack_pointer); | |
4984 } | |
4985 | |
4986 void DirectCEntryStub::GenerateCall(MacroAssembler* masm, | |
4987 Register target) { | |
4988 // Make sure the caller configured the stack pointer (see comment in | |
4989 // DirectCEntryStub::Generate). | |
4990 ASSERT(csp.Is(__ StackPointer())); | |
4991 | |
4992 intptr_t code = | |
4993 reinterpret_cast<intptr_t>(GetCode(masm->isolate()).location()); | |
4994 __ Mov(lr, Operand(code, RelocInfo::CODE_TARGET)); | |
4995 __ Mov(x10, target); | |
4996 // Branch to the stub. | |
4997 __ Blr(lr); | |
4998 } | |
4999 | |
5000 | |
5001 // Probe the name dictionary in the 'elements' register. | |
5002 // Jump to the 'done' label if a property with the given name is found. | |
5003 // Jump to the 'miss' label otherwise. | |
5004 // | |
5005 // If the lookup succeeds, 'scratch2' equals elements + kPointerSize * index. | |
5006 // 'elements' and 'name' registers are preserved on miss. | |
5007 void NameDictionaryLookupStub::GeneratePositiveLookup( | |
5008 MacroAssembler* masm, | |
5009 Label* miss, | |
5010 Label* done, | |
5011 Register elements, | |
5012 Register name, | |
5013 Register scratch1, | |
5014 Register scratch2) { | |
5015 ASSERT(!AreAliased(elements, name, scratch1, scratch2)); | |
5016 | |
5017 // Assert that name contains a string. | |
5018 __ AssertName(name); | |
5019 | |
5020 // Compute the capacity mask. | |
5021 __ Ldrsw(scratch1, UntagSmiFieldMemOperand(elements, kCapacityOffset)); | |
5022 __ Sub(scratch1, scratch1, 1); | |
5023 | |
5024 // Generate an unrolled loop that performs a few probes before giving up. | |
5025 for (int i = 0; i < kInlinedProbes; i++) { | |
5026 // Compute the masked index: (hash + i + i * i) & mask. | |
5027 __ Ldr(scratch2, FieldMemOperand(name, Name::kHashFieldOffset)); | |
5028 if (i > 0) { | |
5029 // Add the probe offset (i + i * i) left shifted to avoid right shifting | |
5030 // the hash in a separate instruction. The value hash + i + i * i is right | |
5031       // shifted in the following And instruction. | |
5032 ASSERT(NameDictionary::GetProbeOffset(i) < | |
5033 1 << (32 - Name::kHashFieldOffset)); | |
5034 __ Add(scratch2, scratch2, Operand( | |
5035 NameDictionary::GetProbeOffset(i) << Name::kHashShift)); | |
5036 } | |
5037 __ And(scratch2, scratch1, Operand(scratch2, LSR, Name::kHashShift)); | |
5038 | |
5039 // Scale the index by multiplying by the element size. | |
5040 ASSERT(NameDictionary::kEntrySize == 3); | |
5041 __ Add(scratch2, scratch2, Operand(scratch2, LSL, 1)); | |
5042 | |
5043 // Check if the key is identical to the name. | |
5044 UseScratchRegisterScope temps(masm); | |
5045 Register scratch3 = temps.AcquireX(); | |
5046 __ Add(scratch2, elements, Operand(scratch2, LSL, kPointerSizeLog2)); | |
5047 __ Ldr(scratch3, FieldMemOperand(scratch2, kElementsStartOffset)); | |
5048 __ Cmp(name, scratch3); | |
5049 __ B(eq, done); | |
5050 } | |
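  // A C-style sketch of one inlined probe above (quadratic probing, with
  // offsets i + i*i = 0, 2, 6, 12, ...):
  //
  //   index = ((hash_field >> Name::kHashShift) + i + i * i) & mask;
  //   entry = index * 3;  // NameDictionary::kEntrySize == 3
  //   key = Load(elements + kElementsStartOffset + entry * kPointerSize);
  //   if (key == name) goto done;
  //
  // The generated code adds the probe offset pre-shifted left by kHashShift,
  // so a single shift-and-mask 'And' replaces a separate right shift.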
5051 | |
5052 // The inlined probes didn't find the entry. | |
5053 // Call the complete stub to scan the whole dictionary. | |
5054 | |
5055 CPURegList spill_list(CPURegister::kRegister, kXRegSizeInBits, 0, 6); | |
5056 spill_list.Combine(lr); | |
5057 spill_list.Remove(scratch1); | |
5058 spill_list.Remove(scratch2); | |
5059 | |
5060 __ PushCPURegList(spill_list); | |
5061 | |
5062 if (name.is(x0)) { | |
5063 ASSERT(!elements.is(x1)); | |
5064 __ Mov(x1, name); | |
5065 __ Mov(x0, elements); | |
5066 } else { | |
5067 __ Mov(x0, elements); | |
5068 __ Mov(x1, name); | |
5069 } | |
5070 | |
5071 Label not_found; | |
5072 NameDictionaryLookupStub stub(POSITIVE_LOOKUP); | |
5073 __ CallStub(&stub); | |
5074 __ Cbz(x0, ¬_found); | |
5075 __ Mov(scratch2, x2); // Move entry index into scratch2. | |
5076 __ PopCPURegList(spill_list); | |
5077 __ B(done); | |
5078 | |
5079 __ Bind(¬_found); | |
5080 __ PopCPURegList(spill_list); | |
5081 __ B(miss); | |
5082 } | |
5083 | |
5084 | |
5085 void NameDictionaryLookupStub::GenerateNegativeLookup(MacroAssembler* masm, | |
5086 Label* miss, | |
5087 Label* done, | |
5088 Register receiver, | |
5089 Register properties, | |
5090 Handle<Name> name, | |
5091 Register scratch0) { | |
5092 ASSERT(!AreAliased(receiver, properties, scratch0)); | |
5093 ASSERT(name->IsUniqueName()); | |
5094   // If the names of slots 1 to kProbes - 1 for the hash value are not equal | |
5095   // to the name, and the kProbes-th slot is not used (its name is the | |
5096   // undefined value), then the hash table is guaranteed not to contain the | |
5097   // property. This holds even if some slots represent deleted properties | |
5098   // (their names are the hole value). | |
5099 for (int i = 0; i < kInlinedProbes; i++) { | |
5100 // scratch0 points to properties hash. | |
5101 // Compute the masked index: (hash + i + i * i) & mask. | |
5102 Register index = scratch0; | |
5103     // The capacity is a smi and is always a power of two (2^n). | |
5104 __ Ldrsw(index, UntagSmiFieldMemOperand(properties, kCapacityOffset)); | |
5105 __ Sub(index, index, 1); | |
5106 __ And(index, index, name->Hash() + NameDictionary::GetProbeOffset(i)); | |
5107 | |
5108 // Scale the index by multiplying by the entry size. | |
5109 ASSERT(NameDictionary::kEntrySize == 3); | |
5110 __ Add(index, index, Operand(index, LSL, 1)); // index *= 3. | |
5111 | |
5112 Register entity_name = scratch0; | |
5113     // Undefined in this slot means the name is not in the table. | |
5114 Register tmp = index; | |
5115 __ Add(tmp, properties, Operand(index, LSL, kPointerSizeLog2)); | |
5116 __ Ldr(entity_name, FieldMemOperand(tmp, kElementsStartOffset)); | |
5117 | |
5118 __ JumpIfRoot(entity_name, Heap::kUndefinedValueRootIndex, done); | |
5119 | |
5120 // Stop if found the property. | |
5121 __ Cmp(entity_name, Operand(name)); | |
5122 __ B(eq, miss); | |
5123 | |
5124 Label good; | |
5125 __ JumpIfRoot(entity_name, Heap::kTheHoleValueRootIndex, &good); | |
5126 | |
5127 // Check if the entry name is not a unique name. | |
5128 __ Ldr(entity_name, FieldMemOperand(entity_name, HeapObject::kMapOffset)); | |
5129 __ Ldrb(entity_name, | |
5130 FieldMemOperand(entity_name, Map::kInstanceTypeOffset)); | |
5131 __ JumpIfNotUniqueName(entity_name, miss); | |
5132 __ Bind(&good); | |
5133 } | |
5134 | |
5135 CPURegList spill_list(CPURegister::kRegister, kXRegSizeInBits, 0, 6); | |
5136 spill_list.Combine(lr); | |
5137 spill_list.Remove(scratch0); // Scratch registers don't need to be preserved. | |
5138 | |
5139 __ PushCPURegList(spill_list); | |
5140 | |
5141 __ Ldr(x0, FieldMemOperand(receiver, JSObject::kPropertiesOffset)); | |
5142 __ Mov(x1, Operand(name)); | |
5143 NameDictionaryLookupStub stub(NEGATIVE_LOOKUP); | |
5144 __ CallStub(&stub); | |
5145 // Move stub return value to scratch0. Note that scratch0 is not included in | |
5146 // spill_list and won't be clobbered by PopCPURegList. | |
5147 __ Mov(scratch0, x0); | |
5148 __ PopCPURegList(spill_list); | |
5149 | |
5150 __ Cbz(scratch0, done); | |
5151 __ B(miss); | |
5152 } | |
5153 | |
5154 | |
5155 void NameDictionaryLookupStub::Generate(MacroAssembler* masm) { | |
5156 // This stub overrides SometimesSetsUpAFrame() to return false. That means | |
5157 // we cannot call anything that could cause a GC from this stub. | |
5158 // | |
5159 // Arguments are in x0 and x1: | |
5160 // x0: property dictionary. | |
5161 // x1: the name of the property we are looking for. | |
5162 // | |
5163   // The return value is in x0: zero if the lookup failed, non-zero otherwise. | |
5164   // If the lookup is successful, x2 will contain the index of the entry. | |
5165 | |
5166 Register result = x0; | |
5167 Register dictionary = x0; | |
5168 Register key = x1; | |
5169 Register index = x2; | |
5170 Register mask = x3; | |
5171 Register hash = x4; | |
5172 Register undefined = x5; | |
5173 Register entry_key = x6; | |
5174 | |
5175 Label in_dictionary, maybe_in_dictionary, not_in_dictionary; | |
5176 | |
5177 __ Ldrsw(mask, UntagSmiFieldMemOperand(dictionary, kCapacityOffset)); | |
5178 __ Sub(mask, mask, 1); | |
5179 | |
5180 __ Ldr(hash, FieldMemOperand(key, Name::kHashFieldOffset)); | |
5181 __ LoadRoot(undefined, Heap::kUndefinedValueRootIndex); | |
5182 | |
5183 for (int i = kInlinedProbes; i < kTotalProbes; i++) { | |
5184 // Compute the masked index: (hash + i + i * i) & mask. | |
5185     // The capacity is a smi and is always a power of two (2^n). | |
5186 if (i > 0) { | |
5187 // Add the probe offset (i + i * i) left shifted to avoid right shifting | |
5188 // the hash in a separate instruction. The value hash + i + i * i is right | |
5189       // shifted in the following And instruction. | |
5190 ASSERT(NameDictionary::GetProbeOffset(i) < | |
5191 1 << (32 - Name::kHashFieldOffset)); | |
5192 __ Add(index, hash, | |
5193 NameDictionary::GetProbeOffset(i) << Name::kHashShift); | |
5194 } else { | |
5195 __ Mov(index, hash); | |
5196 } | |
5197 __ And(index, mask, Operand(index, LSR, Name::kHashShift)); | |
5198 | |
5199 // Scale the index by multiplying by the entry size. | |
5200 ASSERT(NameDictionary::kEntrySize == 3); | |
5201 __ Add(index, index, Operand(index, LSL, 1)); // index *= 3. | |
5202 | |
5203 __ Add(index, dictionary, Operand(index, LSL, kPointerSizeLog2)); | |
5204 __ Ldr(entry_key, FieldMemOperand(index, kElementsStartOffset)); | |
5205 | |
5206     // Undefined in this slot means the name is not in the table. | |
5207 __ Cmp(entry_key, undefined); | |
5208 __ B(eq, ¬_in_dictionary); | |
5209 | |
5210 // Stop if found the property. | |
5211 __ Cmp(entry_key, key); | |
5212 __ B(eq, &in_dictionary); | |
5213 | |
5214 if (i != kTotalProbes - 1 && mode_ == NEGATIVE_LOOKUP) { | |
5215 // Check if the entry name is not a unique name. | |
5216 __ Ldr(entry_key, FieldMemOperand(entry_key, HeapObject::kMapOffset)); | |
5217 __ Ldrb(entry_key, FieldMemOperand(entry_key, Map::kInstanceTypeOffset)); | |
5218 __ JumpIfNotUniqueName(entry_key, &maybe_in_dictionary); | |
5219 } | |
5220 } | |
5221 | |
5222 __ Bind(&maybe_in_dictionary); | |
5223 // If we are doing negative lookup then probing failure should be | |
5224 // treated as a lookup success. For positive lookup, probing failure | |
5225 // should be treated as lookup failure. | |
5226 if (mode_ == POSITIVE_LOOKUP) { | |
5227 __ Mov(result, 0); | |
5228 __ Ret(); | |
5229 } | |
5230 | |
5231 __ Bind(&in_dictionary); | |
5232 __ Mov(result, 1); | |
5233 __ Ret(); | |
5234 | |
5235 __ Bind(¬_in_dictionary); | |
5236 __ Mov(result, 0); | |
5237 __ Ret(); | |
5238 } | |
5239 | |
5240 | |
5241 template<class T> | |
5242 static void CreateArrayDispatch(MacroAssembler* masm, | |
5243 AllocationSiteOverrideMode mode) { | |
5244 ASM_LOCATION("CreateArrayDispatch"); | |
5245 if (mode == DISABLE_ALLOCATION_SITES) { | |
5246 T stub(GetInitialFastElementsKind(), mode); | |
5247 __ TailCallStub(&stub); | |
5248 | |
5249 } else if (mode == DONT_OVERRIDE) { | |
5250 Register kind = x3; | |
5251 int last_index = | |
5252 GetSequenceIndexFromFastElementsKind(TERMINAL_FAST_ELEMENTS_KIND); | |
5253 for (int i = 0; i <= last_index; ++i) { | |
5254 Label next; | |
5255 ElementsKind candidate_kind = GetFastElementsKindFromSequenceIndex(i); | |
5256 // TODO(jbramley): Is this the best way to handle this? Can we make the | |
5257 // tail calls conditional, rather than hopping over each one? | |
5258 __ CompareAndBranch(kind, candidate_kind, ne, &next); | |
5259 T stub(candidate_kind); | |
5260 __ TailCallStub(&stub); | |
5261 __ Bind(&next); | |
5262 } | |
5263 | |
5264 // If we reached this point there is a problem. | |
5265 __ Abort(kUnexpectedElementsKindInArrayConstructor); | |
5266 | |
5267 } else { | |
5268 UNREACHABLE(); | |
5269 } | |
5270 } | |
5271 | |
5272 | |
5273 // TODO(jbramley): If this needs to be a special case, make it a proper template | |
5274 // specialization, and not a separate function. | |
5275 static void CreateArrayDispatchOneArgument(MacroAssembler* masm, | |
5276 AllocationSiteOverrideMode mode) { | |
5277 ASM_LOCATION("CreateArrayDispatchOneArgument"); | |
5278 // x0 - argc | |
5279 // x1 - constructor? | |
5280 // x2 - allocation site (if mode != DISABLE_ALLOCATION_SITES) | |
5281 // x3 - kind (if mode != DISABLE_ALLOCATION_SITES) | |
5282 // sp[0] - last argument | |
5283 | |
5284 Register allocation_site = x2; | |
5285 Register kind = x3; | |
5286 | |
5287 Label normal_sequence; | |
5288 if (mode == DONT_OVERRIDE) { | |
5289 STATIC_ASSERT(FAST_SMI_ELEMENTS == 0); | |
5290 STATIC_ASSERT(FAST_HOLEY_SMI_ELEMENTS == 1); | |
5291 STATIC_ASSERT(FAST_ELEMENTS == 2); | |
5292 STATIC_ASSERT(FAST_HOLEY_ELEMENTS == 3); | |
5293 STATIC_ASSERT(FAST_DOUBLE_ELEMENTS == 4); | |
5294 STATIC_ASSERT(FAST_HOLEY_DOUBLE_ELEMENTS == 5); | |
5295 | |
5296 // Is the low bit set? If so, the array is holey. | |
5297 __ Tbnz(kind, 0, &normal_sequence); | |
5298 } | |
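  // The STATIC_ASSERTs above pin down the encoding that makes the single-bit
  // test work:
  //
  //   FAST_SMI_ELEMENTS    == 0    FAST_HOLEY_SMI_ELEMENTS    == 1
  //   FAST_ELEMENTS        == 2    FAST_HOLEY_ELEMENTS        == 3
  //   FAST_DOUBLE_ELEMENTS == 4    FAST_HOLEY_DOUBLE_ELEMENTS == 5
  //
  // The low bit of a fast ElementsKind flags holeyness, so Tbnz on bit 0
  // detects holey kinds here, and 'Orr(kind, kind, 1)' below converts a
  // packed kind to its holey counterpart.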
5299 | |
5300 // Look at the last argument. | |
5301 // TODO(jbramley): What does a 0 argument represent? | |
5302 __ Peek(x10, 0); | |
5303 __ Cbz(x10, &normal_sequence); | |
5304 | |
5305 if (mode == DISABLE_ALLOCATION_SITES) { | |
5306 ElementsKind initial = GetInitialFastElementsKind(); | |
5307 ElementsKind holey_initial = GetHoleyElementsKind(initial); | |
5308 | |
5309 ArraySingleArgumentConstructorStub stub_holey(holey_initial, | |
5310 DISABLE_ALLOCATION_SITES); | |
5311 __ TailCallStub(&stub_holey); | |
5312 | |
5313 __ Bind(&normal_sequence); | |
5314 ArraySingleArgumentConstructorStub stub(initial, | |
5315 DISABLE_ALLOCATION_SITES); | |
5316 __ TailCallStub(&stub); | |
5317 } else if (mode == DONT_OVERRIDE) { | |
5318 // We are going to create a holey array, but our kind is non-holey. | |
5319 // Fix kind and retry (only if we have an allocation site in the slot). | |
5320 __ Orr(kind, kind, 1); | |
5321 | |
5322 if (FLAG_debug_code) { | |
5323 __ Ldr(x10, FieldMemOperand(allocation_site, 0)); | |
5324 __ JumpIfNotRoot(x10, Heap::kAllocationSiteMapRootIndex, | |
5325 &normal_sequence); | |
5326 __ Assert(eq, kExpectedAllocationSite); | |
5327 } | |
5328 | |
5329 // Save the resulting elements kind in type info. We can't just store 'kind' | |
5330 // in the AllocationSite::transition_info field because elements kind is | |
5331 // restricted to a portion of the field; upper bits need to be left alone. | |
5332 STATIC_ASSERT(AllocationSite::ElementsKindBits::kShift == 0); | |
5333 __ Ldr(x11, FieldMemOperand(allocation_site, | |
5334 AllocationSite::kTransitionInfoOffset)); | |
5335 __ Add(x11, x11, Smi::FromInt(kFastElementsKindPackedToHoley)); | |
5336 __ Str(x11, FieldMemOperand(allocation_site, | |
5337 AllocationSite::kTransitionInfoOffset)); | |
5338 | |
5339 __ Bind(&normal_sequence); | |
5340 int last_index = | |
5341 GetSequenceIndexFromFastElementsKind(TERMINAL_FAST_ELEMENTS_KIND); | |
5342 for (int i = 0; i <= last_index; ++i) { | |
5343 Label next; | |
5344 ElementsKind candidate_kind = GetFastElementsKindFromSequenceIndex(i); | |
5345 __ CompareAndBranch(kind, candidate_kind, ne, &next); | |
5346 ArraySingleArgumentConstructorStub stub(candidate_kind); | |
5347 __ TailCallStub(&stub); | |
5348 __ Bind(&next); | |
5349 } | |
5350 | |
5351 // If we reached this point there is a problem. | |
5352 __ Abort(kUnexpectedElementsKindInArrayConstructor); | |
5353 } else { | |
5354 UNREACHABLE(); | |
5355 } | |
5356 } | |
5357 | |
5358 | |
5359 template<class T> | |
5360 static void ArrayConstructorStubAheadOfTimeHelper(Isolate* isolate) { | |
5361 int to_index = GetSequenceIndexFromFastElementsKind( | |
5362 TERMINAL_FAST_ELEMENTS_KIND); | |
5363 for (int i = 0; i <= to_index; ++i) { | |
5364 ElementsKind kind = GetFastElementsKindFromSequenceIndex(i); | |
5365 T stub(kind); | |
5366 stub.GetCode(isolate); | |
5367 if (AllocationSite::GetMode(kind) != DONT_TRACK_ALLOCATION_SITE) { | |
5368 T stub1(kind, DISABLE_ALLOCATION_SITES); | |
5369 stub1.GetCode(isolate); | |
5370 } | |
5371 } | |
5372 } | |
5373 | |
5374 | |
5375 void ArrayConstructorStubBase::GenerateStubsAheadOfTime(Isolate* isolate) { | |
5376 ArrayConstructorStubAheadOfTimeHelper<ArrayNoArgumentConstructorStub>( | |
5377 isolate); | |
5378 ArrayConstructorStubAheadOfTimeHelper<ArraySingleArgumentConstructorStub>( | |
5379 isolate); | |
5380 ArrayConstructorStubAheadOfTimeHelper<ArrayNArgumentsConstructorStub>( | |
5381 isolate); | |
5382 } | |
5383 | |
5384 | |
5385 void InternalArrayConstructorStubBase::GenerateStubsAheadOfTime( | |
5386 Isolate* isolate) { | |
5387 ElementsKind kinds[2] = { FAST_ELEMENTS, FAST_HOLEY_ELEMENTS }; | |
5388 for (int i = 0; i < 2; i++) { | |
5389     // For internal arrays we only need a small set of stubs. | |
5390 InternalArrayNoArgumentConstructorStub stubh1(kinds[i]); | |
5391 stubh1.GetCode(isolate); | |
5392 InternalArraySingleArgumentConstructorStub stubh2(kinds[i]); | |
5393 stubh2.GetCode(isolate); | |
5394 InternalArrayNArgumentsConstructorStub stubh3(kinds[i]); | |
5395 stubh3.GetCode(isolate); | |
5396 } | |
5397 } | |
5398 | |
5399 | |
5400 void ArrayConstructorStub::GenerateDispatchToArrayStub( | |
5401 MacroAssembler* masm, | |
5402 AllocationSiteOverrideMode mode) { | |
5403 Register argc = x0; | |
5404 if (argument_count_ == ANY) { | |
5405 Label zero_case, n_case; | |
5406 __ Cbz(argc, &zero_case); | |
5407 __ Cmp(argc, 1); | |
5408 __ B(ne, &n_case); | |
5409 | |
5410 // One argument. | |
5411 CreateArrayDispatchOneArgument(masm, mode); | |
5412 | |
5413 __ Bind(&zero_case); | |
5414 // No arguments. | |
5415 CreateArrayDispatch<ArrayNoArgumentConstructorStub>(masm, mode); | |
5416 | |
5417 __ Bind(&n_case); | |
5418 // N arguments. | |
5419 CreateArrayDispatch<ArrayNArgumentsConstructorStub>(masm, mode); | |
5420 | |
5421 } else if (argument_count_ == NONE) { | |
5422 CreateArrayDispatch<ArrayNoArgumentConstructorStub>(masm, mode); | |
5423 } else if (argument_count_ == ONE) { | |
5424 CreateArrayDispatchOneArgument(masm, mode); | |
5425 } else if (argument_count_ == MORE_THAN_ONE) { | |
5426 CreateArrayDispatch<ArrayNArgumentsConstructorStub>(masm, mode); | |
5427 } else { | |
5428 UNREACHABLE(); | |
5429 } | |
5430 } | |
5431 | |
5432 | |
5433 void ArrayConstructorStub::Generate(MacroAssembler* masm) { | |
5434 ASM_LOCATION("ArrayConstructorStub::Generate"); | |
5435 // ----------- S t a t e ------------- | |
5436 // -- x0 : argc (only if argument_count_ == ANY) | |
5437 // -- x1 : constructor | |
5438 // -- x2 : AllocationSite or undefined | |
5439 // -- sp[0] : return address | |
5440 // -- sp[4] : last argument | |
5441 // ----------------------------------- | |
5442 Register constructor = x1; | |
5443 Register allocation_site = x2; | |
5444 | |
5445 if (FLAG_debug_code) { | |
5446 // The array construct code is only set for the global and natives | |
5447     // builtin Array functions, which always have maps. | |
5448 | |
5449 Label unexpected_map, map_ok; | |
5450 // Initial map for the builtin Array function should be a map. | |
5451 __ Ldr(x10, FieldMemOperand(constructor, | |
5452 JSFunction::kPrototypeOrInitialMapOffset)); | |
5453     // A Smi check catches both a NULL pointer and a genuine Smi. | |
5454 __ JumpIfSmi(x10, &unexpected_map); | |
5455 __ JumpIfObjectType(x10, x10, x11, MAP_TYPE, &map_ok); | |
5456 __ Bind(&unexpected_map); | |
5457 __ Abort(kUnexpectedInitialMapForArrayFunction); | |
5458 __ Bind(&map_ok); | |
5459 | |
5460 // We should either have undefined in the allocation_site register or a | |
5461 // valid AllocationSite. | |
5462 __ AssertUndefinedOrAllocationSite(allocation_site, x10); | |
5463 } | |
5464 | |
5465 Register kind = x3; | |
5466 Label no_info; | |
5467 // Get the elements kind and case on that. | |
5468 __ JumpIfRoot(allocation_site, Heap::kUndefinedValueRootIndex, &no_info); | |
5469 | |
5470 __ Ldrsw(kind, | |
5471 UntagSmiFieldMemOperand(allocation_site, | |
5472 AllocationSite::kTransitionInfoOffset)); | |
5473 __ And(kind, kind, AllocationSite::ElementsKindBits::kMask); | |
5474 GenerateDispatchToArrayStub(masm, DONT_OVERRIDE); | |
5475 | |
5476 __ Bind(&no_info); | |
5477 GenerateDispatchToArrayStub(masm, DISABLE_ALLOCATION_SITES); | |
5478 } | |
5479 | |
5480 | |
5481 void InternalArrayConstructorStub::GenerateCase( | |
5482 MacroAssembler* masm, ElementsKind kind) { | |
5483 Label zero_case, n_case; | |
5484 Register argc = x0; | |
5485 | |
5486 __ Cbz(argc, &zero_case); | |
5487 __ CompareAndBranch(argc, 1, ne, &n_case); | |
5488 | |
5489 // One argument. | |
5490 if (IsFastPackedElementsKind(kind)) { | |
5491 Label packed_case; | |
5492 | |
5493 // We might need to create a holey array; look at the first argument. | |
5494 __ Peek(x10, 0); | |
5495 __ Cbz(x10, &packed_case); | |
5496 | |
5497 InternalArraySingleArgumentConstructorStub | |
5498 stub1_holey(GetHoleyElementsKind(kind)); | |
5499 __ TailCallStub(&stub1_holey); | |
5500 | |
5501 __ Bind(&packed_case); | |
5502 } | |
5503 InternalArraySingleArgumentConstructorStub stub1(kind); | |
5504 __ TailCallStub(&stub1); | |
5505 | |
5506 __ Bind(&zero_case); | |
5507 // No arguments. | |
5508 InternalArrayNoArgumentConstructorStub stub0(kind); | |
5509 __ TailCallStub(&stub0); | |
5510 | |
5511 __ Bind(&n_case); | |
5512 // N arguments. | |
5513 InternalArrayNArgumentsConstructorStub stubN(kind); | |
5514 __ TailCallStub(&stubN); | |
5515 } | |
5516 | |
5517 | |
5518 void InternalArrayConstructorStub::Generate(MacroAssembler* masm) { | |
5519 // ----------- S t a t e ------------- | |
5520 // -- x0 : argc | |
5521 // -- x1 : constructor | |
5522 // -- sp[0] : return address | |
5523 // -- sp[4] : last argument | |
5524 // ----------------------------------- | |
5525 Handle<Object> undefined_sentinel( | |
5526 masm->isolate()->heap()->undefined_value(), masm->isolate()); | |
5527 | |
5528 Register constructor = x1; | |
5529 | |
  if (FLAG_debug_code) {
    // The array construct code is only set for the global and natives
    // builtin Array functions which always have maps.

    Label unexpected_map, map_ok;
    // Initial map for the builtin Array function should be a map.
    __ Ldr(x10, FieldMemOperand(constructor,
                                JSFunction::kPrototypeOrInitialMapOffset));
    // A NULL pointer has the same tag as a Smi, so this check catches both.
    __ JumpIfSmi(x10, &unexpected_map);
    __ JumpIfObjectType(x10, x10, x11, MAP_TYPE, &map_ok);
    __ Bind(&unexpected_map);
    __ Abort(kUnexpectedInitialMapForArrayFunction);
    __ Bind(&map_ok);
  }

  Register kind = w3;
  // Figure out the right elements kind.
  __ Ldr(x10, FieldMemOperand(constructor,
                              JSFunction::kPrototypeOrInitialMapOffset));

  // Retrieve elements_kind from map.
  __ LoadElementsKindFromMap(kind, x10);

  if (FLAG_debug_code) {
    __ Cmp(kind, FAST_ELEMENTS);
    __ Ccmp(kind, FAST_HOLEY_ELEMENTS, ZFlag, ne);
    __ Assert(eq, kInvalidElementsKindForInternalArrayOrInternalPackedArray);
  }

  Label fast_elements_case;
  __ CompareAndBranch(kind, FAST_ELEMENTS, eq, &fast_elements_case);
  GenerateCase(masm, FAST_HOLEY_ELEMENTS);

  __ Bind(&fast_elements_case);
  GenerateCase(masm, FAST_ELEMENTS);
}


void CallApiFunctionStub::Generate(MacroAssembler* masm) {
  // ----------- S t a t e -------------
  //  -- x0 : callee
  //  -- x4 : call_data
  //  -- x2 : holder
  //  -- x1 : api_function_address
  //  -- cp : context
  //  --
  //  -- sp[0] : last argument
  //  -- ...
  //  -- sp[(argc - 1) * 8] : first argument
  //  -- sp[argc * 8] : receiver
  // -----------------------------------

  Register callee = x0;
  Register call_data = x4;
  Register holder = x2;
  Register api_function_address = x1;
  Register context = cp;

  int argc = ArgumentBits::decode(bit_field_);
  bool is_store = IsStoreBits::decode(bit_field_);
  bool call_data_undefined = CallDataUndefinedBits::decode(bit_field_);

  typedef FunctionCallbackArguments FCA;

  STATIC_ASSERT(FCA::kContextSaveIndex == 6);
  STATIC_ASSERT(FCA::kCalleeIndex == 5);
  STATIC_ASSERT(FCA::kDataIndex == 4);
  STATIC_ASSERT(FCA::kReturnValueOffset == 3);
  STATIC_ASSERT(FCA::kReturnValueDefaultValueIndex == 2);
  STATIC_ASSERT(FCA::kIsolateIndex == 1);
  STATIC_ASSERT(FCA::kHolderIndex == 0);
  STATIC_ASSERT(FCA::kArgsLength == 7);

  Isolate* isolate = masm->isolate();

  // FunctionCallbackArguments: context, callee and call data.
  __ Push(context, callee, call_data);

  // Load context from callee.
  __ Ldr(context, FieldMemOperand(callee, JSFunction::kContextOffset));

  if (!call_data_undefined) {
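    // call_data has already been pushed above; its register is now reused to
    // hold the undefined value for the two return-value slots pushed below.
    // When the call data is already undefined, the register needs no reload.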
    __ LoadRoot(call_data, Heap::kUndefinedValueRootIndex);
  }
  Register isolate_reg = x5;
  __ Mov(isolate_reg, ExternalReference::isolate_address(isolate));

  // FunctionCallbackArguments:
  // return value, return value default, isolate, holder.
  __ Push(call_data, call_data, isolate_reg, holder);
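  // All seven FunctionCallbackArguments slots are now on the stack; reading
  // upwards from the stack pointer: holder, isolate, return value default,
  // return value, call data, callee, context, matching the FCA indices
  // asserted above, with the JS arguments and the receiver above them.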

  // Prepare arguments.
  Register args = x6;
  __ Mov(args, masm->StackPointer());

  // Allocate the v8::FunctionCallbackInfo structure in the arguments' space,
  // since it's not controlled by GC.
  const int kApiStackSpace = 4;

  // Allocate space so that CallApiFunctionAndReturn can store some scratch
  // registers on the stack.
  const int kCallApiFunctionSpillSpace = 4;

  FrameScope frame_scope(masm, StackFrame::MANUAL);
  __ EnterExitFrame(false, x10, kApiStackSpace + kCallApiFunctionSpillSpace);

  ASSERT(!AreAliased(x0, api_function_address));
  // x0 = FunctionCallbackInfo&
  // The arguments structure is located just above the return address slot.
  __ Add(x0, masm->StackPointer(), 1 * kPointerSize);
  // FunctionCallbackInfo::implicit_args_ and FunctionCallbackInfo::values_
  __ Add(x10, args, Operand((FCA::kArgsLength - 1 + argc) * kPointerSize));
  __ Stp(args, x10, MemOperand(x0, 0 * kPointerSize));
  // FunctionCallbackInfo::length_ = argc and
  // FunctionCallbackInfo::is_construct_call = 0
  __ Mov(x10, argc);
  __ Stp(x10, xzr, MemOperand(x0, 2 * kPointerSize));
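  // The two Stp instructions above fill the four consecutive pointer-sized
  // fields of the FunctionCallbackInfo (implicit_args_, values_, length_,
  // is_construct_call) that kApiStackSpace reserved room for.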

  const int kStackUnwindSpace = argc + FCA::kArgsLength + 1;
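  // On return, drop the argc JS arguments, the seven FunctionCallbackArguments
  // slots and the receiver.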
  Address thunk_address = FUNCTION_ADDR(&InvokeFunctionCallback);
  ExternalReference::Type thunk_type = ExternalReference::PROFILING_API_CALL;
  ApiFunction thunk_fun(thunk_address);
  ExternalReference thunk_ref = ExternalReference(&thunk_fun, thunk_type,
                                                  masm->isolate());

  AllowExternalCallThatCantCauseGC scope(masm);
  MemOperand context_restore_operand(
      fp, (2 + FCA::kContextSaveIndex) * kPointerSize);
  // Stores return their first JS argument; everything else returns the
  // ReturnValue slot.
  int return_value_offset = 0;
  if (is_store) {
    return_value_offset = 2 + FCA::kArgsLength;
  } else {
    return_value_offset = 2 + FCA::kReturnValueOffset;
  }
  MemOperand return_value_operand(fp, return_value_offset * kPointerSize);

  const int spill_offset = 1 + kApiStackSpace;
  __ CallApiFunctionAndReturn(api_function_address,
                              thunk_ref,
                              kStackUnwindSpace,
                              spill_offset,
                              return_value_operand,
                              &context_restore_operand);
}


void CallApiGetterStub::Generate(MacroAssembler* masm) {
  // ----------- S t a t e -------------
  //  -- sp[0] : name
  //  -- sp[8 .. kArgsLength * 8] : PropertyCallbackArguments object
  //  -- ...
  //  -- x2 : api_function_address
  // -----------------------------------

  Register api_function_address = x2;

  __ Mov(x0, masm->StackPointer());  // x0 = Handle<Name>
  __ Add(x1, x0, 1 * kPointerSize);  // x1 = PCA
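  // A Handle is just a pointer to a slot holding the object: sp points at
  // the name slot, so it can serve directly as a Handle<Name>, and the
  // PropertyCallbackArguments array starts in the slot above it.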

  const int kApiStackSpace = 1;

  // Allocate space so that CallApiFunctionAndReturn can store some scratch
  // registers on the stack.
  const int kCallApiFunctionSpillSpace = 4;

  FrameScope frame_scope(masm, StackFrame::MANUAL);
  __ EnterExitFrame(false, x10, kApiStackSpace + kCallApiFunctionSpillSpace);

  // Create a PropertyAccessorInfo instance on the stack above the exit frame,
  // with x1 (internal::Object** args_) as the data.
  __ Poke(x1, 1 * kPointerSize);
  __ Add(x1, masm->StackPointer(), 1 * kPointerSize);  // x1 = AccessorInfo&

  const int kStackUnwindSpace = PropertyCallbackArguments::kArgsLength + 1;
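  // On return, drop the PropertyCallbackArguments slots plus the name slot.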

  Address thunk_address = FUNCTION_ADDR(&InvokeAccessorGetterCallback);
  ExternalReference::Type thunk_type =
      ExternalReference::PROFILING_GETTER_CALL;
  ApiFunction thunk_fun(thunk_address);
  ExternalReference thunk_ref = ExternalReference(&thunk_fun, thunk_type,
                                                  masm->isolate());

  const int spill_offset = 1 + kApiStackSpace;
  __ CallApiFunctionAndReturn(api_function_address,
                              thunk_ref,
                              kStackUnwindSpace,
                              spill_offset,
                              MemOperand(fp, 6 * kPointerSize),
                              NULL);
}


#undef __

} }  // namespace v8::internal

#endif  // V8_TARGET_ARCH_A64