Chromium Code Reviews
Side by Side Diff: src/a64/code-stubs-a64.cc

Issue 148293020: Merge experimental/a64 to bleeding_edge. (Closed) Base URL: https://v8.googlecode.com/svn/branches/bleeding_edge
Patch Set: Remove ARM from OWNERS Created 6 years, 10 months ago
1 // Copyright 2013 the V8 project authors. All rights reserved.
2 // Redistribution and use in source and binary forms, with or without
3 // modification, are permitted provided that the following conditions are
4 // met:
5 //
6 // * Redistributions of source code must retain the above copyright
7 // notice, this list of conditions and the following disclaimer.
8 // * Redistributions in binary form must reproduce the above
9 // copyright notice, this list of conditions and the following
10 // disclaimer in the documentation and/or other materials provided
11 // with the distribution.
12 // * Neither the name of Google Inc. nor the names of its
13 // contributors may be used to endorse or promote products derived
14 // from this software without specific prior written permission.
15 //
16 // THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
17 // "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
18 // LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
19 // A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
20 // OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
21 // SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
22 // LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
23 // DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
24 // THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
25 // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
26 // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
27
28 #include "v8.h"
29
30 #if V8_TARGET_ARCH_A64
31
32 #include "bootstrapper.h"
33 #include "code-stubs.h"
34 #include "regexp-macro-assembler.h"
35 #include "stub-cache.h"
36
37 namespace v8 {
38 namespace internal {
39
40
41 void FastNewClosureStub::InitializeInterfaceDescriptor(
42 Isolate* isolate,
43 CodeStubInterfaceDescriptor* descriptor) {
44 // x2: function info
45 static Register registers[] = { x2 };
46 descriptor->register_param_count_ = sizeof(registers) / sizeof(registers[0]);
47 descriptor->register_params_ = registers;
48 descriptor->deoptimization_handler_ =
49 Runtime::FunctionForId(Runtime::kNewClosureFromStubFailure)->entry;
50 }
51
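Each InitializeInterfaceDescriptor below derives register_param_count_ with
the standard C++ array-length idiom sizeof(a) / sizeof(a[0]). A generic
equivalent, as an illustrative sketch only (not part of this patch):

  template <typename T, size_t N>
  size_t ArrayLength(T (&)[N]) { return N; }  // ArrayLength(registers) == 1

The register arrays are function-local statics so that the pointer stored in
register_params_ stays valid after the initializer returns.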
52
53 void FastNewContextStub::InitializeInterfaceDescriptor(
54 Isolate* isolate,
55 CodeStubInterfaceDescriptor* descriptor) {
56 // x1: function
57 static Register registers[] = { x1 };
58 descriptor->register_param_count_ = sizeof(registers) / sizeof(registers[0]);
59 descriptor->register_params_ = registers;
60 descriptor->deoptimization_handler_ = NULL;
61 }
62
63
64 void ToNumberStub::InitializeInterfaceDescriptor(
65 Isolate* isolate,
66 CodeStubInterfaceDescriptor* descriptor) {
67 // x0: value
68 static Register registers[] = { x0 };
69 descriptor->register_param_count_ = sizeof(registers) / sizeof(registers[0]);
70 descriptor->register_params_ = registers;
71 descriptor->deoptimization_handler_ = NULL;
72 }
73
74
75 void NumberToStringStub::InitializeInterfaceDescriptor(
76 Isolate* isolate,
77 CodeStubInterfaceDescriptor* descriptor) {
78 // x0: value
79 static Register registers[] = { x0 };
80 descriptor->register_param_count_ = sizeof(registers) / sizeof(registers[0]);
81 descriptor->register_params_ = registers;
82 descriptor->deoptimization_handler_ =
83 Runtime::FunctionForId(Runtime::kNumberToString)->entry;
84 }
85
86
87 void FastCloneShallowArrayStub::InitializeInterfaceDescriptor(
88 Isolate* isolate,
89 CodeStubInterfaceDescriptor* descriptor) {
90 // x3: array literals array
91 // x2: array literal index
92 // x1: constant elements
93 static Register registers[] = { x3, x2, x1 };
94 descriptor->register_param_count_ = sizeof(registers) / sizeof(registers[0]);
95 descriptor->register_params_ = registers;
96 descriptor->deoptimization_handler_ =
97 Runtime::FunctionForId(Runtime::kCreateArrayLiteralStubBailout)->entry;
98 }
99
100
101 void FastCloneShallowObjectStub::InitializeInterfaceDescriptor(
102 Isolate* isolate,
103 CodeStubInterfaceDescriptor* descriptor) {
104 // x3: object literals array
105 // x2: object literal index
106 // x1: constant properties
107 // x0: object literal flags
108 static Register registers[] = { x3, x2, x1, x0 };
109 descriptor->register_param_count_ = sizeof(registers) / sizeof(registers[0]);
110 descriptor->register_params_ = registers;
111 descriptor->deoptimization_handler_ =
112 Runtime::FunctionForId(Runtime::kCreateObjectLiteral)->entry;
113 }
114
115
116 void CreateAllocationSiteStub::InitializeInterfaceDescriptor(
117 Isolate* isolate,
118 CodeStubInterfaceDescriptor* descriptor) {
119 // x2: feedback vector
120 // x3: call feedback slot
121 static Register registers[] = { x2, x3 };
122 descriptor->register_param_count_ = sizeof(registers) / sizeof(registers[0]);
123 descriptor->register_params_ = registers;
124 descriptor->deoptimization_handler_ = NULL;
125 }
126
127
128 void KeyedLoadFastElementStub::InitializeInterfaceDescriptor(
129 Isolate* isolate,
130 CodeStubInterfaceDescriptor* descriptor) {
131 // x1: receiver
132 // x0: key
133 static Register registers[] = { x1, x0 };
134 descriptor->register_param_count_ = sizeof(registers) / sizeof(registers[0]);
135 descriptor->register_params_ = registers;
136 descriptor->deoptimization_handler_ =
137 FUNCTION_ADDR(KeyedLoadIC_MissFromStubFailure);
138 }
139
140
141 void KeyedLoadDictionaryElementStub::InitializeInterfaceDescriptor(
142 Isolate* isolate,
143 CodeStubInterfaceDescriptor* descriptor) {
144 // x1: receiver
145 // x0: key
146 static Register registers[] = { x1, x0 };
147 descriptor->register_param_count_ = sizeof(registers) / sizeof(registers[0]);
148 descriptor->register_params_ = registers;
149 descriptor->deoptimization_handler_ =
150 FUNCTION_ADDR(KeyedLoadIC_MissFromStubFailure);
151 }
152
153
154 void RegExpConstructResultStub::InitializeInterfaceDescriptor(
155 Isolate* isolate,
156 CodeStubInterfaceDescriptor* descriptor) {
157 // x2: length
158 // x1: index (of last match)
159 // x0: string
160 static Register registers[] = { x2, x1, x0 };
161 descriptor->register_param_count_ = sizeof(registers) / sizeof(registers[0]);
162 descriptor->register_params_ = registers;
163 descriptor->deoptimization_handler_ =
164 Runtime::FunctionForId(Runtime::kRegExpConstructResult)->entry;
165 }
166
167
168 void LoadFieldStub::InitializeInterfaceDescriptor(
169 Isolate* isolate,
170 CodeStubInterfaceDescriptor* descriptor) {
171 // x0: receiver
172 static Register registers[] = { x0 };
173 descriptor->register_param_count_ = sizeof(registers) / sizeof(registers[0]);
174 descriptor->register_params_ = registers;
175 descriptor->deoptimization_handler_ = NULL;
176 }
177
178
179 void KeyedLoadFieldStub::InitializeInterfaceDescriptor(
180 Isolate* isolate,
181 CodeStubInterfaceDescriptor* descriptor) {
182 // x1: receiver
183 static Register registers[] = { x1 };
184 descriptor->register_param_count_ = sizeof(registers) / sizeof(registers[0]);
185 descriptor->register_params_ = registers;
186 descriptor->deoptimization_handler_ = NULL;
187 }
188
189
190 void KeyedStoreFastElementStub::InitializeInterfaceDescriptor(
191 Isolate* isolate,
192 CodeStubInterfaceDescriptor* descriptor) {
193 // x2: receiver
194 // x1: key
195 // x0: value
196 static Register registers[] = { x2, x1, x0 };
197 descriptor->register_param_count_ = sizeof(registers) / sizeof(registers[0]);
198 descriptor->register_params_ = registers;
199 descriptor->deoptimization_handler_ =
200 FUNCTION_ADDR(KeyedStoreIC_MissFromStubFailure);
201 }
202
203
204 void TransitionElementsKindStub::InitializeInterfaceDescriptor(
205 Isolate* isolate,
206 CodeStubInterfaceDescriptor* descriptor) {
207 // x0: value (js_array)
208 // x1: to_map
209 static Register registers[] = { x0, x1 };
210 descriptor->register_param_count_ = sizeof(registers) / sizeof(registers[0]);
211 descriptor->register_params_ = registers;
212 Address entry =
213 Runtime::FunctionForId(Runtime::kTransitionElementsKind)->entry;
214 descriptor->deoptimization_handler_ = FUNCTION_ADDR(entry);
215 }
216
217
218 void CompareNilICStub::InitializeInterfaceDescriptor(
219 Isolate* isolate,
220 CodeStubInterfaceDescriptor* descriptor) {
221 // x0: value to compare
222 static Register registers[] = { x0 };
223 descriptor->register_param_count_ = sizeof(registers) / sizeof(registers[0]);
224 descriptor->register_params_ = registers;
225 descriptor->deoptimization_handler_ =
226 FUNCTION_ADDR(CompareNilIC_Miss);
227 descriptor->SetMissHandler(
228 ExternalReference(IC_Utility(IC::kCompareNilIC_Miss), isolate));
229 }
230
231
232 static void InitializeArrayConstructorDescriptor(
233 Isolate* isolate,
234 CodeStubInterfaceDescriptor* descriptor,
235 int constant_stack_parameter_count) {
236 // x1: function
237 // x2: allocation site with elements kind
238 // x0: number of arguments to the constructor function
239 static Register registers_variable_args[] = { x1, x2, x0 };
240 static Register registers_no_args[] = { x1, x2 };
241
242 if (constant_stack_parameter_count == 0) {
243 descriptor->register_param_count_ =
244 sizeof(registers_no_args) / sizeof(registers_no_args[0]);
245 descriptor->register_params_ = registers_no_args;
246 } else {
247 // The stack parameter count needs to cover the constructor pointer and a single argument.
248 descriptor->handler_arguments_mode_ = PASS_ARGUMENTS;
249 descriptor->stack_parameter_count_ = x0;
250 descriptor->register_param_count_ =
251 sizeof(registers_variable_args) / sizeof(registers_variable_args[0]);
252 descriptor->register_params_ = registers_variable_args;
253 }
254
255 descriptor->hint_stack_parameter_count_ = constant_stack_parameter_count;
256 descriptor->function_mode_ = JS_FUNCTION_STUB_MODE;
257 descriptor->deoptimization_handler_ =
258 Runtime::FunctionForId(Runtime::kArrayConstructor)->entry;
259 }
260
261
262 void ArrayNoArgumentConstructorStub::InitializeInterfaceDescriptor(
263 Isolate* isolate,
264 CodeStubInterfaceDescriptor* descriptor) {
265 InitializeArrayConstructorDescriptor(isolate, descriptor, 0);
266 }
267
268
269 void ArraySingleArgumentConstructorStub::InitializeInterfaceDescriptor(
270 Isolate* isolate,
271 CodeStubInterfaceDescriptor* descriptor) {
272 InitializeArrayConstructorDescriptor(isolate, descriptor, 1);
273 }
274
275
276 void ArrayNArgumentsConstructorStub::InitializeInterfaceDescriptor(
277 Isolate* isolate,
278 CodeStubInterfaceDescriptor* descriptor) {
279 InitializeArrayConstructorDescriptor(isolate, descriptor, -1);
280 }
281
282
283 static void InitializeInternalArrayConstructorDescriptor(
284 Isolate* isolate,
285 CodeStubInterfaceDescriptor* descriptor,
286 int constant_stack_parameter_count) {
287 // x1: constructor function
288 // x0: number of arguments to the constructor function
289 static Register registers_variable_args[] = { x1, x0 };
290 static Register registers_no_args[] = { x1 };
291
292 if (constant_stack_parameter_count == 0) {
293 descriptor->register_param_count_ =
294 sizeof(registers_no_args) / sizeof(registers_no_args[0]);
295 descriptor->register_params_ = registers_no_args;
296 } else {
297 // The stack parameter count needs to cover the constructor pointer and a single argument.
298 descriptor->handler_arguments_mode_ = PASS_ARGUMENTS;
299 descriptor->stack_parameter_count_ = x0;
300 descriptor->register_param_count_ =
301 sizeof(registers_variable_args) / sizeof(registers_variable_args[0]);
302 descriptor->register_params_ = registers_variable_args;
303 }
304
305 descriptor->hint_stack_parameter_count_ = constant_stack_parameter_count;
306 descriptor->function_mode_ = JS_FUNCTION_STUB_MODE;
307 descriptor->deoptimization_handler_ =
308 Runtime::FunctionForId(Runtime::kInternalArrayConstructor)->entry;
309 }
310
311
312 void InternalArrayNoArgumentConstructorStub::InitializeInterfaceDescriptor(
313 Isolate* isolate,
314 CodeStubInterfaceDescriptor* descriptor) {
315 InitializeInternalArrayConstructorDescriptor(isolate, descriptor, 0);
316 }
317
318
319 void InternalArraySingleArgumentConstructorStub::InitializeInterfaceDescriptor(
320 Isolate* isolate,
321 CodeStubInterfaceDescriptor* descriptor) {
322 InitializeInternalArrayConstructorDescriptor(isolate, descriptor, 1);
323 }
324
325
326 void InternalArrayNArgumentsConstructorStub::InitializeInterfaceDescriptor(
327 Isolate* isolate,
328 CodeStubInterfaceDescriptor* descriptor) {
329 InitializeInternalArrayConstructorDescriptor(isolate, descriptor, -1);
330 }
331
332
333 void ToBooleanStub::InitializeInterfaceDescriptor(
334 Isolate* isolate,
335 CodeStubInterfaceDescriptor* descriptor) {
336 // x0: value
337 static Register registers[] = { x0 };
338 descriptor->register_param_count_ = sizeof(registers) / sizeof(registers[0]);
339 descriptor->register_params_ = registers;
340 descriptor->deoptimization_handler_ = FUNCTION_ADDR(ToBooleanIC_Miss);
341 descriptor->SetMissHandler(
342 ExternalReference(IC_Utility(IC::kToBooleanIC_Miss), isolate));
343 }
344
345
346 void StoreGlobalStub::InitializeInterfaceDescriptor(
347 Isolate* isolate,
348 CodeStubInterfaceDescriptor* descriptor) {
349 // x1: receiver
350 // x2: key (unused)
351 // x0: value
352 static Register registers[] = { x1, x2, x0 };
353 descriptor->register_param_count_ = sizeof(registers) / sizeof(registers[0]);
354 descriptor->register_params_ = registers;
355 descriptor->deoptimization_handler_ =
356 FUNCTION_ADDR(StoreIC_MissFromStubFailure);
357 }
358
359
360 void ElementsTransitionAndStoreStub::InitializeInterfaceDescriptor(
361 Isolate* isolate,
362 CodeStubInterfaceDescriptor* descriptor) {
363 // x0: value
364 // x3: target map
365 // x1: key
366 // x2: receiver
367 static Register registers[] = { x0, x3, x1, x2 };
368 descriptor->register_param_count_ = sizeof(registers) / sizeof(registers[0]);
369 descriptor->register_params_ = registers;
370 descriptor->deoptimization_handler_ =
371 FUNCTION_ADDR(ElementsTransitionAndStoreIC_Miss);
372 }
373
374
375 void BinaryOpICStub::InitializeInterfaceDescriptor(
376 Isolate* isolate,
377 CodeStubInterfaceDescriptor* descriptor) {
378 // x1: left operand
379 // x0: right operand
380 static Register registers[] = { x1, x0 };
381 descriptor->register_param_count_ = sizeof(registers) / sizeof(registers[0]);
382 descriptor->register_params_ = registers;
383 descriptor->deoptimization_handler_ = FUNCTION_ADDR(BinaryOpIC_Miss);
384 descriptor->SetMissHandler(
385 ExternalReference(IC_Utility(IC::kBinaryOpIC_Miss), isolate));
386 }
387
388
389 void BinaryOpWithAllocationSiteStub::InitializeInterfaceDescriptor(
390 Isolate* isolate,
391 CodeStubInterfaceDescriptor* descriptor) {
392 // x2: allocation site
393 // x1: left operand
394 // x0: right operand
395 static Register registers[] = { x2, x1, x0 };
396 descriptor->register_param_count_ = sizeof(registers) / sizeof(registers[0]);
397 descriptor->register_params_ = registers;
398 descriptor->deoptimization_handler_ =
399 FUNCTION_ADDR(BinaryOpIC_MissWithAllocationSite);
400 }
401
402
403 void StringAddStub::InitializeInterfaceDescriptor(
404 Isolate* isolate,
405 CodeStubInterfaceDescriptor* descriptor) {
406 // x1: left operand
407 // x0: right operand
408 static Register registers[] = { x1, x0 };
409 descriptor->register_param_count_ = sizeof(registers) / sizeof(registers[0]);
410 descriptor->register_params_ = registers;
411 descriptor->deoptimization_handler_ =
412 Runtime::FunctionForId(Runtime::kStringAdd)->entry;
413 }
414
415
416 void CallDescriptors::InitializeForIsolate(Isolate* isolate) {
417 static PlatformCallInterfaceDescriptor default_descriptor =
418 PlatformCallInterfaceDescriptor(CAN_INLINE_TARGET_ADDRESS);
419
420 static PlatformCallInterfaceDescriptor noInlineDescriptor =
421 PlatformCallInterfaceDescriptor(NEVER_INLINE_TARGET_ADDRESS);
422
423 {
424 CallInterfaceDescriptor* descriptor =
425 isolate->call_descriptor(Isolate::ArgumentAdaptorCall);
426 static Register registers[] = { x1, // JSFunction
427 cp, // context
428 x0, // actual number of arguments
429 x2, // expected number of arguments
430 };
431 static Representation representations[] = {
432 Representation::Tagged(), // JSFunction
433 Representation::Tagged(), // context
434 Representation::Integer32(), // actual number of arguments
435 Representation::Integer32(), // expected number of arguments
436 };
437 descriptor->register_param_count_ = 4;
438 descriptor->register_params_ = registers;
439 descriptor->param_representations_ = representations;
440 descriptor->platform_specific_descriptor_ = &default_descriptor;
441 }
442 {
443 CallInterfaceDescriptor* descriptor =
444 isolate->call_descriptor(Isolate::KeyedCall);
445 static Register registers[] = { cp, // context
446 x2, // key
447 };
448 static Representation representations[] = {
449 Representation::Tagged(), // context
450 Representation::Tagged(), // key
451 };
452 descriptor->register_param_count_ = 2;
453 descriptor->register_params_ = registers;
454 descriptor->param_representations_ = representations;
455 descriptor->platform_specific_descriptor_ = &noInlineDescriptor;
456 }
457 {
458 CallInterfaceDescriptor* descriptor =
459 isolate->call_descriptor(Isolate::NamedCall);
460 static Register registers[] = { cp, // context
461 x2, // name
462 };
463 static Representation representations[] = {
464 Representation::Tagged(), // context
465 Representation::Tagged(), // name
466 };
467 descriptor->register_param_count_ = 2;
468 descriptor->register_params_ = registers;
469 descriptor->param_representations_ = representations;
470 descriptor->platform_specific_descriptor_ = &noInlineDescriptor;
471 }
472 {
473 CallInterfaceDescriptor* descriptor =
474 isolate->call_descriptor(Isolate::CallHandler);
475 static Register registers[] = { cp, // context
476 x0, // receiver
477 };
478 static Representation representations[] = {
479 Representation::Tagged(), // context
480 Representation::Tagged(), // receiver
481 };
482 descriptor->register_param_count_ = 2;
483 descriptor->register_params_ = registers;
484 descriptor->param_representations_ = representations;
485 descriptor->platform_specific_descriptor_ = &default_descriptor;
486 }
487 {
488 CallInterfaceDescriptor* descriptor =
489 isolate->call_descriptor(Isolate::ApiFunctionCall);
490 static Register registers[] = { x0, // callee
491 x4, // call_data
492 x2, // holder
493 x1, // api_function_address
494 cp, // context
495 };
496 static Representation representations[] = {
497 Representation::Tagged(), // callee
498 Representation::Tagged(), // call_data
499 Representation::Tagged(), // holder
500 Representation::External(), // api_function_address
501 Representation::Tagged(), // context
502 };
503 descriptor->register_param_count_ = 5;
504 descriptor->register_params_ = registers;
505 descriptor->param_representations_ = representations;
506 descriptor->platform_specific_descriptor_ = &default_descriptor;
507 }
508 }
509
510
511 #define __ ACCESS_MASM(masm)
512
513
514 void HydrogenCodeStub::GenerateLightweightMiss(MacroAssembler* masm) {
515 // Update the static counter each time a new code stub is generated.
516 Isolate* isolate = masm->isolate();
517 isolate->counters()->code_stubs()->Increment();
518
519 CodeStubInterfaceDescriptor* descriptor = GetInterfaceDescriptor(isolate);
520 int param_count = descriptor->register_param_count_;
521 {
522 // Call the runtime system in a fresh internal frame.
523 FrameScope scope(masm, StackFrame::INTERNAL);
524 ASSERT((descriptor->register_param_count_ == 0) ||
525 x0.Is(descriptor->register_params_[param_count - 1]));
526 // Push arguments
527 // TODO(jbramley): Try to push these in blocks.
528 for (int i = 0; i < param_count; ++i) {
529 __ Push(descriptor->register_params_[i]);
530 }
531 ExternalReference miss = descriptor->miss_handler();
532 __ CallExternalReference(miss, descriptor->register_param_count_);
533 }
534
535 __ Ret();
536 }
537
538
539 // See call site for description.
540 static void EmitIdenticalObjectComparison(MacroAssembler* masm,
541 Register left,
542 Register right,
543 Register scratch,
544 FPRegister double_scratch,
545 Label* slow,
546 Condition cond) {
547 ASSERT(!AreAliased(left, right, scratch));
548 Label not_identical, return_equal, heap_number;
549 Register result = x0;
550
551 __ Cmp(right, left);
552 __ B(ne, &not_identical);
553
554 // Test for NaN. Sadly, we can't just compare to Factory::nan_value(),
555 // so we do the second-best thing: test it ourselves.
556 // The operands are identical, and the both-smi case was handled earlier,
557 // so neither of them is a smi. If it's not a heap number, return equal.
558 if ((cond == lt) || (cond == gt)) {
559 __ JumpIfObjectType(right, scratch, scratch, FIRST_SPEC_OBJECT_TYPE, slow,
560 ge);
561 } else {
562 Register right_type = scratch;
563 __ JumpIfObjectType(right, right_type, right_type, HEAP_NUMBER_TYPE,
564 &heap_number);
565 // Comparing JS objects with <=, >= is complicated.
566 if (cond != eq) {
567 __ Cmp(right_type, FIRST_SPEC_OBJECT_TYPE);
568 __ B(ge, slow);
569 // Normally here we fall through to return_equal, but undefined is
570 // special: (undefined == undefined) == true, but
571 // (undefined <= undefined) == false! See ECMAScript 11.8.5.
572 if ((cond == le) || (cond == ge)) {
573 __ Cmp(right_type, ODDBALL_TYPE);
574 __ B(ne, &return_equal);
575 __ JumpIfNotRoot(right, Heap::kUndefinedValueRootIndex, &return_equal);
576 if (cond == le) {
577 // undefined <= undefined should fail.
578 __ Mov(result, GREATER);
579 } else {
580 // undefined >= undefined should fail.
581 __ Mov(result, LESS);
582 }
583 __ Ret();
584 }
585 }
586 }
587
588 __ Bind(&return_equal);
589 if (cond == lt) {
590 __ Mov(result, GREATER); // Things aren't less than themselves.
591 } else if (cond == gt) {
592 __ Mov(result, LESS); // Things aren't greater than themselves.
593 } else {
594 __ Mov(result, EQUAL); // Things are <=, >=, ==, === themselves.
595 }
596 __ Ret();
597
598 // Cases lt and gt have been handled earlier, and case ne is never seen, as
599 // it is handled in the parser (see Parser::ParseBinaryExpression). We are
600 // only concerned with cases ge, le and eq here.
601 if ((cond != lt) && (cond != gt)) {
602 ASSERT((cond == ge) || (cond == le) || (cond == eq));
603 __ Bind(&heap_number);
604 // Left and right are identical pointers to a heap number object. Return
605 // non-equal if the heap number is a NaN, and equal otherwise. Comparing
606 // the number to itself will set the overflow flag iff the number is NaN.
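// (Fcmp reports an unordered comparison by setting the V flag, and a NaN
// operand is the only way a compare-with-self can be unordered, so "vs"
// below means NaN.)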
607 __ Ldr(double_scratch, FieldMemOperand(right, HeapNumber::kValueOffset));
608 __ Fcmp(double_scratch, double_scratch);
609 __ B(vc, &return_equal); // Not NaN, so treat as normal heap number.
610
611 if (cond == le) {
612 __ Mov(result, GREATER);
613 } else {
614 __ Mov(result, LESS);
615 }
616 __ Ret();
617 }
618
619 // No fall through here.
620 if (FLAG_debug_code) {
621 __ Unreachable();
622 }
623
624 __ Bind(&not_identical);
625 }
626
627
628 // See call site for description.
629 static void EmitStrictTwoHeapObjectCompare(MacroAssembler* masm,
630 Register left,
631 Register right,
632 Register left_type,
633 Register right_type,
634 Register scratch) {
635 ASSERT(!AreAliased(left, right, left_type, right_type, scratch));
636
637 if (masm->emit_debug_code()) {
638 // We assume that the arguments are not identical.
639 __ Cmp(left, right);
640 __ Assert(ne, kExpectedNonIdenticalObjects);
641 }
642
643 // If either operand is a JS object or an oddball value, then they are not
644 // equal since their pointers are different.
645 // There is no test for undetectability in strict equality.
646 STATIC_ASSERT(LAST_TYPE == LAST_SPEC_OBJECT_TYPE);
647 Label right_non_object;
648
649 __ Cmp(right_type, FIRST_SPEC_OBJECT_TYPE);
650 __ B(lt, &right_non_object);
651
652 // Return non-zero - x0 already contains a non-zero pointer.
653 ASSERT(left.is(x0) || right.is(x0));
654 Label return_not_equal;
655 __ Bind(&return_not_equal);
656 __ Ret();
657
658 __ Bind(&right_non_object);
659
660 // Check for oddballs: true, false, null, undefined.
661 __ Cmp(right_type, ODDBALL_TYPE);
662
663 // If right is not ODDBALL, test left. Otherwise, set eq condition.
664 __ Ccmp(left_type, ODDBALL_TYPE, ZFlag, ne);
665
666 // If right or left is not ODDBALL, test left >= FIRST_SPEC_OBJECT_TYPE.
667 // Otherwise, right or left is ODDBALL, so set a ge condition.
668 __ Ccmp(left_type, FIRST_SPEC_OBJECT_TYPE, NVFlag, ne);
669
670 __ B(ge, &return_not_equal);
671
672 // Internalized strings are unique, so they can only be equal if they are the
673 // same object. We have already tested that case, so if left and right are
674 // both internalized strings, they cannot be equal.
675 STATIC_ASSERT((kInternalizedTag == 0) && (kStringTag == 0));
676 __ Orr(scratch, left_type, right_type);
677 __ TestAndBranchIfAllClear(
678 scratch, kIsNotStringMask | kIsNotInternalizedMask, &return_not_equal);
679 }
680
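The Cmp/Ccmp sequence above relies on the A64 conditional compare: when its
condition fails, Ccmp does not compare at all but loads the flags with its
immediate NZCV operand. A host-side sketch of the predicate the chain
computes (illustrative only; right_type is already known to be below
FIRST_SPEC_OBJECT_TYPE at this point):

  bool ShouldReturnNotEqual(int left_type, int right_type) {
    return (right_type == ODDBALL_TYPE) ||         // Cmp
           (left_type == ODDBALL_TYPE) ||          // Ccmp, ZFlag fallback
           (left_type >= FIRST_SPEC_OBJECT_TYPE);  // Ccmp, NVFlag fallback
  }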
681
682 // See call site for description.
683 static void EmitSmiNonsmiComparison(MacroAssembler* masm,
684 Register left,
685 Register right,
686 FPRegister left_d,
687 FPRegister right_d,
688 Register scratch,
689 Label* slow,
690 bool strict) {
691 ASSERT(!AreAliased(left, right, scratch));
692 ASSERT(!AreAliased(left_d, right_d));
693 ASSERT((left.is(x0) && right.is(x1)) ||
694 (right.is(x0) && left.is(x1)));
695 Register result = x0;
696
697 Label right_is_smi, done;
698 __ JumpIfSmi(right, &right_is_smi);
699
700 // Left is the smi. Check whether right is a heap number.
701 if (strict) {
702 // If right is not a number and left is a smi, then strict equality cannot
703 // succeed. Return non-equal.
704 Label is_heap_number;
705 __ JumpIfObjectType(right, scratch, scratch, HEAP_NUMBER_TYPE,
706 &is_heap_number);
707 // Register right is a non-zero pointer, which is a valid NOT_EQUAL result.
708 if (!right.is(result)) {
709 __ Mov(result, NOT_EQUAL);
710 }
711 __ Ret();
712 __ Bind(&is_heap_number);
713 } else {
714 // Smi compared non-strictly with a non-smi, non-heap-number. Call the
715 // runtime.
716 __ JumpIfNotObjectType(right, scratch, scratch, HEAP_NUMBER_TYPE, slow);
717 }
718
719 // Left is the smi. Right is a heap number. Load right value into right_d, and
720 // convert left smi into double in left_d.
721 __ Ldr(right_d, FieldMemOperand(right, HeapNumber::kValueOffset));
722 __ SmiUntagToDouble(left_d, left);
723 __ B(&done);
724
725 __ Bind(&right_is_smi);
726 // Right is a smi. Check whether the non-smi left is a heap number.
727 if (strict) {
728 // If left is not a number and right is a smi then strict equality cannot
729 // succeed. Return non-equal.
730 Label is_heap_number;
731 __ JumpIfObjectType(left, scratch, scratch, HEAP_NUMBER_TYPE,
732 &is_heap_number);
733 // Register left is a non-zero pointer, which is a valid NOT_EQUAL result.
734 if (!left.is(result)) {
735 __ Mov(result, NOT_EQUAL);
736 }
737 __ Ret();
738 __ Bind(&is_heap_number);
739 } else {
740 // Smi compared non-strictly with a non-smi, non-heap-number. Call the
741 // runtime.
742 __ JumpIfNotObjectType(left, scratch, scratch, HEAP_NUMBER_TYPE, slow);
743 }
744
745 // Right is the smi. Left is a heap number. Load left value into left_d, and
746 // convert right smi into double in right_d.
747 __ Ldr(left_d, FieldMemOperand(left, HeapNumber::kValueOffset));
748 __ SmiUntagToDouble(right_d, right);
749
750 // Fall through to both_loaded_as_doubles.
751 __ Bind(&done);
752 }
753
754
755 // Fast negative check for internalized-to-internalized equality.
756 // See call site for description.
757 static void EmitCheckForInternalizedStringsOrObjects(MacroAssembler* masm,
758 Register left,
759 Register right,
760 Register left_map,
761 Register right_map,
762 Register left_type,
763 Register right_type,
764 Label* possible_strings,
765 Label* not_both_strings) {
766 ASSERT(!AreAliased(left, right, left_map, right_map, left_type, right_type));
767 Register result = x0;
768
769 Label object_test;
770 STATIC_ASSERT((kInternalizedTag == 0) && (kStringTag == 0));
771 // TODO(all): reexamine this branch sequence for optimisation wrt branch
772 // prediction.
773 __ Tbnz(right_type, MaskToBit(kIsNotStringMask), &object_test);
774 __ Tbnz(right_type, MaskToBit(kIsNotInternalizedMask), possible_strings);
775 __ Tbnz(left_type, MaskToBit(kIsNotStringMask), not_both_strings);
776 __ Tbnz(left_type, MaskToBit(kIsNotInternalizedMask), possible_strings);
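// (Tbnz branches if the numbered bit is set; MaskToBit converts a
// single-bit mask such as kIsNotStringMask into that bit's index.)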
777
778 // Both are internalized. We already checked that they weren't the same
779 // pointer, so they are not equal.
780 __ Mov(result, NOT_EQUAL);
781 __ Ret();
782
783 __ Bind(&object_test);
784
785 __ Cmp(right_type, FIRST_SPEC_OBJECT_TYPE);
786
787 // If right >= FIRST_SPEC_OBJECT_TYPE, test left.
788 // Otherwise, right < FIRST_SPEC_OBJECT_TYPE, so set lt condition.
789 __ Ccmp(left_type, FIRST_SPEC_OBJECT_TYPE, NFlag, ge);
790
791 __ B(lt, not_both_strings);
792
793 // If both objects are undetectable, they are equal. Otherwise, they are not
794 // equal, since they are different objects and an object is not equal to
795 // undefined.
796
797 // Returning here, so we can corrupt right_type and left_type.
798 Register right_bitfield = right_type;
799 Register left_bitfield = left_type;
800 __ Ldrb(right_bitfield, FieldMemOperand(right_map, Map::kBitFieldOffset));
801 __ Ldrb(left_bitfield, FieldMemOperand(left_map, Map::kBitFieldOffset));
802 __ And(result, right_bitfield, left_bitfield);
803 __ And(result, result, 1 << Map::kIsUndetectable);
804 __ Eor(result, result, 1 << Map::kIsUndetectable);
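// result is now 0 (EQUAL) iff both bitfields had Map::kIsUndetectable set;
// otherwise it is 1 << Map::kIsUndetectable, a valid non-zero NOT_EQUAL.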
805 __ Ret();
806 }
807
808
809 static void ICCompareStub_CheckInputType(MacroAssembler* masm,
810 Register input,
811 Register scratch,
812 CompareIC::State expected,
813 Label* fail) {
814 Label ok;
815 if (expected == CompareIC::SMI) {
816 __ JumpIfNotSmi(input, fail);
817 } else if (expected == CompareIC::NUMBER) {
818 __ JumpIfSmi(input, &ok);
819 __ CheckMap(input, scratch, Heap::kHeapNumberMapRootIndex, fail,
820 DONT_DO_SMI_CHECK);
821 }
822 // We could be strict about internalized/non-internalized here, but as long as
823 // hydrogen doesn't care, the stub doesn't have to care either.
824 __ Bind(&ok);
825 }
826
827
828 void ICCompareStub::GenerateGeneric(MacroAssembler* masm) {
829 Register lhs = x1;
830 Register rhs = x0;
831 Register result = x0;
832 Condition cond = GetCondition();
833
834 Label miss;
835 ICCompareStub_CheckInputType(masm, lhs, x2, left_, &miss);
836 ICCompareStub_CheckInputType(masm, rhs, x3, right_, &miss);
837
838 Label slow; // Call builtin.
839 Label not_smis, both_loaded_as_doubles;
840 Label not_two_smis, smi_done;
841 __ JumpIfEitherNotSmi(lhs, rhs, &not_two_smis);
842 __ SmiUntag(lhs);
843 __ Sub(result, lhs, Operand::UntagSmi(rhs));
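// (The untagged difference is negative, zero or positive, which is exactly
// the LESS / EQUAL / GREATER sign contract the caller expects.)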
844 __ Ret();
845
846 __ Bind(&not_two_smis);
847
848 // NOTICE! This code is only reached after a smi-fast-case check, so it is
849 // certain that at least one operand isn't a smi.
850
851 // Handle the case where the objects are identical. Either returns the answer
852 // or goes to slow. Only falls through if the objects were not identical.
853 EmitIdenticalObjectComparison(masm, lhs, rhs, x10, d0, &slow, cond);
854
855 // If either is a smi (we know that at least one is not a smi), then they can
856 // only be strictly equal if the other is a HeapNumber.
857 __ JumpIfBothNotSmi(lhs, rhs, &not_smis);
858
859 // Exactly one operand is a smi. EmitSmiNonsmiComparison generates code that
860 // can:
861 // 1) Return the answer.
862 // 2) Branch to the slow case.
863 // 3) Fall through to both_loaded_as_doubles.
864 // In case 3, we have found out that we were dealing with a number-number
865 // comparison. The double values of the numbers have been loaded, right into
866 // rhs_d, left into lhs_d.
867 FPRegister rhs_d = d0;
868 FPRegister lhs_d = d1;
869 EmitSmiNonsmiComparison(masm, lhs, rhs, lhs_d, rhs_d, x10, &slow, strict());
870
871 __ Bind(&both_loaded_as_doubles);
872 // The arguments have been converted to doubles and stored in rhs_d and
873 // lhs_d.
874 Label nan;
875 __ Fcmp(lhs_d, rhs_d);
876 __ B(vs, &nan); // Overflow flag set if either is NaN.
877 STATIC_ASSERT((LESS == -1) && (EQUAL == 0) && (GREATER == 1));
878 __ Cset(result, gt); // gt => 1, otherwise (lt, eq) => 0 (EQUAL).
879 __ Csinv(result, result, xzr, ge); // lt => -1, gt => 1, eq => 0.
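// For example, if lhs_d < rhs_d: 'gt' fails, so Cset writes 0; then 'ge'
// fails, so Csinv writes ~xzr == -1 (LESS). Equality leaves 0 (EQUAL).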
880 __ Ret();
881
882 __ Bind(&nan);
883 // Left and/or right is a NaN. Load the result register with whatever makes
884 // the comparison fail, since comparisons with NaN always fail (except ne,
885 // which is filtered out at a higher level.)
886 ASSERT(cond != ne);
887 if ((cond == lt) || (cond == le)) {
888 __ Mov(result, GREATER);
889 } else {
890 __ Mov(result, LESS);
891 }
892 __ Ret();
893
894 __ Bind(&not_smis);
895 // At this point we know we are dealing with two different objects, and
896 // neither of them is a smi. The objects are in rhs_ and lhs_.
897
898 // Load the maps and types of the objects.
899 Register rhs_map = x10;
900 Register rhs_type = x11;
901 Register lhs_map = x12;
902 Register lhs_type = x13;
903 __ Ldr(rhs_map, FieldMemOperand(rhs, HeapObject::kMapOffset));
904 __ Ldr(lhs_map, FieldMemOperand(lhs, HeapObject::kMapOffset));
905 __ Ldrb(rhs_type, FieldMemOperand(rhs_map, Map::kInstanceTypeOffset));
906 __ Ldrb(lhs_type, FieldMemOperand(lhs_map, Map::kInstanceTypeOffset));
907
908 if (strict()) {
909 // This emits a non-equal return sequence for some object types, or falls
910 // through if it was not lucky.
911 EmitStrictTwoHeapObjectCompare(masm, lhs, rhs, lhs_type, rhs_type, x14);
912 }
913
914 Label check_for_internalized_strings;
915 Label flat_string_check;
916 // Check for heap number comparison. Branch to earlier double comparison code
917 // if they are heap numbers, otherwise, branch to internalized string check.
918 __ Cmp(rhs_type, HEAP_NUMBER_TYPE);
919 __ B(ne, &check_for_internalized_strings);
920 __ Cmp(lhs_map, rhs_map);
921
922 // If maps aren't equal, lhs_ and rhs_ are not heap numbers. Branch to flat
923 // string check.
924 __ B(ne, &flat_string_check);
925
926 // Both lhs_ and rhs_ are heap numbers. Load them and branch to the double
927 // comparison code.
928 __ Ldr(lhs_d, FieldMemOperand(lhs, HeapNumber::kValueOffset));
929 __ Ldr(rhs_d, FieldMemOperand(rhs, HeapNumber::kValueOffset));
930 __ B(&both_loaded_as_doubles);
931
932 __ Bind(&check_for_internalized_strings);
933 // In the strict case, the EmitStrictTwoHeapObjectCompare already took care
934 // of internalized strings.
935 if ((cond == eq) && !strict()) {
936 // Returns an answer for two internalized strings or two detectable
937 // objects; otherwise branches to the string case or the not-both-strings case.
938 EmitCheckForInternalizedStringsOrObjects(masm, lhs, rhs, lhs_map, rhs_map,
939 lhs_type, rhs_type,
940 &flat_string_check, &slow);
941 }
942
943 // Check for both being sequential ASCII strings, and inline if that is the
944 // case.
945 __ Bind(&flat_string_check);
946 __ JumpIfBothInstanceTypesAreNotSequentialAscii(lhs_type, rhs_type, x14,
947 x15, &slow);
948
949 Isolate* isolate = masm->isolate();
950 __ IncrementCounter(isolate->counters()->string_compare_native(), 1, x10,
951 x11);
952 if (cond == eq) {
953 StringCompareStub::GenerateFlatAsciiStringEquals(masm, lhs, rhs,
954 x10, x11, x12);
955 } else {
956 StringCompareStub::GenerateCompareFlatAsciiStrings(masm, lhs, rhs,
957 x10, x11, x12, x13);
958 }
959
960 // Never fall through to here.
961 if (FLAG_debug_code) {
962 __ Unreachable();
963 }
964
965 __ Bind(&slow);
966
967 __ Push(lhs, rhs);
968 // Figure out which native to call and setup the arguments.
969 Builtins::JavaScript native;
970 if (cond == eq) {
971 native = strict() ? Builtins::STRICT_EQUALS : Builtins::EQUALS;
972 } else {
973 native = Builtins::COMPARE;
974 int ncr; // NaN compare result
975 if ((cond == lt) || (cond == le)) {
976 ncr = GREATER;
977 } else {
978 ASSERT((cond == gt) || (cond == ge)); // remaining cases
979 ncr = LESS;
980 }
981 __ Mov(x10, Operand(Smi::FromInt(ncr)));
982 __ Push(x10);
983 }
984
985 // Call the native; it returns -1 (less), 0 (equal), or 1 (greater)
986 // tagged as a small integer.
987 __ InvokeBuiltin(native, JUMP_FUNCTION);
988
989 __ Bind(&miss);
990 GenerateMiss(masm);
991 }
992
993
994 void StoreBufferOverflowStub::Generate(MacroAssembler* masm) {
995 // Preserve caller-saved registers x0-x7 and x10-x15. We don't care if x8, x9,
996 // ip0 and ip1 are corrupted by the call into C.
997 CPURegList saved_regs = kCallerSaved;
998 saved_regs.Remove(ip0);
999 saved_regs.Remove(ip1);
1000 saved_regs.Remove(x8);
1001 saved_regs.Remove(x9);
1002
1003 // We don't allow a GC during a store buffer overflow so there is no need to
1004 // store the registers in any particular way, but we do have to store and
1005 // restore them.
1006 __ PushCPURegList(saved_regs);
1007 if (save_doubles_ == kSaveFPRegs) {
1008 __ PushCPURegList(kCallerSavedFP);
1009 }
1010
1011 AllowExternalCallThatCantCauseGC scope(masm);
1012 __ Mov(x0, Operand(ExternalReference::isolate_address(masm->isolate())));
1013 __ CallCFunction(
1014 ExternalReference::store_buffer_overflow_function(masm->isolate()),
1015 1, 0);
1016
1017 if (save_doubles_ == kSaveFPRegs) {
1018 __ PopCPURegList(kCallerSavedFP);
1019 }
1020 __ PopCPURegList(saved_regs);
1021 __ Ret();
1022 }
1023
1024
1025 void StoreBufferOverflowStub::GenerateFixedRegStubsAheadOfTime(
1026 Isolate* isolate) {
1027 StoreBufferOverflowStub stub1(kDontSaveFPRegs);
1028 stub1.GetCode(isolate);
1029 StoreBufferOverflowStub stub2(kSaveFPRegs);
1030 stub2.GetCode(isolate);
1031 }
1032
1033
1034 void MathPowStub::Generate(MacroAssembler* masm) {
1035 // Stack on entry:
1036 // jssp[0]: Exponent (as a tagged value).
1037 // jssp[1]: Base (as a tagged value).
1038 //
1039 // The (tagged) result will be returned in x0, as a heap number.
1040
1041 Register result_tagged = x0;
1042 Register base_tagged = x10;
1043 Register exponent_tagged = x11;
1044 Register exponent_integer = x12;
1045 Register scratch1 = x14;
1046 Register scratch0 = x15;
1047 Register saved_lr = x19;
1048 FPRegister result_double = d0;
1049 FPRegister base_double = d0;
1050 FPRegister exponent_double = d1;
1051 FPRegister base_double_copy = d2;
1052 FPRegister scratch1_double = d6;
1053 FPRegister scratch0_double = d7;
1054
1055 // A fast-path for integer exponents.
1056 Label exponent_is_smi, exponent_is_integer;
1057 // Bail out to runtime.
1058 Label call_runtime;
1059 // Allocate a heap number for the result, and return it.
1060 Label done;
1061
1062 // Unpack the inputs.
1063 if (exponent_type_ == ON_STACK) {
1064 Label base_is_smi;
1065 Label unpack_exponent;
1066
1067 __ Pop(exponent_tagged, base_tagged);
1068
1069 __ JumpIfSmi(base_tagged, &base_is_smi);
1070 __ JumpIfNotHeapNumber(base_tagged, &call_runtime);
1071 // base_tagged is a heap number, so load its double value.
1072 __ Ldr(base_double, FieldMemOperand(base_tagged, HeapNumber::kValueOffset));
1073 __ B(&unpack_exponent);
1074 __ Bind(&base_is_smi);
1075 // base_tagged is a SMI, so untag it and convert it to a double.
1076 __ SmiUntagToDouble(base_double, base_tagged);
1077
1078 __ Bind(&unpack_exponent);
1079 // x10 base_tagged The tagged base (input).
1080 // x11 exponent_tagged The tagged exponent (input).
1081 // d1 base_double The base as a double.
1082 __ JumpIfSmi(exponent_tagged, &exponent_is_smi);
1083 __ JumpIfNotHeapNumber(exponent_tagged, &call_runtime);
1084 // exponent_tagged is a heap number, so load its double value.
1085 __ Ldr(exponent_double,
1086 FieldMemOperand(exponent_tagged, HeapNumber::kValueOffset));
1087 } else if (exponent_type_ == TAGGED) {
1088 __ JumpIfSmi(exponent_tagged, &exponent_is_smi);
1089 __ Ldr(exponent_double,
1090 FieldMemOperand(exponent_tagged, HeapNumber::kValueOffset));
1091 }
1092
1093 // Handle double (heap number) exponents.
1094 if (exponent_type_ != INTEGER) {
1095 // Detect integer exponents stored as doubles and handle those in the
1096 // integer fast-path.
1097 __ TryConvertDoubleToInt64(exponent_integer, exponent_double,
1098 scratch0_double, &exponent_is_integer);
1099
1100 if (exponent_type_ == ON_STACK) {
1101 FPRegister half_double = d3;
1102 FPRegister minus_half_double = d4;
1103 FPRegister zero_double = d5;
1104 // Detect square root case. Crankshaft detects constant +/-0.5 at compile
1105 // time and uses DoMathPowHalf instead. We then skip this check for
1106 // non-constant cases of +/-0.5 as these hardly occur.
1107
1108 __ Fmov(minus_half_double, -0.5);
1109 __ Fmov(half_double, 0.5);
1110 __ Fcmp(minus_half_double, exponent_double);
1111 __ Fccmp(half_double, exponent_double, NZFlag, ne);
1112 // Condition flags at this point:
1113 // 0.5: nZCv // Identified by eq && pl
1114 // -0.5: NZcv // Identified by eq && mi
1115 // other: ?z?? // Identified by ne
1116 __ B(ne, &call_runtime);
1117
1118 // The exponent is 0.5 or -0.5.
1119
1120 // Given that exponent is known to be either 0.5 or -0.5, the following
1121 // special cases could apply (according to ECMA-262 15.8.2.13):
1122 //
1123 // base.isNaN(): The result is NaN.
1124 // (base == +INFINITY) || (base == -INFINITY)
1125 // exponent == 0.5: The result is +INFINITY.
1126 // exponent == -0.5: The result is +0.
1127 // (base == +0) || (base == -0)
1128 // exponent == 0.5: The result is +0.
1129 // exponent == -0.5: The result is +INFINITY.
1130 // (base < 0) && base.isFinite(): The result is NaN.
1131 //
1132 // Fsqrt (and Fdiv for the -0.5 case) can handle all of those except
1133 // where base is -INFINITY or -0.
1134
1135 // Add +0 to base. This has no effect other than turning -0 into +0.
1136 __ Fmov(zero_double, 0.0);
1137 __ Fadd(base_double, base_double, zero_double);
1138 // The operation -0+0 results in +0 in all cases except where the
1139 // FPCR rounding mode is 'round towards minus infinity' (RM). The
1140 // A64 simulator does not currently simulate FPCR (where the rounding
1141 // mode is set), so test the operation with some debug code.
1142 if (masm->emit_debug_code()) {
1143 Register temp = masm->Tmp1();
1144 // d5 zero_double The value +0.0 as a double.
1145 __ Fneg(scratch0_double, zero_double);
1146 // Verify that we correctly generated +0.0 and -0.0.
1147 // bits(+0.0) = 0x0000000000000000
1148 // bits(-0.0) = 0x8000000000000000
1149 __ Fmov(temp, zero_double);
1150 __ CheckRegisterIsClear(temp, kCouldNotGenerateZero);
1151 __ Fmov(temp, scratch0_double);
1152 __ Eor(temp, temp, kDSignMask);
1153 __ CheckRegisterIsClear(temp, kCouldNotGenerateNegativeZero);
1154 // Check that -0.0 + 0.0 == +0.0.
1155 __ Fadd(scratch0_double, scratch0_double, zero_double);
1156 __ Fmov(temp, scratch0_double);
1157 __ CheckRegisterIsClear(temp, kExpectedPositiveZero);
1158 }
1159
1160 // If base is -INFINITY, make it +INFINITY.
1161 // * Calculate base - base: All infinities will become NaNs since both
1162 // -INFINITY+INFINITY and +INFINITY-INFINITY are NaN in A64.
1163 // * If the result is NaN, calculate abs(base).
1164 __ Fsub(scratch0_double, base_double, base_double);
1165 __ Fcmp(scratch0_double, 0.0);
1166 __ Fabs(scratch1_double, base_double);
1167 __ Fcsel(base_double, scratch1_double, base_double, vs);
1168
1169 // Calculate the square root of base.
1170 __ Fsqrt(result_double, base_double);
1171 __ Fcmp(exponent_double, 0.0);
1172 __ B(ge, &done); // Finish now for exponents of 0.5.
1173 // Find the inverse for exponents of -0.5.
1174 __ Fmov(scratch0_double, 1.0);
1175 __ Fdiv(result_double, scratch0_double, result_double);
1176 __ B(&done);
1177 }
1178
1179 {
1180 AllowExternalCallThatCantCauseGC scope(masm);
1181 __ Mov(saved_lr, lr);
1182 __ CallCFunction(
1183 ExternalReference::power_double_double_function(masm->isolate()),
1184 0, 2);
1185 __ Mov(lr, saved_lr);
1186 __ B(&done);
1187 }
1188
1189 // Handle SMI exponents.
1190 __ Bind(&exponent_is_smi);
1191 // x10 base_tagged The tagged base (input).
1192 // x11 exponent_tagged The tagged exponent (input).
1193 // d1 base_double The base as a double.
1194 __ SmiUntag(exponent_integer, exponent_tagged);
1195 }
1196
1197 __ Bind(&exponent_is_integer);
1198 // x10 base_tagged The tagged base (input).
1199 // x11 exponent_tagged The tagged exponent (input).
1200 // x12 exponent_integer The exponent as an integer.
1201 // d1 base_double The base as a double.
1202
1203 // Find abs(exponent). For negative exponents, we can find the inverse later.
1204 Register exponent_abs = x13;
1205 __ Cmp(exponent_integer, 0);
1206 __ Cneg(exponent_abs, exponent_integer, mi);
1207 // x13 exponent_abs The value of abs(exponent_integer).
1208
1209 // Repeatedly multiply to calculate the power.
1210 // result = 1.0;
1211 // For each bit n (exponent_integer{n}) {
1212 // if (exponent_integer{n}) {
1213 // result *= base;
1214 // }
1215 // base *= base;
1216 // if (remaining bits in exponent_integer are all zero) {
1217 // break;
1218 // }
1219 // }
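// For example, exponent_abs == 5 (binary 101): bit 0 is set, so result
// becomes base; the loop squares to base^2, skips clear bit 1, squares to
// base^4, and set bit 2 gives result = base * base^4 = base^5.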
1220 Label power_loop, power_loop_entry, power_loop_exit;
1221 __ Fmov(scratch1_double, base_double);
1222 __ Fmov(base_double_copy, base_double);
1223 __ Fmov(result_double, 1.0);
1224 __ B(&power_loop_entry);
1225
1226 __ Bind(&power_loop);
1227 __ Fmul(scratch1_double, scratch1_double, scratch1_double);
1228 __ Lsr(exponent_abs, exponent_abs, 1);
1229 __ Cbz(exponent_abs, &power_loop_exit);
1230
1231 __ Bind(&power_loop_entry);
1232 __ Tbz(exponent_abs, 0, &power_loop);
1233 __ Fmul(result_double, result_double, scratch1_double);
1234 __ B(&power_loop);
1235
1236 __ Bind(&power_loop_exit);
1237
1238 // If the exponent was positive, result_double holds the result.
1239 __ Tbz(exponent_integer, kXSignBit, &done);
1240
1241 // The exponent was negative, so find the inverse.
1242 __ Fmov(scratch0_double, 1.0);
1243 __ Fdiv(result_double, scratch0_double, result_double);
1244 // ECMA-262 only requires Math.pow to return an 'implementation-dependent
1245 // approximation' of base^exponent. However, mjsunit/math-pow uses Math.pow
1246 // to calculate the subnormal value 2^-1074. This method of calculating
1247 // negative powers doesn't work because 2^1074 overflows to infinity. To
1248 // catch this corner-case, we bail out if the result was 0. (This can only
1249 // occur if the divisor is infinity or the base is zero.)
1250 __ Fcmp(result_double, 0.0);
1251 __ B(&done, ne);
1252
1253 if (exponent_type_ == ON_STACK) {
1254 // Bail out to runtime code.
1255 __ Bind(&call_runtime);
1256 // Put the arguments back on the stack.
1257 __ Push(base_tagged, exponent_tagged);
1258 __ TailCallRuntime(Runtime::kMath_pow_cfunction, 2, 1);
1259
1260 // Return.
1261 __ Bind(&done);
1262 __ AllocateHeapNumber(result_tagged, &call_runtime, scratch0, scratch1);
1263 __ Str(result_double,
1264 FieldMemOperand(result_tagged, HeapNumber::kValueOffset));
1265 ASSERT(result_tagged.is(x0));
1266 __ IncrementCounter(
1267 masm->isolate()->counters()->math_pow(), 1, scratch0, scratch1);
1268 __ Ret();
1269 } else {
1270 AllowExternalCallThatCantCauseGC scope(masm);
1271 __ Mov(saved_lr, lr);
1272 __ Fmov(base_double, base_double_copy);
1273 __ Scvtf(exponent_double, exponent_integer);
1274 __ CallCFunction(
1275 ExternalReference::power_double_double_function(masm->isolate()),
1276 0, 2);
1277 __ Mov(lr, saved_lr);
1278 __ Bind(&done);
1279 __ IncrementCounter(
1280 masm->isolate()->counters()->math_pow(), 1, scratch0, scratch1);
1281 __ Ret();
1282 }
1283 }
1284
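The loop above is binary (square-and-multiply) exponentiation, costing
O(log n) multiplications. A host-side C++ sketch of the same algorithm,
illustrative only and not part of this patch:

  double PowBySquaring(double base, uint64_t exponent) {
    double result = 1.0;
    while (exponent != 0) {   // Cbz(exponent_abs, &power_loop_exit)
      if (exponent & 1) {     // Tbz(exponent_abs, 0, &power_loop)
        result *= base;       // Fmul(result_double, ...)
      }
      base *= base;           // Fmul(scratch1_double, scratch1_double, ...)
      exponent >>= 1;         // Lsr(exponent_abs, exponent_abs, 1)
    }
    return result;
  }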
1285
1286 void CodeStub::GenerateStubsAheadOfTime(Isolate* isolate) {
1287 // It is important that the following stubs are generated in this order
1288 // because pregenerated stubs can only call other pregenerated stubs.
1289 // RecordWriteStub uses StoreBufferOverflowStub, which in turn uses
1290 // CEntryStub.
1291 CEntryStub::GenerateAheadOfTime(isolate);
1292 StoreBufferOverflowStub::GenerateFixedRegStubsAheadOfTime(isolate);
1293 StubFailureTrampolineStub::GenerateAheadOfTime(isolate);
1294 ArrayConstructorStubBase::GenerateStubsAheadOfTime(isolate);
1295 CreateAllocationSiteStub::GenerateAheadOfTime(isolate);
1296 BinaryOpICStub::GenerateAheadOfTime(isolate);
1297 BinaryOpICWithAllocationSiteStub::GenerateAheadOfTime(isolate);
1298 }
1299
1300
1301 void CodeStub::GenerateFPStubs(Isolate* isolate) {
1302 // Floating-point code doesn't get special handling in A64, so there's
1303 // nothing to do here.
1304 USE(isolate);
1305 }
1306
1307
1308 static void JumpIfOOM(MacroAssembler* masm,
1309 Register value,
1310 Register scratch,
1311 Label* oom_label) {
1312 STATIC_ASSERT(Failure::OUT_OF_MEMORY_EXCEPTION == 3);
1313 STATIC_ASSERT(kFailureTag == 3);
1314 __ And(scratch, value, 0xf);
1315 __ Cmp(scratch, 0xf);
1316 __ B(eq, oom_label);
1317 }
1318
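The 0xf mask encodes both STATIC_ASSERTs: with the failure tag 0b11 in the
low two bits and (assuming the 2-bit kFailureTagSize of this era) the
failure type in the next two bits, an out-of-memory failure has low nibble
(3 << 2) | 3 == 0xf. A host-side sketch, illustrative only:

  bool IsOutOfMemoryFailure(uint64_t value) {
    const uint64_t kType = 3;  // Failure::OUT_OF_MEMORY_EXCEPTION
    const uint64_t kTag = 3;   // kFailureTag
    return (value & 0xf) == ((kType << 2) | kTag);  // (3 << 2) | 3 == 0xf
  }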
1319
1320 bool CEntryStub::NeedsImmovableCode() {
1321 // CEntryStub stores the return address on the stack before calling into
1322 // C++ code. In some cases, the VM accesses this address, but it is not used
1323 // when the C++ code returns to the stub because LR holds the return address
1324 // in AAPCS64. If the stub is moved (perhaps during a GC), we could end up
1325 // returning to dead code.
1326 // TODO(jbramley): Whilst this is the only analysis that makes sense, I can't
1327 // find any comment to confirm this, and I don't hit any crashes whatever
1328 // this function returns. The analysis should be properly confirmed.
1329 return true;
1330 }
1331
1332
1333 void CEntryStub::GenerateAheadOfTime(Isolate* isolate) {
1334 CEntryStub stub(1, kDontSaveFPRegs);
1335 stub.GetCode(isolate);
1336 CEntryStub stub_fp(1, kSaveFPRegs);
1337 stub_fp.GetCode(isolate);
1338 }
1339
1340
1341 void CEntryStub::GenerateCore(MacroAssembler* masm,
1342 Label* throw_normal,
1343 Label* throw_termination,
1344 Label* throw_out_of_memory,
1345 bool do_gc,
1346 bool always_allocate) {
1347 // x0 : Result parameter for PerformGC, if do_gc is true.
1348 // x21 : argv
1349 // x22 : argc
1350 // x23 : target
1351 //
1352 // The stack (on entry) holds the arguments and the receiver, with the
1353 // receiver at the highest address:
1354 //
1355 // argv[8]: receiver
1356 // argv -> argv[0]: arg[argc-2]
1357 // ... ...
1358 // argv[...]: arg[1]
1359 // argv[...]: arg[0]
1360 //
1361 // Immediately below (after) this is the exit frame, as constructed by
1362 // EnterExitFrame:
1363 // fp[8]: CallerPC (lr)
1364 // fp -> fp[0]: CallerFP (old fp)
1365 // fp[-8]: Space reserved for SPOffset.
1366 // fp[-16]: CodeObject()
1367 // csp[...]: Saved doubles, if saved_doubles is true.
1368 // csp[32]: Alignment padding, if necessary.
1369 // csp[24]: Preserved x23 (used for target).
1370 // csp[16]: Preserved x22 (used for argc).
1371 // csp[8]: Preserved x21 (used for argv).
1372 // csp -> csp[0]: Space reserved for the return address.
1373 //
1374 // After a successful call, the exit frame, preserved registers (x21-x23) and
1375 // the arguments (including the receiver) are dropped or popped as
1376 // appropriate. The stub then returns.
1377 //
1378 // After an unsuccessful call, the exit frame and suchlike are left
1379 // untouched, and the stub either throws an exception by jumping to one of
1380 // the provided throw_ labels, or it falls through. The failure details are
1381 // passed through in x0.
1382 ASSERT(csp.Is(__ StackPointer()));
1383
1384 Isolate* isolate = masm->isolate();
1385
1386 const Register& argv = x21;
1387 const Register& argc = x22;
1388 const Register& target = x23;
1389
1390 if (do_gc) {
1391 // Call Runtime::PerformGC, passing x0 (the result parameter for
1392 // PerformGC) and x1 (the isolate).
1393 __ Mov(x1, Operand(ExternalReference::isolate_address(masm->isolate())));
1394 __ CallCFunction(
1395 ExternalReference::perform_gc_function(isolate), 2, 0);
1396 }
1397
1398 ExternalReference scope_depth =
1399 ExternalReference::heap_always_allocate_scope_depth(isolate);
1400 if (always_allocate) {
1401 __ Mov(x10, Operand(scope_depth));
1402 __ Ldr(x11, MemOperand(x10));
1403 __ Add(x11, x11, 1);
1404 __ Str(x11, MemOperand(x10));
1405 }
1406
1407 // Prepare AAPCS64 arguments to pass to the builtin.
1408 __ Mov(x0, argc);
1409 __ Mov(x1, argv);
1410 __ Mov(x2, Operand(ExternalReference::isolate_address(isolate)));
1411
1412 // Store the return address on the stack, in the space previously allocated
1413 // by EnterExitFrame. The return address is queried by
1414 // ExitFrame::GetStateForFramePointer.
1415 Label return_location;
1416 __ Adr(x12, &return_location);
1417 __ Poke(x12, 0);
1418 if (__ emit_debug_code()) {
1419 // Verify that the slot just below the SP value saved at fp[kSPOffset]
1420 // holds the return location (currently in x12).
1421 Register temp = masm->Tmp1();
1422 __ Ldr(temp, MemOperand(fp, ExitFrameConstants::kSPOffset));
1423 __ Ldr(temp, MemOperand(temp, -static_cast<int64_t>(kXRegSizeInBytes)));
1424 __ Cmp(temp, x12);
1425 __ Check(eq, kReturnAddressNotFoundInFrame);
1426 }
1427
1428 // Call the builtin.
1429 __ Blr(target);
1430 __ Bind(&return_location);
1431 const Register& result = x0;
1432
1433 if (always_allocate) {
1434 __ Mov(x10, Operand(scope_depth));
1435 __ Ldr(x11, MemOperand(x10));
1436 __ Sub(x11, x11, 1);
1437 __ Str(x11, MemOperand(x10));
1438 }
1439
1440 // x0 result The return code from the call.
1441 // x21 argv
1442 // x22 argc
1443 // x23 target
1444 //
1445 // If all of the result bits matching kFailureTagMask are '1', the result is
1446 // a failure. Otherwise, it's an ordinary tagged object and the call was a
1447 // success.
1448 Label failure;
1449 __ And(x10, result, kFailureTagMask);
1450 __ Cmp(x10, kFailureTagMask);
1451 __ B(&failure, eq);
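// (Only failures can match: smis have bit 0 clear and heap objects are
// tagged 0b01, so value & kFailureTagMask == 0b11 identifies a Failure.)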
1452
1453 // The call succeeded, so unwind the stack and return.
1454
1455 // Restore callee-saved registers x21-x23.
1456 __ Mov(x11, argc);
1457
1458 __ Peek(argv, 1 * kPointerSize);
1459 __ Peek(argc, 2 * kPointerSize);
1460 __ Peek(target, 3 * kPointerSize);
1461
1462 __ LeaveExitFrame(save_doubles_, x10, true);
1463 ASSERT(jssp.Is(__ StackPointer()));
1464 // Pop or drop the remaining stack slots and return from the stub.
1465 // jssp[24]: Arguments array (of size argc), including receiver.
1466 // jssp[16]: Preserved x23 (used for target).
1467 // jssp[8]: Preserved x22 (used for argc).
1468 // jssp[0]: Preserved x21 (used for argv).
1469 __ Drop(x11);
1470 __ Ret();
1471
1472 // The stack pointer is still csp if we aren't returning, and the frame
1473 // hasn't changed (except for the return address).
1474 __ SetStackPointer(csp);
1475
1476 __ Bind(&failure);
1477 // The call failed, so check if we need to throw an exception, and fall
1478 // through (to retry) otherwise.
1479
1480 Label retry;
1481 // x0 result The return code from the call, including the failure
1482 // code and details.
1483 // x21 argv
1484 // x22 argc
1485 // x23 target
1486 // Refer to the Failure class for details of the bit layout.
1487 STATIC_ASSERT(Failure::RETRY_AFTER_GC == 0);
1488 __ Tst(result, kFailureTypeTagMask << kFailureTagSize);
1489 __ B(eq, &retry); // RETRY_AFTER_GC
1490
1491 // Special handling of out-of-memory exceptions: Pass the failure result,
1492 // rather than the exception descriptor.
1493 JumpIfOOM(masm, result, x10, throw_out_of_memory);
1494
1495 // Retrieve the pending exception.
1496 const Register& exception = result;
1497 const Register& exception_address = x11;
1498 __ Mov(exception_address,
1499 Operand(ExternalReference(Isolate::kPendingExceptionAddress,
1500 isolate)));
1501 __ Ldr(exception, MemOperand(exception_address));
1502
1503 // See if we just retrieved an OOM exception.
1504 JumpIfOOM(masm, exception, x10, throw_out_of_memory);
1505
1506 // Clear the pending exception.
1507 __ Mov(x10, Operand(isolate->factory()->the_hole_value()));
1508 __ Str(x10, MemOperand(exception_address));
1509
1510 // x0 exception The exception descriptor.
1511 // x21 argv
1512 // x22 argc
1513 // x23 target
1514
1515 // Special handling of termination exceptions, which are uncatchable by
1516 // JavaScript code.
1517 __ Cmp(exception, Operand(isolate->factory()->termination_exception()));
1518 __ B(eq, throw_termination);
1519
1520 // Handle normal exception.
1521 __ B(throw_normal);
1522
1523 __ Bind(&retry);
1524 // The result (x0) is passed through as the next PerformGC parameter.
1525 }
1526
1527
1528 void CEntryStub::Generate(MacroAssembler* masm) {
1529 // The Abort mechanism relies on CallRuntime, which in turn relies on
1530 // CEntryStub, so until this stub has been generated, we have to use a
1531 // fall-back Abort mechanism.
1532 //
1533 // Note that this stub must be generated before any use of Abort.
1534 MacroAssembler::NoUseRealAbortsScope no_use_real_aborts(masm);
1535
1536 ASM_LOCATION("CEntryStub::Generate entry");
1537 ProfileEntryHookStub::MaybeCallEntryHook(masm);
1538
1539 // Register parameters:
1540 // x0: argc (including receiver, untagged)
1541 // x1: target
1542 //
1543 // The stack on entry holds the arguments and the receiver, with the receiver
1544 // at the highest address:
1545 //
1546 // jssp[argc-1]: receiver
1547 // jssp[argc-2]: arg[argc-2]
1548 // ... ...
1549 // jssp[1]: arg[1]
1550 // jssp[0]: arg[0]
1551 //
1552 // The arguments are in reverse order, so that arg[argc-2] is actually the
1553 // first argument to the target function and arg[0] is the last.
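// For example, with argc == 3 (a receiver plus two arguments):
//   jssp[2]: receiver
//   jssp[1]: arg[1] (the first argument to the target)
//   jssp[0]: arg[0] (the last argument to the target)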
1554 ASSERT(jssp.Is(__ StackPointer()));
1555 const Register& argc_input = x0;
1556 const Register& target_input = x1;
1557
1558 // Calculate argv, argc and the target address, and store them in
1559 // callee-saved registers so we can retry the call without having to reload
1560 // these arguments.
1561 // TODO(jbramley): If the first call attempt succeeds in the common case (as
1562 // it should), then we might be better off putting these parameters directly
1563 // into their argument registers, rather than using callee-saved registers and
1564 // preserving them on the stack.
1565 const Register& argv = x21;
1566 const Register& argc = x22;
1567 const Register& target = x23;
1568
1569 // Derive argv from the stack pointer so that it points to the first argument
1570 // (arg[argc-2]), or just below the receiver in case there are no arguments.
1571 // - Adjust for the arg[] array.
1572 Register temp_argv = x11;
1573 __ Add(temp_argv, jssp, Operand(x0, LSL, kPointerSizeLog2));
1574 // - Adjust for the receiver.
1575 __ Sub(temp_argv, temp_argv, 1 * kPointerSize);
1576
1577 // Enter the exit frame. Reserve three slots to preserve x21-x23 callee-saved
1578 // registers.
1579 FrameScope scope(masm, StackFrame::MANUAL);
1580 __ EnterExitFrame(save_doubles_, x10, 3);
1581 ASSERT(csp.Is(__ StackPointer()));
1582
1583 // Poke callee-saved registers into reserved space.
1584 __ Poke(argv, 1 * kPointerSize);
1585 __ Poke(argc, 2 * kPointerSize);
1586 __ Poke(target, 3 * kPointerSize);
1587
1588 // We normally only keep tagged values in callee-saved registers, as they
1589 // could be pushed onto the stack by called stubs and functions, and on the
1590 // stack they can confuse the GC. However, we're only calling C functions
1591 // which can push arbitrary data onto the stack anyway, and so the GC won't
1592 // examine that part of the stack.
1593 __ Mov(argc, argc_input);
1594 __ Mov(target, target_input);
1595 __ Mov(argv, temp_argv);
1596
1597 Label throw_normal;
1598 Label throw_termination;
1599 Label throw_out_of_memory;
1600
1601 // Call the runtime function.
1602 GenerateCore(masm,
1603 &throw_normal,
1604 &throw_termination,
1605 &throw_out_of_memory,
1606 false,
1607 false);
1608
1609 // If successful, the previous GenerateCore will have returned to the
1610 // calling code. Otherwise, we fall through into the following.
1611
1612 // Do space-specific GC and retry runtime call.
1613 GenerateCore(masm,
1614 &throw_normal,
1615 &throw_termination,
1616 &throw_out_of_memory,
1617 true,
1618 false);
1619
1620 // Do full GC and retry runtime call one final time.
1621 __ Mov(x0, reinterpret_cast<uint64_t>(Failure::InternalError()));
1622 GenerateCore(masm,
1623 &throw_normal,
1624 &throw_termination,
1625 &throw_out_of_memory,
1626 true,
1627 true);
1628
1629 // We didn't execute a return case, so the stack frame hasn't been updated
1630 // (except for the return address slot). However, we don't need to initialize
1631 // jssp because the throw method will immediately overwrite it when it
1632 // unwinds the stack.
1633 if (__ emit_debug_code()) {
1634 __ Mov(jssp, kDebugZapValue);
1635 }
1636 __ SetStackPointer(jssp);
1637
1638 // Throw exceptions.
1639 // If we throw an exception, we can end up re-entering CEntryStub before we
1640 // pop the exit frame, so we need to ensure that x21-x23 contain GC-safe
1641 // values here.
1642 __ Bind(&throw_out_of_memory);
1643 ASM_LOCATION("Throw out of memory");
1644 __ Mov(argv, 0);
1645 __ Mov(argc, 0);
1646 __ Mov(target, 0);
1647 // Set external caught exception to false.
1648 Isolate* isolate = masm->isolate();
1649 __ Mov(x2, Operand(ExternalReference(Isolate::kExternalCaughtExceptionAddress,
1650 isolate)));
1651 __ Str(xzr, MemOperand(x2));
1652
1653 // Set pending exception and x0 to out of memory exception.
1654 Label already_have_failure;
1655 JumpIfOOM(masm, x0, x10, &already_have_failure);
1656 Failure* out_of_memory = Failure::OutOfMemoryException(0x1);
1657 __ Mov(x0, Operand(reinterpret_cast<uint64_t>(out_of_memory)));
1658 __ Bind(&already_have_failure);
1659 __ Mov(x2, Operand(ExternalReference(Isolate::kPendingExceptionAddress,
1660 isolate)));
1661 __ Str(x0, MemOperand(x2));
1662 // Fall through to the next label.
1663
1664 __ Bind(&throw_termination);
1665 ASM_LOCATION("Throw termination");
1666 __ Mov(argv, 0);
1667 __ Mov(argc, 0);
1668 __ Mov(target, 0);
1669 __ ThrowUncatchable(x0, x10, x11, x12, x13);
1670
1671 __ Bind(&throw_normal);
1672 ASM_LOCATION("Throw normal");
1673 __ Mov(argv, 0);
1674 __ Mov(argc, 0);
1675 __ Mov(target, 0);
1676 __ Throw(x0, x10, x11, x12, x13);
1677 }
1678
1679
1680 // This is the entry point from C++. 5 arguments are provided in x0-x4.
1681 // See use of the CALL_GENERATED_CODE macro for example in src/execution.cc.
1682 // Input:
1683 // x0: code entry.
1684 // x1: function.
1685 // x2: receiver.
1686 // x3: argc.
1687 // x4: argv.
1688 // Output:
1689 // x0: result.
1690 void JSEntryStub::GenerateBody(MacroAssembler* masm, bool is_construct) {
1691 ASSERT(jssp.Is(__ StackPointer()));
1692 Register code_entry = x0;
1693
1694 // Enable instruction instrumentation. This only works on the simulator, and
1695 // will have no effect on the model or real hardware.
1696 __ EnableInstrumentation();
1697
1698 Label invoke, handler_entry, exit;
1699
1700 // Push callee-saved registers and synchronize the system stack pointer (csp)
1701 // and the JavaScript stack pointer (jssp).
1702 //
1703 // We must not write to jssp until after the PushCalleeSavedRegisters()
1704 // call, since jssp is itself a callee-saved register.
1705 __ SetStackPointer(csp);
1706 __ PushCalleeSavedRegisters();
1707 __ Mov(jssp, csp);
1708 __ SetStackPointer(jssp);
1709
1710 ProfileEntryHookStub::MaybeCallEntryHook(masm);
1711
1712 // Build an entry frame (see layout below).
1713 Isolate* isolate = masm->isolate();
1714
1715 // Build an entry frame.
1716 int marker = is_construct ? StackFrame::ENTRY_CONSTRUCT : StackFrame::ENTRY;
1717 int64_t bad_frame_pointer = -1L; // Bad frame pointer to fail if it is used.
1718 __ Mov(x13, bad_frame_pointer);
1719 __ Mov(x12, Operand(Smi::FromInt(marker)));
1720 __ Mov(x11, Operand(ExternalReference(Isolate::kCEntryFPAddress, isolate)));
1721 __ Ldr(x10, MemOperand(x11));
1722
1723 // TODO(all): Pushing the marker twice seems unnecessary.
1724 // In this case perhaps we could push xzr in the slot for the context
1725 // (see MAsm::EnterFrame).
1726 __ Push(x13, x12, x12, x10);
1727 // Set up fp.
1728 __ Sub(fp, jssp, EntryFrameConstants::kCallerFPOffset);
1729
1730 // Push the JS entry frame marker. Also set js_entry_sp if this is the
1731 // outermost JS call.
1732 Label non_outermost_js, done;
1733 ExternalReference js_entry_sp(Isolate::kJSEntrySPAddress, isolate);
1734 __ Mov(x10, Operand(ExternalReference(js_entry_sp)));
1735 __ Ldr(x11, MemOperand(x10));
1736 __ Cbnz(x11, &non_outermost_js);
1737 __ Str(fp, MemOperand(x10));
1738 __ Mov(x12, Operand(Smi::FromInt(StackFrame::OUTERMOST_JSENTRY_FRAME)));
1739 __ Push(x12);
1740 __ B(&done);
1741 __ Bind(&non_outermost_js);
1742 // We save one instruction by pushing xzr, since the marker is 0.
1743 ASSERT(Smi::FromInt(StackFrame::INNER_JSENTRY_FRAME) == NULL);
1744 __ Push(xzr);
1745 __ Bind(&done);
1746
1747 // The frame set up looks like this:
1748 // jssp[0] : JS entry frame marker.
1749 // jssp[1] : C entry FP.
1750 // jssp[2] : stack frame marker.
1751 // jssp[3] : stack frame marker.
1752 // jssp[4] : bad frame pointer 0xfff...ff <- fp points here.
1753
1754
1755 // Jump to a faked try block that does the invoke, with a faked catch
1756 // block that sets the pending exception.
1757 __ B(&invoke);
1758
1759 // Prevent the constant pool from being emitted between the record of the
1760 // handler_entry position and the first instruction of the sequence here.
1761 // There is no risk because Assembler::Emit() emits the instruction before
1762 // checking for constant pool emission, but we do not want to depend on
1763 // that.
1764 {
1765 Assembler::BlockConstPoolScope block_const_pool(masm);
1766 __ bind(&handler_entry);
1767 handler_offset_ = handler_entry.pos();
1768 // Caught exception: Store result (exception) in the pending exception
1769 // field in the JSEnv and return a failure sentinel. Coming in here the
1770 // fp will be invalid because the PushTryHandler below sets it to 0 to
1771 // signal the existence of the JSEntry frame.
1772 // TODO(jbramley): Do this in the Assembler.
1773 __ Mov(x10, Operand(ExternalReference(Isolate::kPendingExceptionAddress,
1774 isolate)));
1775 }
1776 __ Str(code_entry, MemOperand(x10));
1777 __ Mov(x0, Operand(reinterpret_cast<int64_t>(Failure::Exception())));
1778 __ B(&exit);
1779
1780 // Invoke: Link this frame into the handler chain. There's only one
1781 // handler block in this code object, so its index is 0.
1782 __ Bind(&invoke);
1783 __ PushTryHandler(StackHandler::JS_ENTRY, 0);
1784 // If an exception not caught by another handler occurs, this handler
1785 // returns control to the code after the B(&invoke) above, which
1786 // restores all callee-saved registers (including cp and fp) to their
1787 // saved values before returning a failure to C.
1788
1789 // Clear any pending exceptions.
1790 __ Mov(x10, Operand(isolate->factory()->the_hole_value()));
1791 __ Mov(x11, Operand(ExternalReference(Isolate::kPendingExceptionAddress,
1792 isolate)));
1793 __ Str(x10, MemOperand(x11));
1794
1795 // Invoke the function by calling through the JS entry trampoline builtin.
1796 // Notice that we cannot store a reference to the trampoline code directly in
1797 // this stub, because runtime stubs are not traversed when doing GC.
1798
1799 // Expected registers by Builtins::JSEntryTrampoline
1800 // x0: code entry.
1801 // x1: function.
1802 // x2: receiver.
1803 // x3: argc.
1804 // x4: argv.
1805 // TODO(jbramley): The latest ARM code checks is_construct and conditionally
1806 // uses construct_entry. We probably need to do the same here.
1807 ExternalReference entry(is_construct ? Builtins::kJSConstructEntryTrampoline
1808 : Builtins::kJSEntryTrampoline,
1809 isolate);
1810 __ Mov(x10, Operand(entry));
1811
1812 // Call the JSEntryTrampoline.
1813 __ Ldr(x11, MemOperand(x10)); // Dereference the address.
1814 __ Add(x12, x11, Code::kHeaderSize - kHeapObjectTag);
1815 __ Blr(x12);
1816
1817 // Unlink this frame from the handler chain.
1818 __ PopTryHandler();
1819
1820
1821 __ Bind(&exit);
1822 // x0 holds the result.
1823 // The stack pointer points to the top of the entry frame pushed on entry from
1824 // C++ (at the beginning of this stub):
1825 // jssp[0] : JS entry frame marker.
1826 // jssp[1] : C entry FP.
1827 // jssp[2] : stack frame marker.
1828 // jssp[3] : stack frame marker.
1829 // jssp[4] : bad frame pointer 0xfff...ff <- fp points here.
1830
1831 // Check if the current stack frame is marked as the outermost JS frame.
1832 Label non_outermost_js_2;
1833 __ Pop(x10);
1834 __ Cmp(x10, Operand(Smi::FromInt(StackFrame::OUTERMOST_JSENTRY_FRAME)));
1835 __ B(ne, &non_outermost_js_2);
1836 __ Mov(x11, Operand(ExternalReference(js_entry_sp)));
1837 __ Str(xzr, MemOperand(x11));
1838 __ Bind(&non_outermost_js_2);
1839
1840 // Restore the top frame descriptors from the stack.
1841 __ Pop(x10);
1842 __ Mov(x11, Operand(ExternalReference(Isolate::kCEntryFPAddress, isolate)));
1843 __ Str(x10, MemOperand(x11));
1844
1845 // Reset the stack to the callee-saved registers.
1846 __ Drop(-EntryFrameConstants::kCallerFPOffset, kByteSizeInBytes);
1847 // Restore the callee-saved registers and return.
1848 ASSERT(jssp.Is(__ StackPointer()));
1849 __ Mov(csp, jssp);
1850 __ SetStackPointer(csp);
1851 __ PopCalleeSavedRegisters();
1852 // After this point, we must not modify jssp because it is a callee-saved
1853 // register which we have just restored.
1854 __ Ret();
1855 }
1856
1857
1858 void FunctionPrototypeStub::Generate(MacroAssembler* masm) {
1859 Label miss;
1860 Register receiver;
1861 if (kind() == Code::KEYED_LOAD_IC) {
1862 // ----------- S t a t e -------------
1863 // -- lr : return address
1864 // -- x1 : receiver
1865 // -- x0 : key
1866 // -----------------------------------
1867 Register key = x0;
1868 receiver = x1;
1869 __ Cmp(key, Operand(masm->isolate()->factory()->prototype_string()));
1870 __ B(ne, &miss);
1871 } else {
1872 ASSERT(kind() == Code::LOAD_IC);
1873 // ----------- S t a t e -------------
1874 // -- lr : return address
1875 // -- x2 : name
1876 // -- x0 : receiver
1877 // -- sp[0] : receiver
1878 // -----------------------------------
1879 receiver = x0;
1880 }
1881
1882 StubCompiler::GenerateLoadFunctionPrototype(masm, receiver, x10, x11, &miss);
1883
1884 __ Bind(&miss);
1885 StubCompiler::TailCallBuiltin(masm,
1886 BaseLoadStoreStubCompiler::MissBuiltin(kind()));
1887 }
1888
1889
1890 void StringLengthStub::Generate(MacroAssembler* masm) {
1891 Label miss;
1892 Register receiver;
1893 if (kind() == Code::KEYED_LOAD_IC) {
1894 // ----------- S t a t e -------------
1895 // -- lr : return address
1896 // -- x1 : receiver
1897 // -- x0 : key
1898 // -----------------------------------
1899 Register key = x0;
1900 receiver = x1;
1901 __ Cmp(key, Operand(masm->isolate()->factory()->length_string()));
1902 __ B(ne, &miss);
1903 } else {
1904 ASSERT(kind() == Code::LOAD_IC);
1905 // ----------- S t a t e -------------
1906 // -- lr : return address
1907 // -- x2 : name
1908 // -- x0 : receiver
1909 // -- sp[0] : receiver
1910 // -----------------------------------
1911 receiver = x0;
1912 }
1913
1914 StubCompiler::GenerateLoadStringLength(masm, receiver, x10, x11, &miss);
1915
1916 __ Bind(&miss);
1917 StubCompiler::TailCallBuiltin(masm,
1918 BaseLoadStoreStubCompiler::MissBuiltin(kind()));
1919 }
1920
1921
1922 void StoreArrayLengthStub::Generate(MacroAssembler* masm) {
1923 ASM_LOCATION("StoreArrayLengthStub::Generate");
1924 // This accepts as a receiver anything JSArray::SetElementsLength accepts
1925 // (currently anything except external arrays, which means anything with
1926 // elements of FixedArray type). The value must be a number, but only smis
1927 // are accepted because they are the most common case.
1928 Label miss;
1929
1930 Register receiver;
1931 Register value;
1932 if (kind() == Code::KEYED_STORE_IC) {
1933 // ----------- S t a t e -------------
1934 // -- lr : return address
1935 // -- x2 : receiver
1936 // -- x1 : key
1937 // -- x0 : value
1938 // -----------------------------------
1939 Register key = x1;
1940 receiver = x2;
1941 value = x0;
1942 __ Cmp(key, Operand(masm->isolate()->factory()->length_string()));
1943 __ B(ne, &miss);
1944 } else {
1945 ASSERT(kind() == Code::STORE_IC);
1946 // ----------- S t a t e -------------
1947 // -- lr : return address
1948 // -- x2 : name
1949 // -- x1 : receiver
1950 // -- x0 : value
1951 // -----------------------------------
1952 receiver = x1;
1953 value = x0;
1954 }
1955
1956 // Check that the receiver isn't a smi.
1957 __ JumpIfSmi(receiver, &miss);
1958
1959 // Check that the object is a JS array.
1960 __ CompareObjectType(receiver, x10, x11, JS_ARRAY_TYPE);
1961 __ B(ne, &miss);
1962
1963 // Check that elements are FixedArray.
1964 // We rely on StoreIC_ArrayLength below to deal with all types of
1965 // fast elements (including COW).
1966 __ Ldr(x10, FieldMemOperand(receiver, JSArray::kElementsOffset));
1967 __ CompareObjectType(x10, x11, x12, FIXED_ARRAY_TYPE);
1968 __ B(ne, &miss);
1969
1970 // Check that the array has fast properties, otherwise the length
1971 // property might have been redefined.
1972 __ Ldr(x10, FieldMemOperand(receiver, JSArray::kPropertiesOffset));
1973 __ Ldr(x10, FieldMemOperand(x10, FixedArray::kMapOffset));
1974 __ CompareRoot(x10, Heap::kHashTableMapRootIndex);
1975 __ B(eq, &miss);
1976
1977 // Check that value is a smi.
1978 __ JumpIfNotSmi(value, &miss);
1979
1980 // Prepare tail call to StoreIC_ArrayLength.
1981 __ Push(receiver, value);
1982
1983 ExternalReference ref =
1984 ExternalReference(IC_Utility(IC::kStoreIC_ArrayLength), masm->isolate());
1985 __ TailCallExternalReference(ref, 2, 1);
1986
1987 __ Bind(&miss);
1988 StubCompiler::TailCallBuiltin(masm,
1989 BaseLoadStoreStubCompiler::MissBuiltin(kind()));
1990 }
1991
1992
1993 void InstanceofStub::Generate(MacroAssembler* masm) {
1994 // Stack on entry:
1995 // jssp[0]: function.
1996 // jssp[8]: object.
1997 //
1998 // Returns result in x0. Zero indicates instanceof, smi 1 indicates not
1999 // instanceof.
2000
2001 Register result = x0;
2002 Register function = right();
2003 Register object = left();
2004 Register scratch1 = x6;
2005 Register scratch2 = x7;
2006 Register res_true = x8;
2007 Register res_false = x9;
2008 // Only used if there was an inline map check site. (See
2009 // LCodeGen::DoInstanceOfKnownGlobal().)
2010 Register map_check_site = x4;
2011 // Delta for the instructions generated between the inline map check and the
2012 // instruction setting the result.
2013 const int32_t kDeltaToLoadBoolResult = 4 * kInstructionSize;
2014
2015 Label not_js_object, slow;
2016
2017 if (!HasArgsInRegisters()) {
2018 __ Pop(function, object);
2019 }
2020
2021 if (ReturnTrueFalseObject()) {
2022 __ LoadTrueFalseRoots(res_true, res_false);
2023 } else {
2024 // This is counter-intuitive, but correct: Smi 0 indicates instanceof (see above).
2025 __ Mov(res_true, Operand(Smi::FromInt(0)));
2026 __ Mov(res_false, Operand(Smi::FromInt(1)));
2027 }
2028
2029 // Check that the left hand side is a JS object and load its map as a side
2030 // effect.
2031 Register map = x12;
2032 __ JumpIfSmi(object, &not_js_object);
2033 __ IsObjectJSObjectType(object, map, scratch2, &not_js_object);
2034
2035 // If there is a call site cache, don't look in the global cache, but do the
2036 // real lookup and update the call site cache.
2037 if (!HasCallSiteInlineCheck()) {
2038 Label miss;
2039 __ JumpIfNotRoot(function, Heap::kInstanceofCacheFunctionRootIndex, &miss);
2040 __ JumpIfNotRoot(map, Heap::kInstanceofCacheMapRootIndex, &miss);
2041 __ LoadRoot(result, Heap::kInstanceofCacheAnswerRootIndex);
2042 __ Ret();
2043 __ Bind(&miss);
2044 }
2045
2046 // Get the prototype of the function.
2047 Register prototype = x13;
2048 __ TryGetFunctionPrototype(function, prototype, scratch2, &slow,
2049 MacroAssembler::kMissOnBoundFunction);
2050
2051 // Check that the function prototype is a JS object.
2052 __ JumpIfSmi(prototype, &slow);
2053 __ IsObjectJSObjectType(prototype, scratch1, scratch2, &slow);
2054
2055 // Update the global instanceof or call site inlined cache with the current
2056 // map and function. The cached answer will be set when it is known below.
2057 if (HasCallSiteInlineCheck()) {
2058 // Patch the (relocated) inlined map check.
2059 __ GetRelocatedValueLocation(map_check_site, scratch1);
2060 // We have a cell, so need another level of dereferencing.
2061 __ Ldr(scratch1, MemOperand(scratch1));
2062 __ Str(map, FieldMemOperand(scratch1, Cell::kValueOffset));
2063 } else {
2064 __ StoreRoot(function, Heap::kInstanceofCacheFunctionRootIndex);
2065 __ StoreRoot(map, Heap::kInstanceofCacheMapRootIndex);
2066 }
2067
2068 Label return_true, return_result;
2069 {
2070 // Loop through the prototype chain looking for the function prototype.
2071 Register chain_map = x1;
2072 Register chain_prototype = x14;
2073 Register null_value = x15;
2074 Label loop;
2075 __ Ldr(chain_prototype, FieldMemOperand(map, Map::kPrototypeOffset));
2076 __ LoadRoot(null_value, Heap::kNullValueRootIndex);
2077 // Speculatively set a result.
2078 __ Mov(result, res_false);
2079
2080 __ Bind(&loop);
2081
2082 // If the chain prototype is the object prototype, return true.
2083 __ Cmp(chain_prototype, prototype);
2084 __ B(eq, &return_true);
2085
2086 // If the chain prototype is null, we've reached the end of the chain, so
2087 // return false.
2088 __ Cmp(chain_prototype, null_value);
2089 __ B(eq, &return_result);
2090
2091 // Otherwise, load the next prototype in the chain, and loop.
2092 __ Ldr(chain_map, FieldMemOperand(chain_prototype, HeapObject::kMapOffset));
2093 __ Ldr(chain_prototype, FieldMemOperand(chain_map, Map::kPrototypeOffset));
2094 __ B(&loop);
2095 }
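// A rough C-like sketch of the loop above (illustrative only):
//   for (;;) {
//     if (chain_prototype == prototype) return res_true;
//     if (chain_prototype == null_value) return result;  // res_false.
//     chain_prototype = chain_prototype->map()->prototype();
//   }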
2096
2097 // Return sequence when no arguments are on the stack.
2098 // We cannot fall through to here.
2099 __ Bind(&return_true);
2100 __ Mov(result, res_true);
2101 __ Bind(&return_result);
2102 if (HasCallSiteInlineCheck()) {
2103 ASSERT(ReturnTrueFalseObject());
2104 __ Add(map_check_site, map_check_site, kDeltaToLoadBoolResult);
2105 __ GetRelocatedValueLocation(map_check_site, scratch2);
2106 __ Str(result, MemOperand(scratch2));
2107 } else {
2108 __ StoreRoot(result, Heap::kInstanceofCacheAnswerRootIndex);
2109 }
2110 __ Ret();
2111
2112 Label object_not_null, object_not_null_or_smi;
2113
2114 __ Bind(&not_js_object);
2115 Register object_type = x14;
2116 // x0 result result return register (uninit)
2117 // x10 function pointer to function
2118 // x11 object pointer to object
2119 // x14 object_type type of object (uninit)
2120
2121 // Before null, smi and string checks, check that the rhs is a function.
2122 // For a non-function rhs, an exception must be thrown.
2123 __ JumpIfSmi(function, &slow);
2124 __ JumpIfNotObjectType(
2125 function, scratch1, object_type, JS_FUNCTION_TYPE, &slow);
2126
2127 __ Mov(result, res_false);
2128
2129 // Null is not an instance of anything.
2130 __ Cmp(object_type, Operand(masm->isolate()->factory()->null_value()));
2131 __ B(ne, &object_not_null);
2132 __ Ret();
2133
2134 __ Bind(&object_not_null);
2135 // Smi values are not instances of anything.
2136 __ JumpIfNotSmi(object, &object_not_null_or_smi);
2137 __ Ret();
2138
2139 __ Bind(&object_not_null_or_smi);
2140 // String values are not instances of anything.
2141 __ IsObjectJSStringType(object, scratch2, &slow);
2142 __ Ret();
2143
2144 // Slow-case. Tail call builtin.
2145 __ Bind(&slow);
2146 {
2147 FrameScope scope(masm, StackFrame::INTERNAL);
2148 // Arguments have either been passed into registers or have been previously
2149 // popped. We need to push them before calling the builtin.
2150 __ Push(object, function);
2151 __ InvokeBuiltin(Builtins::INSTANCE_OF, CALL_FUNCTION);
2152 }
2153 if (ReturnTrueFalseObject()) {
2154 // Reload true/false because they were clobbered in the builtin call.
2155 __ LoadTrueFalseRoots(res_true, res_false);
2156 __ Cmp(result, 0);
2157 __ Csel(result, res_true, res_false, eq);
2158 }
2159 __ Ret();
2160 }
2161
2162
2163 Register InstanceofStub::left() {
2164 // Object to check (instanceof lhs).
2165 return x11;
2166 }
2167
2168
2169 Register InstanceofStub::right() {
2170 // Constructor function (instanceof rhs).
2171 return x10;
2172 }
2173
2174
2175 void ArgumentsAccessStub::GenerateReadElement(MacroAssembler* masm) {
2176 Register arg_count = x0;
2177 Register key = x1;
2178
2179 // The displacement is the offset of the last parameter (if any) relative
2180 // to the frame pointer.
2181 static const int kDisplacement =
2182 StandardFrameConstants::kCallerSPOffset - kPointerSize;
2183
2184 // Check that the key is a smi.
2185 Label slow;
2186 __ JumpIfNotSmi(key, &slow);
2187
2188 // Check if the calling frame is an arguments adaptor frame.
2189 Register local_fp = x11;
2190 Register caller_fp = x11;
2191 Register caller_ctx = x12;
2192 Label skip_adaptor;
2193 __ Ldr(caller_fp, MemOperand(fp, StandardFrameConstants::kCallerFPOffset));
2194 __ Ldr(caller_ctx, MemOperand(caller_fp,
2195 StandardFrameConstants::kContextOffset));
2196 __ Cmp(caller_ctx, Operand(Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR)));
2197 __ Csel(local_fp, fp, caller_fp, ne);
2198 __ B(ne, &skip_adaptor);
2199
2200 // Load the actual arguments limit found in the arguments adaptor frame.
2201 __ Ldr(arg_count, MemOperand(caller_fp,
2202 ArgumentsAdaptorFrameConstants::kLengthOffset));
2203 __ Bind(&skip_adaptor);
2204
2205 // Check index against formal parameters count limit. Use unsigned comparison
2206 // to get negative check for free: branch if key < 0 or key >= arg_count.
2207 __ Cmp(key, arg_count);
2208 __ B(hs, &slow);
2209
2210 // Read the argument from the stack and return it.
2211 __ Sub(x10, arg_count, key);
2212 __ Add(x10, local_fp, Operand::UntagSmiAndScale(x10, kPointerSizeLog2));
2213 __ Ldr(x0, MemOperand(x10, kDisplacement));
2214 __ Ret();
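// The address computed above, as C-like pseudocode (illustrative only;
// arg_count and key are shown untagged):
//   element = *(local_fp + (arg_count - key) * kPointerSize + kDisplacement);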
2215
2216 // Slow case: handle non-smi or out-of-bounds access to arguments by calling
2217 // the runtime system.
2218 __ Bind(&slow);
2219 __ Push(key);
2220 __ TailCallRuntime(Runtime::kGetArgumentsProperty, 1, 1);
2221 }
2222
2223
2224 void ArgumentsAccessStub::GenerateNewNonStrictSlow(MacroAssembler* masm) {
2225 // Stack layout on entry.
2226 // jssp[0]: number of parameters (tagged)
2227 // jssp[8]: address of receiver argument
2228 // jssp[16]: function
2229
2230 // Check if the calling frame is an arguments adaptor frame.
2231 Label runtime;
2232 Register caller_fp = x10;
2233 __ Ldr(caller_fp, MemOperand(fp, StandardFrameConstants::kCallerFPOffset));
2234 // Load and untag the context.
2235 STATIC_ASSERT((kSmiShift / kBitsPerByte) == 4);
2236 __ Ldr(w11, MemOperand(caller_fp, StandardFrameConstants::kContextOffset +
2237 (kSmiShift / kBitsPerByte)));
2238 __ Cmp(w11, StackFrame::ARGUMENTS_ADAPTOR);
2239 __ B(ne, &runtime);
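// Why the single load works (illustrative): kSmiShift is 32, so a smi keeps
// its payload in the upper 32 bits and, on this little-endian target, loading
// only the high word of the tagged slot yields the untagged value:
//   untagged = (int32_t)(tagged >> 32);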
2240
2241 // Patch the arguments.length and parameters pointer in the current frame.
2242 __ Ldr(x11, MemOperand(caller_fp,
2243 ArgumentsAdaptorFrameConstants::kLengthOffset));
2244 __ Poke(x11, 0 * kXRegSizeInBytes);
2245 __ Add(x10, caller_fp, Operand::UntagSmiAndScale(x11, kPointerSizeLog2));
2246 __ Add(x10, x10, Operand(StandardFrameConstants::kCallerSPOffset));
2247 __ Poke(x10, 1 * kXRegSizeInBytes);
2248
2249 __ Bind(&runtime);
2250 __ TailCallRuntime(Runtime::kNewArgumentsFast, 3, 1);
2251 }
2252
2253
2254 void ArgumentsAccessStub::GenerateNewNonStrictFast(MacroAssembler* masm) {
2255 // Stack layout on entry.
2256 // jssp[0]: number of parameters (tagged)
2257 // jssp[8]: address of receiver argument
2258 // jssp[16]: function
2259 //
2260 // Returns pointer to result object in x0.
2261
2262 // Note: arg_count_smi is an alias of param_count_smi.
2263 Register arg_count_smi = x3;
2264 Register param_count_smi = x3;
2265 Register param_count = x7;
2266 Register recv_arg = x14;
2267 Register function = x4;
2268 __ Pop(param_count_smi, recv_arg, function);
2269 __ SmiUntag(param_count, param_count_smi);
2270
2271 // Check if the calling frame is an arguments adaptor frame.
2272 Register caller_fp = x11;
2273 Register caller_ctx = x12;
2274 Label runtime;
2275 Label adaptor_frame, try_allocate;
2276 __ Ldr(caller_fp, MemOperand(fp, StandardFrameConstants::kCallerFPOffset));
2277 __ Ldr(caller_ctx, MemOperand(caller_fp,
2278 StandardFrameConstants::kContextOffset));
2279 __ Cmp(caller_ctx, Operand(Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR)));
2280 __ B(eq, &adaptor_frame);
2281
2282 // No adaptor, parameter count = argument count.
2283
2284 // x1 mapped_params number of mapped params, min(params, args) (uninit)
2285 // x2 arg_count number of function arguments (uninit)
2286 // x3 arg_count_smi number of function arguments (smi)
2287 // x4 function function pointer
2288 // x7 param_count number of function parameters
2289 // x11 caller_fp caller's frame pointer
2290 // x14 recv_arg pointer to receiver arguments
2291
2292 Register arg_count = x2;
2293 __ Mov(arg_count, param_count);
2294 __ B(&try_allocate);
2295
2296 // We have an adaptor frame. Patch the parameters pointer.
2297 __ Bind(&adaptor_frame);
2298 __ Ldr(arg_count_smi,
2299 MemOperand(caller_fp,
2300 ArgumentsAdaptorFrameConstants::kLengthOffset));
2301 __ SmiUntag(arg_count, arg_count_smi);
2302 __ Add(x10, caller_fp, Operand(arg_count, LSL, kPointerSizeLog2));
2303 __ Add(recv_arg, x10, StandardFrameConstants::kCallerSPOffset);
2304
2305 // Compute the mapped parameter count = min(param_count, arg_count)
2306 Register mapped_params = x1;
2307 __ Cmp(param_count, arg_count);
2308 __ Csel(mapped_params, param_count, arg_count, lt);
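// Equivalent expression for the Cmp/Csel pair above (illustrative):
//   mapped_params = (param_count < arg_count) ? param_count : arg_count;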
2309
2310 __ Bind(&try_allocate);
2311
2312 // x0 alloc_obj pointer to allocated objects: param map, backing
2313 // store, arguments (uninit)
2314 // x1 mapped_params number of mapped parameters, min(params, args)
2315 // x2 arg_count number of function arguments
2316 // x3 arg_count_smi number of function arguments (smi)
2317 // x4 function function pointer
2318 // x7 param_count number of function parameters
2319 // x10 size size of objects to allocate (uninit)
2320 // x14 recv_arg pointer to receiver arguments
2321
2322 // Compute the size of backing store, parameter map, and arguments object.
2323 // 1. Parameter map, has two extra words containing context and backing
2324 // store.
2325 const int kParameterMapHeaderSize =
2326 FixedArray::kHeaderSize + 2 * kPointerSize;
2327
2328 // Calculate the parameter map size, assuming it exists.
2329 Register size = x10;
2330 __ Mov(size, Operand(mapped_params, LSL, kPointerSizeLog2));
2331 __ Add(size, size, kParameterMapHeaderSize);
2332
2333 // If there are no mapped parameters, set the running size total to zero.
2334 // Otherwise, use the parameter map size calculated earlier.
2335 __ Cmp(mapped_params, 0);
2336 __ CzeroX(size, eq);
2337
2338 // 2. Add the size of the backing store and arguments object.
2339 __ Add(size, size, Operand(arg_count, LSL, kPointerSizeLog2));
2340 __ Add(size, size, FixedArray::kHeaderSize + Heap::kArgumentsObjectSize);
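// The size computed by the sequence above, as C-like pseudocode
// (illustrative only):
//   size = (mapped_params == 0
//               ? 0
//               : mapped_params * kPointerSize + kParameterMapHeaderSize)
//          + arg_count * kPointerSize
//          + FixedArray::kHeaderSize + Heap::kArgumentsObjectSize;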
2341
2342 // Do the allocation of all three objects in one go. Assign this to x0, as it
2343 // will be returned to the caller.
2344 Register alloc_obj = x0;
2345 __ Allocate(size, alloc_obj, x11, x12, &runtime, TAG_OBJECT);
2346
2347 // Get the arguments boilerplate from the current (global) context.
2348
2349 // x0 alloc_obj pointer to allocated objects (param map, backing
2350 // store, arguments)
2351 // x1 mapped_params number of mapped parameters, min(params, args)
2352 // x2 arg_count number of function arguments
2353 // x3 arg_count_smi number of function arguments (smi)
2354 // x4 function function pointer
2355 // x7 param_count number of function parameters
2356 // x11 args_offset offset to args (or aliased args) boilerplate (uninit)
2357 // x14 recv_arg pointer to receiver arguments
2358
2359 Register global_object = x10;
2360 Register global_ctx = x10;
2361 Register args_offset = x11;
2362 Register aliased_args_offset = x10;
2363 __ Ldr(global_object, GlobalObjectMemOperand());
2364 __ Ldr(global_ctx, FieldMemOperand(global_object,
2365 GlobalObject::kNativeContextOffset));
2366
2367 __ Ldr(args_offset, ContextMemOperand(global_ctx,
2368 Context::ARGUMENTS_BOILERPLATE_INDEX));
2369 __ Ldr(aliased_args_offset,
2370 ContextMemOperand(global_ctx,
2371 Context::ALIASED_ARGUMENTS_BOILERPLATE_INDEX));
2372 __ Cmp(mapped_params, 0);
2373 __ CmovX(args_offset, aliased_args_offset, ne);
2374
2375 // Copy the JS object part.
2376 __ CopyFields(alloc_obj, args_offset, CPURegList(x10, x12, x13),
2377 JSObject::kHeaderSize / kPointerSize);
2378
2379 // Set up the callee in-object property.
2380 STATIC_ASSERT(Heap::kArgumentsCalleeIndex == 1);
2381 const int kCalleeOffset = JSObject::kHeaderSize +
2382 Heap::kArgumentsCalleeIndex * kPointerSize;
2383 __ Str(function, FieldMemOperand(alloc_obj, kCalleeOffset));
2384
2385 // Set the smi-tagged length as an in-object property.
2386 STATIC_ASSERT(Heap::kArgumentsLengthIndex == 0);
2387 const int kLengthOffset = JSObject::kHeaderSize +
2388 Heap::kArgumentsLengthIndex * kPointerSize;
2389 __ Str(arg_count_smi, FieldMemOperand(alloc_obj, kLengthOffset));
2390
2391 // Set up the elements pointer in the allocated arguments object.
2392 // If we allocated a parameter map, "elements" will point there, otherwise
2393 // it will point to the backing store.
2394
2395 // x0 alloc_obj pointer to allocated objects (param map, backing
2396 // store, arguments)
2397 // x1 mapped_params number of mapped parameters, min(params, args)
2398 // x2 arg_count number of function arguments
2399 // x3 arg_count_smi number of function arguments (smi)
2400 // x4 function function pointer
2401 // x5 elements pointer to parameter map or backing store (uninit)
2402 // x6 backing_store pointer to backing store (uninit)
2403 // x7 param_count number of function parameters
2404 // x14 recv_arg pointer to receiver arguments
2405
2406 Register elements = x5;
2407 __ Add(elements, alloc_obj, Heap::kArgumentsObjectSize);
2408 __ Str(elements, FieldMemOperand(alloc_obj, JSObject::kElementsOffset));
2409
2410 // Initialize parameter map. If there are no mapped arguments, we're done.
2411 Label skip_parameter_map;
2412 __ Cmp(mapped_params, 0);
2413 // Set up backing store address, because it is needed later for filling in
2414 // the unmapped arguments.
2415 Register backing_store = x6;
2416 __ CmovX(backing_store, elements, eq);
2417 __ B(eq, &skip_parameter_map);
2418
2419 __ LoadRoot(x10, Heap::kNonStrictArgumentsElementsMapRootIndex);
2420 __ Str(x10, FieldMemOperand(elements, FixedArray::kMapOffset));
2421 __ Add(x10, mapped_params, 2);
2422 __ SmiTag(x10);
2423 __ Str(x10, FieldMemOperand(elements, FixedArray::kLengthOffset));
2424 __ Str(cp, FieldMemOperand(elements,
2425 FixedArray::kHeaderSize + 0 * kPointerSize));
2426 __ Add(x10, elements, Operand(mapped_params, LSL, kPointerSizeLog2));
2427 __ Add(x10, x10, kParameterMapHeaderSize);
2428 __ Str(x10, FieldMemOperand(elements,
2429 FixedArray::kHeaderSize + 1 * kPointerSize));
2430
2431 // Copy the parameter slots and the holes in the arguments.
2432 // We need to fill in mapped_parameter_count slots. Then index the context,
2433 // where parameters are stored in reverse order, at:
2434 //
2435 // MIN_CONTEXT_SLOTS .. MIN_CONTEXT_SLOTS + parameter_count - 1
2436 //
2437 // The mapped parameters thus need to get indices:
2438 //
2439 // MIN_CONTEXT_SLOTS + parameter_count - 1 ..
2440 // MIN_CONTEXT_SLOTS + parameter_count - mapped_parameter_count
2441 //
2442 // We loop from right to left.
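// Worked example (illustrative): with parameter_count == 4 and
// mapped_parameter_count == 2, the mapped parameters get context indices
// MIN_CONTEXT_SLOTS + 3 and MIN_CONTEXT_SLOTS + 2, filled right to left.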
2443
2444 // x0 alloc_obj pointer to allocated objects (param map, backing
2445 // store, arguments)
2446 // x1 mapped_params number of mapped parameters, min(params, args)
2447 // x2 arg_count number of function arguments
2448 // x3 arg_count_smi number of function arguments (smi)
2449 // x4 function function pointer
2450 // x5 elements pointer to parameter map or backing store (uninit)
2451 // x6 backing_store pointer to backing store (uninit)
2452 // x7 param_count number of function parameters
2453 // x11 loop_count parameter loop counter (uninit)
2454 // x12 index parameter index (smi, uninit)
2455 // x13 the_hole hole value (uninit)
2456 // x14 recv_arg pointer to receiver arguments
2457
2458 Register loop_count = x11;
2459 Register index = x12;
2460 Register the_hole = x13;
2461 Label parameters_loop, parameters_test;
2462 __ Mov(loop_count, mapped_params);
2463 __ Add(index, param_count, Context::MIN_CONTEXT_SLOTS);
2464 __ Sub(index, index, mapped_params);
2465 __ SmiTag(index);
2466 __ LoadRoot(the_hole, Heap::kTheHoleValueRootIndex);
2467 __ Add(backing_store, elements, Operand(loop_count, LSL, kPointerSizeLog2));
2468 __ Add(backing_store, backing_store, kParameterMapHeaderSize);
2469
2470 __ B(&parameters_test);
2471
2472 __ Bind(&parameters_loop);
2473 __ Sub(loop_count, loop_count, 1);
2474 __ Mov(x10, Operand(loop_count, LSL, kPointerSizeLog2));
2475 __ Add(x10, x10, kParameterMapHeaderSize - kHeapObjectTag);
2476 __ Str(index, MemOperand(elements, x10));
2477 __ Sub(x10, x10, kParameterMapHeaderSize - FixedArray::kHeaderSize);
2478 __ Str(the_hole, MemOperand(backing_store, x10));
2479 __ Add(index, index, Operand(Smi::FromInt(1)));
2480 __ Bind(&parameters_test);
2481 __ Cbnz(loop_count, &parameters_loop);
2482
2483 __ Bind(&skip_parameter_map);
2484 // Copy the arguments header and remaining slots (if there are any).
2485 __ LoadRoot(x10, Heap::kFixedArrayMapRootIndex);
2486 __ Str(x10, FieldMemOperand(backing_store, FixedArray::kMapOffset));
2487 __ Str(arg_count_smi, FieldMemOperand(backing_store,
2488 FixedArray::kLengthOffset));
2489
2490 // x0 alloc_obj pointer to allocated objects (param map, backing
2491 // store, arguments)
2492 // x1 mapped_params number of mapped parameters, min(params, args)
2493 // x2 arg_count number of function arguments
2494 // x4 function function pointer
2495 // x3 arg_count_smi number of function arguments (smi)
2496 // x6 backing_store pointer to backing store (uninit)
2497 // x14 recv_arg pointer to receiver arguments
2498
2499 Label arguments_loop, arguments_test;
2500 __ Mov(x10, mapped_params);
2501 __ Sub(recv_arg, recv_arg, Operand(x10, LSL, kPointerSizeLog2));
2502 __ B(&arguments_test);
2503
2504 __ Bind(&arguments_loop);
2505 __ Sub(recv_arg, recv_arg, kPointerSize);
2506 __ Ldr(x11, MemOperand(recv_arg));
2507 __ Add(x12, backing_store, Operand(x10, LSL, kPointerSizeLog2));
2508 __ Str(x11, FieldMemOperand(x12, FixedArray::kHeaderSize));
2509 __ Add(x10, x10, 1);
2510
2511 __ Bind(&arguments_test);
2512 __ Cmp(x10, arg_count);
2513 __ B(lt, &arguments_loop);
2514
2515 __ Ret();
2516
2517 // Do the runtime call to allocate the arguments object.
2518 __ Bind(&runtime);
2519 __ Push(function, recv_arg, arg_count_smi);
2520 __ TailCallRuntime(Runtime::kNewArgumentsFast, 3, 1);
2521 }
2522
2523
2524 void ArgumentsAccessStub::GenerateNewStrict(MacroAssembler* masm) {
2525 // Stack layout on entry.
2526 // jssp[0]: number of parameters (tagged)
2527 // jssp[8]: address of receiver argument
2528 // jssp[16]: function
2529 //
2530 // Returns pointer to result object in x0.
2531
2532 // Get the stub arguments from the frame, and make an untagged copy of the
2533 // parameter count.
2534 Register param_count_smi = x1;
2535 Register params = x2;
2536 Register function = x3;
2537 Register param_count = x13;
2538 __ Pop(param_count_smi, params, function);
2539 __ SmiUntag(param_count, param_count_smi);
2540
2541 // Check if the calling frame is an arguments adaptor frame.
2542 Register caller_fp = x11;
2543 Register caller_ctx = x12;
2544 Label try_allocate, runtime;
2545 __ Ldr(caller_fp, MemOperand(fp, StandardFrameConstants::kCallerFPOffset));
2546 __ Ldr(caller_ctx, MemOperand(caller_fp,
2547 StandardFrameConstants::kContextOffset));
2548 __ Cmp(caller_ctx, Operand(Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR)));
2549 __ B(ne, &try_allocate);
2550
2551 // x1 param_count_smi number of parameters passed to function (smi)
2552 // x2 params pointer to parameters
2553 // x3 function function pointer
2554 // x11 caller_fp caller's frame pointer
2555 // x13 param_count number of parameters passed to function
2556
2557 // Patch the argument length and parameters pointer.
2558 __ Ldr(param_count_smi,
2559 MemOperand(caller_fp,
2560 ArgumentsAdaptorFrameConstants::kLengthOffset));
2561 __ SmiUntag(param_count, param_count_smi);
2562 __ Add(x10, caller_fp, Operand(param_count, LSL, kPointerSizeLog2));
2563 __ Add(params, x10, StandardFrameConstants::kCallerSPOffset);
2564
2565 // Try the new space allocation. Start out with computing the size of the
2566 // arguments object and the elements array in words.
2567 Register size = x10;
2568 __ Bind(&try_allocate);
2569 __ Add(size, param_count, FixedArray::kHeaderSize / kPointerSize);
2570 __ Cmp(param_count, 0);
2571 __ CzeroX(size, eq);
2572 __ Add(size, size, Heap::kArgumentsObjectSizeStrict / kPointerSize);
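// The size in words computed by the sequence above, as C-like pseudocode
// (illustrative only):
//   size = (param_count == 0
//               ? 0
//               : param_count + FixedArray::kHeaderSize / kPointerSize)
//          + Heap::kArgumentsObjectSizeStrict / kPointerSize;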
2573
2574 // Do the allocation of both objects in one go. Assign this to x0, as it will
2575 // be returned to the caller.
2576 Register alloc_obj = x0;
2577 __ Allocate(size, alloc_obj, x11, x12, &runtime,
2578 static_cast<AllocationFlags>(TAG_OBJECT | SIZE_IN_WORDS));
2579
2580 // Get the arguments boilerplate from the current (native) context.
2581 Register global_object = x10;
2582 Register global_ctx = x10;
2583 Register args_offset = x4;
2584 __ Ldr(global_object, GlobalObjectMemOperand());
2585 __ Ldr(global_ctx, FieldMemOperand(global_object,
2586 GlobalObject::kNativeContextOffset));
2587 __ Ldr(args_offset,
2588 ContextMemOperand(global_ctx,
2589 Context::STRICT_MODE_ARGUMENTS_BOILERPLATE_INDEX));
2590
2591 // x0 alloc_obj pointer to allocated objects: parameter array and
2592 // arguments object
2593 // x1 param_count_smi number of parameters passed to function (smi)
2594 // x2 params pointer to parameters
2595 // x3 function function pointer
2596 // x4 args_offset offset to arguments boilerplate
2597 // x13 param_count number of parameters passed to function
2598
2599 // Copy the JS object part.
2600 __ CopyFields(alloc_obj, args_offset, CPURegList(x5, x6, x7),
2601 JSObject::kHeaderSize / kPointerSize);
2602
2603 // Set the smi-tagged length as an in-object property.
2604 STATIC_ASSERT(Heap::kArgumentsLengthIndex == 0);
2605 const int kLengthOffset = JSObject::kHeaderSize +
2606 Heap::kArgumentsLengthIndex * kPointerSize;
2607 __ Str(param_count_smi, FieldMemOperand(alloc_obj, kLengthOffset));
2608
2609 // If there are no actual arguments, we're done.
2610 Label done;
2611 __ Cbz(param_count, &done);
2612
2613 // Set up the elements pointer in the allocated arguments object and
2614 // initialize the header in the elements fixed array.
2615 Register elements = x5;
2616 __ Add(elements, alloc_obj, Heap::kArgumentsObjectSizeStrict);
2617 __ Str(elements, FieldMemOperand(alloc_obj, JSObject::kElementsOffset));
2618 __ LoadRoot(x10, Heap::kFixedArrayMapRootIndex);
2619 __ Str(x10, FieldMemOperand(elements, FixedArray::kMapOffset));
2620 __ Str(param_count_smi, FieldMemOperand(elements, FixedArray::kLengthOffset));
2621
2622 // x0 alloc_obj pointer to allocated objects: parameter array and
2623 // arguments object
2624 // x1 param_count_smi number of parameters passed to function (smi)
2625 // x2 params pointer to parameters
2626 // x3 function function pointer
2627 // x4 array pointer to array slot (uninit)
2628 // x5 elements pointer to elements array of alloc_obj
2629 // x13 param_count number of parameters passed to function
2630
2631 // Copy the fixed array slots.
2632 Label loop;
2633 Register array = x4;
2634 // Set up pointer to first array slot.
2635 __ Add(array, elements, FixedArray::kHeaderSize - kHeapObjectTag);
2636
2637 __ Bind(&loop);
2638 // Pre-decrement the parameters pointer by kPointerSize on each iteration.
2639 // Pre-decrement in order to skip receiver.
2640 __ Ldr(x10, MemOperand(params, -kPointerSize, PreIndex));
2641 // Post-increment elements by kPointerSize on each iteration.
2642 __ Str(x10, MemOperand(array, kPointerSize, PostIndex));
2643 __ Sub(param_count, param_count, 1);
2644 __ Cbnz(param_count, &loop);
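// C-like sketch of the copy loop above (illustrative; the initial
// pre-decrement skips the receiver):
//   do {
//     *array++ = *--params;
//   } while (--param_count != 0);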
2645
2646 // Return from stub.
2647 __ Bind(&done);
2648 __ Ret();
2649
2650 // Do the runtime call to allocate the arguments object.
2651 __ Bind(&runtime);
2652 __ Push(function, params, param_count_smi);
2653 __ TailCallRuntime(Runtime::kNewStrictArgumentsFast, 3, 1);
2654 }
2655
2656
2657 void RegExpExecStub::Generate(MacroAssembler* masm) {
2658 #ifdef V8_INTERPRETED_REGEXP
2659 __ TailCallRuntime(Runtime::kRegExpExec, 4, 1);
2660 #else // V8_INTERPRETED_REGEXP
2661
2662 // Stack frame on entry.
2663 // jssp[0]: last_match_info (expected JSArray)
2664 // jssp[8]: previous index
2665 // jssp[16]: subject string
2666 // jssp[24]: JSRegExp object
2667 Label runtime;
2668
2669 // Use of registers for this function.
2670
2671 // Variable registers:
2672 // x10-x13 used as scratch registers
2673 // w0 string_type type of subject string
2674 // x2 jsstring_length subject string length
2675 // x3 jsregexp_object JSRegExp object
2676 // w4 string_encoding ASCII or UC16
2677 // w5 sliced_string_offset if the string is a SlicedString
2678 // offset to the underlying string
2679 // w6 string_representation groups attributes of the string:
2680 // - is a string
2681 // - type of the string
2682 // - is a short external string
2683 Register string_type = w0;
2684 Register jsstring_length = x2;
2685 Register jsregexp_object = x3;
2686 Register string_encoding = w4;
2687 Register sliced_string_offset = w5;
2688 Register string_representation = w6;
2689
2690 // These are in callee-saved registers and will be preserved by the call
2691 // to the native RegExp code, as this code is called using the normal
2692 // C calling convention. When calling directly from generated code the
2693 // native RegExp code will not do a GC and therefore the contents of
2694 // these registers are safe to use after the call.
2695
2696 // x19 subject subject string
2697 // x20 regexp_data RegExp data (FixedArray)
2698 // x21 last_match_info_elements info relative to the last match
2699 // (FixedArray)
2700 // x22 code_object generated regexp code
2701 Register subject = x19;
2702 Register regexp_data = x20;
2703 Register last_match_info_elements = x21;
2704 Register code_object = x22;
2705
2706 // TODO(jbramley): Is it necessary to preserve these? I don't think ARM does.
2707 CPURegList used_callee_saved_registers(subject,
2708 regexp_data,
2709 last_match_info_elements,
2710 code_object);
2711 __ PushCPURegList(used_callee_saved_registers);
2712
2713 // Stack frame.
2714 // jssp[0] : x19
2715 // jssp[8] : x20
2716 // jssp[16]: x21
2717 // jssp[24]: x22
2718 // jssp[32]: last_match_info (JSArray)
2719 // jssp[40]: previous index
2720 // jssp[48]: subject string
2721 // jssp[56]: JSRegExp object
2722
2723 const int kLastMatchInfoOffset = 4 * kPointerSize;
2724 const int kPreviousIndexOffset = 5 * kPointerSize;
2725 const int kSubjectOffset = 6 * kPointerSize;
2726 const int kJSRegExpOffset = 7 * kPointerSize;
2727
2728 // Ensure that a RegExp stack is allocated.
2729 Isolate* isolate = masm->isolate();
2730 ExternalReference address_of_regexp_stack_memory_address =
2731 ExternalReference::address_of_regexp_stack_memory_address(isolate);
2732 ExternalReference address_of_regexp_stack_memory_size =
2733 ExternalReference::address_of_regexp_stack_memory_size(isolate);
2734 __ Mov(x10, Operand(address_of_regexp_stack_memory_size));
2735 __ Ldr(x10, MemOperand(x10));
2736 __ Cbz(x10, &runtime);
2737
2738 // Check that the first argument is a JSRegExp object.
2739 ASSERT(jssp.Is(__ StackPointer()));
2740 __ Peek(jsregexp_object, kJSRegExpOffset);
2741 __ JumpIfSmi(jsregexp_object, &runtime);
2742 __ JumpIfNotObjectType(jsregexp_object, x10, x10, JS_REGEXP_TYPE, &runtime);
2743
2744 // Check that the RegExp has been compiled (data contains a fixed array).
2745 __ Ldr(regexp_data, FieldMemOperand(jsregexp_object, JSRegExp::kDataOffset));
2746 if (FLAG_debug_code) {
2747 STATIC_ASSERT(kSmiTag == 0);
2748 __ Tst(regexp_data, kSmiTagMask);
2749 __ Check(ne, kUnexpectedTypeForRegExpDataFixedArrayExpected);
2750 __ CompareObjectType(regexp_data, x10, x10, FIXED_ARRAY_TYPE);
2751 __ Check(eq, kUnexpectedTypeForRegExpDataFixedArrayExpected);
2752 }
2753
2754 // Check the type of the RegExp. Only continue if type is JSRegExp::IRREGEXP.
2755 __ Ldr(x10, FieldMemOperand(regexp_data, JSRegExp::kDataTagOffset));
2756 __ Cmp(x10, Operand(Smi::FromInt(JSRegExp::IRREGEXP)));
2757 __ B(ne, &runtime);
2758
2759 // Check that the number of captures fits in the static offsets vector
2760 // buffer. We always have at least one capture for the whole match, plus
2761 // additional ones due to capturing parentheses. A capture takes 2 registers.
2762 // The number of capture registers is then (number_of_captures + 1) * 2.
2763 __ Ldrsw(x10,
2764 UntagSmiFieldMemOperand(regexp_data,
2765 JSRegExp::kIrregexpCaptureCountOffset));
2766 // Check (number_of_captures + 1) * 2 <= offsets vector size
2767 // number_of_captures * 2 <= offsets vector size - 2
2768 STATIC_ASSERT(Isolate::kJSRegexpStaticOffsetsVectorSize >= 2);
2769 __ Add(x10, x10, x10);
2770 __ Cmp(x10, Isolate::kJSRegexpStaticOffsetsVectorSize - 2);
2771 __ B(hi, &runtime);
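// Worked example (illustrative): a regexp with two capturing parentheses has
// number_of_captures == 2, so it needs (2 + 1) * 2 == 6 capture registers;
// the check above verifies number_of_captures * 2 <= vector size - 2.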
2772
2773 // Initialize offset for possibly sliced string.
2774 __ Mov(sliced_string_offset, 0);
2775
2776 ASSERT(jssp.Is(__ StackPointer()));
2777 __ Peek(subject, kSubjectOffset);
2778 __ JumpIfSmi(subject, &runtime);
2779
2780 __ Ldr(x10, FieldMemOperand(subject, HeapObject::kMapOffset));
2781 __ Ldrb(string_type, FieldMemOperand(x10, Map::kInstanceTypeOffset));
2782
2783 __ Ldr(jsstring_length, FieldMemOperand(subject, String::kLengthOffset));
2784
2785 // Handle subject string according to its encoding and representation:
2786 // (1) Sequential string? If yes, go to (5).
2787 // (2) Anything but sequential or cons? If yes, go to (6).
2788 // (3) Cons string. If the string is flat, replace subject with first string.
2789 // Otherwise bailout.
2790 // (4) Is subject external? If yes, go to (7).
2791 // (5) Sequential string. Load regexp code according to encoding.
2792 // (E) Carry on.
2793 /// [...]
2794
2795 // Deferred code at the end of the stub:
2796 // (6) Not a long external string? If yes, go to (8).
2797 // (7) External string. Make it, offset-wise, look like a sequential string.
2798 // Go to (5).
2799 // (8) Short external string or not a string? If yes, bail out to runtime.
2800 // (9) Sliced string. Replace subject with parent. Go to (4).
2801
2802 Label check_underlying; // (4)
2803 Label seq_string; // (5)
2804 Label not_seq_nor_cons; // (6)
2805 Label external_string; // (7)
2806 Label not_long_external; // (8)
2807
2808 // (1) Sequential string? If yes, go to (5).
2809 __ And(string_representation,
2810 string_type,
2811 kIsNotStringMask |
2812 kStringRepresentationMask |
2813 kShortExternalStringMask);
2814 // We depend on the fact that Strings of type
2815 // SeqString and not ShortExternalString are defined
2816 // by the following pattern:
2817 //   string_type: 0XX0 XX00
2818 //                ^  ^   ^^
2819 //                |  |   ||
2820 //                |  |   is a SeqString
2821 //                |  is not a short external String
2822 //                is a String
2823 STATIC_ASSERT((kStringTag | kSeqStringTag) == 0);
2824 STATIC_ASSERT(kShortExternalStringTag != 0);
2825 __ Cbz(string_representation, &seq_string); // Go to (5).
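// A rough C sketch of the test above (illustrative only):
//   bool is_seq_string(uint32_t string_type) {
//     uint32_t mask = kIsNotStringMask | kStringRepresentationMask |
//                     kShortExternalStringMask;
//     return (string_type & mask) == 0;
//   }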
2826
2827 // (2) Anything but sequential or cons? If yes, go to (6).
2828 STATIC_ASSERT(kConsStringTag < kExternalStringTag);
2829 STATIC_ASSERT(kSlicedStringTag > kExternalStringTag);
2830 STATIC_ASSERT(kIsNotStringMask > kExternalStringTag);
2831 STATIC_ASSERT(kShortExternalStringTag > kExternalStringTag);
2832 __ Cmp(string_representation, kExternalStringTag);
2833 __ B(ge, &not_seq_nor_cons); // Go to (6).
2834
2835 // (3) Cons string. Check that it's flat.
2836 __ Ldr(x10, FieldMemOperand(subject, ConsString::kSecondOffset));
2837 __ JumpIfNotRoot(x10, Heap::kempty_stringRootIndex, &runtime);
2838 // Replace subject with first string.
2839 __ Ldr(subject, FieldMemOperand(subject, ConsString::kFirstOffset));
2840
2841 // (4) Is subject external? If yes, go to (7).
2842 __ Bind(&check_underlying);
2843 // Reload the string type.
2844 __ Ldr(x10, FieldMemOperand(subject, HeapObject::kMapOffset));
2845 __ Ldrb(string_type, FieldMemOperand(x10, Map::kInstanceTypeOffset));
2846 STATIC_ASSERT(kSeqStringTag == 0);
2847 // The underlying external string is never a short external string.
2848 STATIC_CHECK(ExternalString::kMaxShortLength < ConsString::kMinLength);
2849 STATIC_CHECK(ExternalString::kMaxShortLength < SlicedString::kMinLength);
2850 __ TestAndBranchIfAnySet(string_type.X(),
2851 kStringRepresentationMask,
2852 &external_string); // Go to (7).
2853
2854 // (5) Sequential string. Load regexp code according to encoding.
2855 __ Bind(&seq_string);
2856
2857 // Check that the third argument is a positive smi less than the subject
2858 // string length. A negative value will be greater (unsigned comparison).
2859 ASSERT(jssp.Is(__ StackPointer()));
2860 __ Peek(x10, kPreviousIndexOffset);
2861 __ JumpIfNotSmi(x10, &runtime);
2862 __ Cmp(jsstring_length, x10);
2863 __ B(ls, &runtime);
2864
2865 // Argument 2 (x1): We need to load argument 2 (the previous index) into x1
2866 // before entering the exit frame.
2867 __ SmiUntag(x1, x10);
2868
2869 // The third bit determines the string encoding in string_type.
2870 STATIC_ASSERT(kOneByteStringTag == 0x04);
2871 STATIC_ASSERT(kTwoByteStringTag == 0x00);
2872 STATIC_ASSERT(kStringEncodingMask == 0x04);
2873
2874 // Find the code object based on the assumptions above.
2875 // kDataAsciiCodeOffset and kDataUC16CodeOffset are adjacent; add an offset
2876 // of kPointerSize to reach the latter.
2877 ASSERT_EQ(JSRegExp::kDataAsciiCodeOffset + kPointerSize,
2878 JSRegExp::kDataUC16CodeOffset);
2879 __ Mov(x10, kPointerSize);
2880 // We will need the encoding later: ASCII = 0x04
2881 // UC16 = 0x00
2882 __ Ands(string_encoding, string_type, kStringEncodingMask);
2883 __ CzeroX(x10, ne);
2884 __ Add(x10, regexp_data, x10);
2885 __ Ldr(code_object, FieldMemOperand(x10, JSRegExp::kDataAsciiCodeOffset));
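// Equivalent selection, as C-like pseudocode (illustrative only):
//   code_object = (encoding == ASCII) ? data[kDataAsciiCodeOffset]
//                                     : data[kDataUC16CodeOffset];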
2886
2887 // (E) Carry on. String handling is done.
2888
2889 // Check that the irregexp code has been generated for the actual string
2890 // encoding. If it has, the field contains a code object; otherwise it
2891 // contains a smi (code flushing support).
2892 __ JumpIfSmi(code_object, &runtime);
2893
2894 // All checks done. Now push arguments for native regexp code.
2895 __ IncrementCounter(isolate->counters()->regexp_entry_native(), 1,
2896 x10,
2897 x11);
2898
2899 // Isolates: note we add an additional parameter here (isolate pointer).
2900 __ EnterExitFrame(false, x10, 1);
2901 ASSERT(csp.Is(__ StackPointer()));
2902
2903 // We have 9 arguments to pass to the regexp code, so we have to pass one
2904 // on the stack and the rest in registers.
2905
2906 // Note that the placement of the argument on the stack isn't standard
2907 // AAPCS64:
2908 // csp[0]: Space for the return address placed by DirectCEntryStub.
2909 // csp[8]: Argument 9, the current isolate address.
2910
2911 __ Mov(x10, Operand(ExternalReference::isolate_address(isolate)));
2912 __ Poke(x10, kPointerSize);
2913
2914 Register length = w11;
2915 Register previous_index_in_bytes = w12;
2916 Register start = x13;
2917
2918 // Load start of the subject string.
2919 __ Add(start, subject, SeqString::kHeaderSize - kHeapObjectTag);
2920 // Load the length of the original subject string from the previous stack
2921 // frame. Therefore we have to use fp, which points exactly two pointer
2922 // sizes below the previous sp. (Because creating a new stack frame pushes
2923 // the previous fp onto the stack and decrements sp by 2 * kPointerSize.)
2924 __ Ldr(subject, MemOperand(fp, kSubjectOffset + 2 * kPointerSize));
2925 __ Ldr(length, UntagSmiFieldMemOperand(subject, String::kLengthOffset));
2926
2927 // Handle UC16 encoding, two bytes make one character.
2928 // string_encoding: if ASCII: 0x04
2929 // if UC16: 0x00
2930 STATIC_ASSERT(kStringEncodingMask == 0x04);
2931 __ Ubfx(string_encoding, string_encoding, 2, 1);
2932 __ Eor(string_encoding, string_encoding, 1);
2933 // string_encoding: if ASCII: 0
2934 // if UC16: 1
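// Equivalent C sketch of the Ubfx/Eor pair above (illustrative):
//   shift = ((string_encoding >> 2) & 1) ^ 1;  // ASCII: 0, UC16: 1.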
2935
2936 // Convert string positions from characters to bytes.
2937 // Previous index is in x1.
2938 __ Lsl(previous_index_in_bytes, w1, string_encoding);
2939 __ Lsl(length, length, string_encoding);
2940 __ Lsl(sliced_string_offset, sliced_string_offset, string_encoding);
2941
2942 // Argument 1 (x0): Subject string.
2943 __ Mov(x0, subject);
2944
2945 // Argument 2 (x1): Previous index, already there.
2946
2947 // Argument 3 (x2): Get the start of input.
2948 // Start of input = start of string + previous index + substring offset
2949 // (0 if the string is not sliced).
2951 __ Add(w10, previous_index_in_bytes, sliced_string_offset);
2952 __ Add(x2, start, Operand(w10, UXTW));
2953
2954 // Argument 4 (x3):
2955 // End of input = start of input + (length of input - previous index)
2956 __ Sub(w10, length, previous_index_in_bytes);
2957 __ Add(x3, x2, Operand(w10, UXTW));
2958
2959 // Argument 5 (x4): static offsets vector buffer.
2960 __ Mov(x4,
2961 Operand(ExternalReference::address_of_static_offsets_vector(isolate)));
2962
2963 // Argument 6 (x5): Set the number of capture registers to zero to force
2964 // global regexps to behave as non-global. This stub is not used for global
2965 // regexps.
2966 __ Mov(x5, 0);
2967
2968 // Argument 7 (x6): Start (high end) of backtracking stack memory area.
2969 __ Mov(x10, Operand(address_of_regexp_stack_memory_address));
2970 __ Ldr(x10, MemOperand(x10));
2971 __ Mov(x11, Operand(address_of_regexp_stack_memory_size));
2972 __ Ldr(x11, MemOperand(x11));
2973 __ Add(x6, x10, x11);
2974
2975 // Argument 8 (x7): Indicate that this is a direct call from JavaScript.
2976 __ Mov(x7, 1);
2977
2978 // Locate the code entry and call it.
2979 __ Add(code_object, code_object, Code::kHeaderSize - kHeapObjectTag);
2980 DirectCEntryStub stub;
2981 stub.GenerateCall(masm, code_object);
2982
2983 __ LeaveExitFrame(false, x10, true);
2984
2985 // The generated regexp code returns an int32 in w0.
2986 Label failure, exception;
2987 __ CompareAndBranch(w0, NativeRegExpMacroAssembler::FAILURE, eq, &failure);
2988 __ CompareAndBranch(w0,
2989 NativeRegExpMacroAssembler::EXCEPTION,
2990 eq,
2991 &exception);
2992 __ CompareAndBranch(w0, NativeRegExpMacroAssembler::RETRY, eq, &runtime);
2993
2994 // Success: process the result from the native regexp code.
2995 Register number_of_capture_registers = x12;
2996
2997 // Calculate the number of capture registers, (number_of_captures + 1) * 2,
2998 // and store it in the last match info.
2999 __ Ldrsw(x10,
3000 UntagSmiFieldMemOperand(regexp_data,
3001 JSRegExp::kIrregexpCaptureCountOffset));
3002 __ Add(x10, x10, x10);
3003 __ Add(number_of_capture_registers, x10, 2);
3004
3005 // Check that the last match info is a JSArray object.
3006 ASSERT(jssp.Is(__ StackPointer()));
3007 __ Peek(x10, kLastMatchInfoOffset);
3008 __ JumpIfSmi(x10, &runtime);
3009 __ JumpIfNotObjectType(x10, x11, x11, JS_ARRAY_TYPE, &runtime);
3010
3011 // Check that the JSArray is in the fast case.
3012 __ Ldr(last_match_info_elements,
3013 FieldMemOperand(x10, JSArray::kElementsOffset));
3014 __ Ldr(x10,
3015 FieldMemOperand(last_match_info_elements, HeapObject::kMapOffset));
3016 __ JumpIfNotRoot(x10, Heap::kFixedArrayMapRootIndex, &runtime);
3017
3018 // Check that the last match info has space for the capture registers and the
3019 // additional information (overhead).
3020 // (number_of_captures + 1) * 2 + overhead <= last match info size
3021 // (number_of_captures * 2) + 2 + overhead <= last match info size
3022 // number_of_capture_registers + overhead <= last match info size
3023 __ Ldrsw(x10,
3024 UntagSmiFieldMemOperand(last_match_info_elements,
3025 FixedArray::kLengthOffset));
3026 __ Add(x11, number_of_capture_registers, RegExpImpl::kLastMatchOverhead);
3027 __ Cmp(x11, x10);
3028 __ B(gt, &runtime);
3029
3030 // Store the capture count.
3031 __ SmiTag(x10, number_of_capture_registers);
3032 __ Str(x10,
3033 FieldMemOperand(last_match_info_elements,
3034 RegExpImpl::kLastCaptureCountOffset));
3035 // Store last subject and last input.
3036 __ Str(subject,
3037 FieldMemOperand(last_match_info_elements,
3038 RegExpImpl::kLastSubjectOffset));
3039 // Use x10 as the subject string so that only one RecordWriteStub is
3040 // needed.
3041 __ Mov(x10, subject);
3042 __ RecordWriteField(last_match_info_elements,
3043 RegExpImpl::kLastSubjectOffset,
3044 x10,
3045 x11,
3046 kLRHasNotBeenSaved,
3047 kDontSaveFPRegs);
3048 __ Str(subject,
3049 FieldMemOperand(last_match_info_elements,
3050 RegExpImpl::kLastInputOffset));
3051 __ Mov(x10, subject);
3052 __ RecordWriteField(last_match_info_elements,
3053 RegExpImpl::kLastInputOffset,
3054 x10,
3055 x11,
3056 kLRHasNotBeenSaved,
3057 kDontSaveFPRegs);
3058
3059 Register last_match_offsets = x13;
3060 Register offsets_vector_index = x14;
3061 Register current_offset = x15;
3062
3063 // Get the static offsets vector filled by the native regexp code
3064 // and fill the last match info.
3065 ExternalReference address_of_static_offsets_vector =
3066 ExternalReference::address_of_static_offsets_vector(isolate);
3067 __ Mov(offsets_vector_index, Operand(address_of_static_offsets_vector));
3068
3069 Label next_capture, done;
3070 // The capture register counter starts at the number of capture registers
3071 // and iterates down to zero (inclusive).
3072 __ Add(last_match_offsets,
3073 last_match_info_elements,
3074 RegExpImpl::kFirstCaptureOffset - kHeapObjectTag);
3075 __ Bind(&next_capture);
3076 __ Subs(number_of_capture_registers, number_of_capture_registers, 2);
3077 __ B(mi, &done);
3078 // Read two 32-bit values from the static offsets vector buffer into
3079 // one X register.
3080 __ Ldr(current_offset,
3081 MemOperand(offsets_vector_index, kWRegSizeInBytes * 2, PostIndex));
3082 // Store the smi values in the last match info.
3083 __ SmiTag(x10, current_offset);
3084 // Clearing the bottom 32 bits gives us a Smi.
3085 STATIC_ASSERT(kSmiShift == 32);
3086 __ And(x11, current_offset, ~kWRegMask);
3087 __ Stp(x10,
3088 x11,
3089 MemOperand(last_match_offsets, kXRegSizeInBytes * 2, PostIndex));
3090 __ B(&next_capture);
3091 __ Bind(&done);
3092
3093 // Return last match info.
3094 __ Peek(x0, kLastMatchInfoOffset);
3095 __ PopCPURegList(used_callee_saved_registers);
3096 // Drop the 4 arguments of the stub from the stack.
3097 __ Drop(4);
3098 __ Ret();
3099
3100 __ Bind(&exception);
3101 Register exception_value = x0;
3102 // A stack overflow (on the backtrack stack) may have occurred in the
3103 // RegExp code, but no exception has been created yet. If there is no
3104 // pending exception, handle that in the runtime system.
3105 __ Mov(x10, Operand(isolate->factory()->the_hole_value()));
3106 __ Mov(x11,
3107 Operand(ExternalReference(Isolate::kPendingExceptionAddress,
3108 isolate)));
3109 __ Ldr(exception_value, MemOperand(x11));
3110 __ Cmp(x10, exception_value);
3111 __ B(eq, &runtime);
3112
3113 __ Str(x10, MemOperand(x11)); // Clear pending exception.
3114
3115 // Check if the exception is a termination. If so, throw as uncatchable.
3116 Label termination_exception;
3117 __ JumpIfRoot(exception_value,
3118 Heap::kTerminationExceptionRootIndex,
3119 &termination_exception);
3120
3121 __ Throw(exception_value, x10, x11, x12, x13);
3122
3123 __ Bind(&termination_exception);
3124 __ ThrowUncatchable(exception_value, x10, x11, x12, x13);
3125
3126 __ Bind(&failure);
3127 __ Mov(x0, Operand(masm->isolate()->factory()->null_value()));
3128 __ PopCPURegList(used_callee_saved_registers);
3129 // Drop the 4 arguments of the stub from the stack.
3130 __ Drop(4);
3131 __ Ret();
3132
3133 __ Bind(&runtime);
3134 __ PopCPURegList(used_callee_saved_registers);
3135 __ TailCallRuntime(Runtime::kRegExpExec, 4, 1);
3136
3137 // Deferred code for string handling.
3138 // (6) Not a long external string? If yes, go to (8).
3139 __ Bind(&not_seq_nor_cons);
3140 // Compare flags are still set.
3141 __ B(ne, &not_long_external); // Go to (8).
3142
3143 // (7) External string. Make it, offset-wise, look like a sequential string.
3144 __ Bind(&external_string);
3145 if (masm->emit_debug_code()) {
3146 // Assert that we do not have a cons or slice (indirect strings) here.
3147 // Sequential strings have already been ruled out.
3148 __ Ldr(x10, FieldMemOperand(subject, HeapObject::kMapOffset));
3149 __ Ldrb(x10, FieldMemOperand(x10, Map::kInstanceTypeOffset));
3150 __ Tst(x10, kIsIndirectStringMask);
3151 __ Check(eq, kExternalStringExpectedButNotFound);
3152 __ And(x10, x10, kStringRepresentationMask);
3153 __ Cmp(x10, 0);
3154 __ Check(ne, kExternalStringExpectedButNotFound);
3155 }
3156 __ Ldr(subject,
3157 FieldMemOperand(subject, ExternalString::kResourceDataOffset));
3158 // Move the pointer so that offset-wise, it looks like a sequential string.
3159 STATIC_ASSERT(SeqTwoByteString::kHeaderSize == SeqOneByteString::kHeaderSize);
3160 __ Sub(subject, subject, SeqTwoByteString::kHeaderSize - kHeapObjectTag);
3161 __ B(&seq_string); // Go to (5).
3162
3163 // (8) If this is a short external string or not a string, bail out to
3164 // runtime.
3165 __ Bind(&not_long_external);
3166 STATIC_ASSERT(kShortExternalStringTag != 0);
3167 __ TestAndBranchIfAnySet(string_representation,
3168 kShortExternalStringMask | kIsNotStringMask,
3169 &runtime);
3170
3171 // (9) Sliced string. Replace subject with parent.
3172 __ Ldr(sliced_string_offset,
3173 UntagSmiFieldMemOperand(subject, SlicedString::kOffsetOffset));
3174 __ Ldr(subject, FieldMemOperand(subject, SlicedString::kParentOffset));
3175 __ B(&check_underlying); // Go to (4).
3176 #endif
3177 }
3178
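// Illustration only (a minimal C++ sketch, not part of the stub): the bit
// trick used in the capture-copy loop above. One 64-bit load fetches two
// consecutive 32-bit offsets; with kSmiShift == 32, the low word is
// smi-tagged with a single shift and the high word is already a valid smi
// once the bottom bits are cleared. The helper name is hypothetical.
#if 0
static void PackOffsetPairAsSmis(uint64_t pair, uint64_t* smi0,
                                 uint64_t* smi1) {
  *smi0 = pair << 32;                    // SmiTag(first offset, low word).
  *smi1 = pair & ~UINT64_C(0xFFFFFFFF);  // Clearing the bottom 32 bits
                                         // yields Smi(second offset).
}
#endif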
3179
3180 // TODO(jbramley): Don't use static registers here, but take them as arguments.
3181 static void GenerateRecordCallTarget(MacroAssembler* masm) {
3182 ASM_LOCATION("GenerateRecordCallTarget");
3183 // Cache the called function in a feedback vector slot. Cache states are
3184 // uninitialized, monomorphic (indicated by a JSFunction), and megamorphic.
3185 // x0 : number of arguments to the construct function
3186 // x1 : the function to call
3187 // x2 : feedback vector
3188 // x3 : slot in feedback vector (smi)
3189 Label initialize, done, miss, megamorphic, not_array_function;
3190
3191 ASSERT_EQ(*TypeFeedbackInfo::MegamorphicSentinel(masm->isolate()),
3192 masm->isolate()->heap()->undefined_value());
3193 ASSERT_EQ(*TypeFeedbackInfo::UninitializedSentinel(masm->isolate()),
3194 masm->isolate()->heap()->the_hole_value());
3195
3196 // Load the cache state.
3197 __ Add(x4, x2, Operand::UntagSmiAndScale(x3, kPointerSizeLog2));
3198 __ Ldr(x4, FieldMemOperand(x4, FixedArray::kHeaderSize));
3199
3200 // A monomorphic cache hit or an already megamorphic state: invoke the
3201 // function without changing the state.
3202 __ Cmp(x4, x1);
3203 __ B(eq, &done);
3204
3205 // If we came here, we need to see if we are the array function.
3206 // If we didn't have a matching function, and we didn't find the
3207 // megamorphic sentinel, then the slot contains either some other function
3208 // or an AllocationSite. Do a map check on the object in x4.
3209 __ Ldr(x5, FieldMemOperand(x4, AllocationSite::kMapOffset));
3210 __ JumpIfNotRoot(x5, Heap::kAllocationSiteMapRootIndex, &miss);
3211
3212 // Make sure the function is the Array() function.
3213 __ LoadArrayFunction(x4);
3214 __ Cmp(x1, x4);
3215 __ B(ne, &megamorphic);
3216 __ B(&done);
3217
3218 __ Bind(&miss);
3219
3220 // A monomorphic miss (i.e., the cache is not uninitialized) goes
3221 // megamorphic.
3222 __ JumpIfRoot(x4, Heap::kTheHoleValueRootIndex, &initialize);
3223 // MegamorphicSentinel is an immortal immovable object (undefined) so no
3224 // write-barrier is needed.
3225 __ Bind(&megamorphic);
3226 __ Add(x4, x2, Operand::UntagSmiAndScale(x3, kPointerSizeLog2));
3227 __ LoadRoot(x10, Heap::kUndefinedValueRootIndex);
3228 __ Str(x10, FieldMemOperand(x4, FixedArray::kHeaderSize));
3229 __ B(&done);
3230
3231 // An uninitialized cache is patched with the function or sentinel to
3232 // indicate the ElementsKind if function is the Array constructor.
3233 __ Bind(&initialize);
3234 // Make sure the function is the Array() function.
3235 __ LoadArrayFunction(x4);
3236 __ Cmp(x1, x4);
3237 __ B(ne, &not_array_function);
3238
3239 // The target function is the Array constructor. Create an AllocationSite
3240 // if we don't already have one, and store it in the slot.
3241 {
3242 FrameScope scope(masm, StackFrame::INTERNAL);
3243 CreateAllocationSiteStub create_stub;
3244
3245 // Arguments register must be smi-tagged to call out.
3246 __ SmiTag(x0);
3247 __ Push(x0, x1, x2, x3);
3248
3249 __ CallStub(&create_stub);
3250
3251 __ Pop(x3, x2, x1, x0);
3252 __ SmiUntag(x0);
3253 }
3254 __ B(&done);
3255
3256 __ Bind(&not_array_function);
3257 // An uninitialized cache is patched with the function.
3258
3259 __ Add(x4, x2, Operand::UntagSmiAndScale(x3, kPointerSizeLog2));
3260 // TODO(all): Does the value need to be left in x4? If not, FieldMemOperand
3261 // could be used to avoid this add.
3262 __ Add(x4, x4, FixedArray::kHeaderSize - kHeapObjectTag);
3263 __ Str(x1, MemOperand(x4, 0));
3264
3265 __ Push(x4, x2, x1);
3266 __ RecordWrite(x2, x4, x1, kLRHasNotBeenSaved, kDontSaveFPRegs,
3267 EMIT_REMEMBERED_SET, OMIT_SMI_CHECK);
3268 __ Pop(x1, x2, x4);
3269
3270 // TODO(all): Are x4, x2 and x1 outputs? This isn't clear.
3271
3272 __ Bind(&done);
3273 }
3274
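// Illustration only (a hedged C++ sketch): the cache-state transitions that
// GenerateRecordCallTarget implements. The sentinel helpers are hypothetical
// stand-ins for the uninitialized (the-hole) and megamorphic (undefined)
// sentinels used above.
#if 0
static void RecordCallTargetSketch(Object** slot, Object* function) {
  Object* state = *slot;
  if (state == function) return;  // Monomorphic hit: nothing to do.
  if (IsAllocationSite(state) && (function == ArrayFunction())) return;
  if (state == UninitializedSentinel()) {
    // First call: cache the target. The Array function gets an
    // AllocationSite instead of the function itself.
    *slot = (function == ArrayFunction()) ? NewAllocationSite() : function;
  } else {
    *slot = MegamorphicSentinel();  // Any other miss goes megamorphic.
  }
}
#endif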
3275
3276 void CallFunctionStub::Generate(MacroAssembler* masm) {
3277 ASM_LOCATION("CallFunctionStub::Generate");
3278 // x1 : the function to call
3279 // x2 : feedback vector
3280 // x3 : slot in feedback vector (smi) (if x2 is not undefined)
3281 Register function = x1;
3282 Register cache_cell = x2;
3283 Register slot = x3;
3284 Register type = x4;
3285 Label slow, non_function, wrap, cont;
3286
3287 // TODO(jbramley): This function has a lot of unnamed registers. Name them,
3288 // and tidy things up a bit.
3289
3290 if (NeedsChecks()) {
3291 // Check that the function is really a JavaScript function.
3292 __ JumpIfSmi(function, &non_function);
3293
3294 // Goto slow case if we do not have a function.
3295 __ JumpIfNotObjectType(function, x10, type, JS_FUNCTION_TYPE, &slow);
3296
3297 if (RecordCallTarget()) {
3298 GenerateRecordCallTarget(masm);
3299 }
3300 }
3301
3302 // Fast-case: Invoke the function now.
3303 // x1 : pushed function
3304 ParameterCount actual(argc_);
3305
3306 if (CallAsMethod()) {
3307 if (NeedsChecks()) {
3308 // Do not transform the receiver for strict mode functions.
3309 __ Ldr(x3, FieldMemOperand(x1, JSFunction::kSharedFunctionInfoOffset));
3310 __ Ldr(w4, FieldMemOperand(x3, SharedFunctionInfo::kCompilerHintsOffset));
3311 __ Tbnz(w4, SharedFunctionInfo::kStrictModeFunction, &cont);
3312
3313 // Do not transform the receiver for natives (compiler hints already in w4).
3314 __ Tbnz(w4, SharedFunctionInfo::kNative, &cont);
3315 }
3316
3317 // Compute the receiver in non-strict mode.
3318 __ Peek(x3, argc_ * kPointerSize);
3319
3320 if (NeedsChecks()) {
3321 __ JumpIfSmi(x3, &wrap);
3322 __ JumpIfObjectType(x3, x10, type, FIRST_SPEC_OBJECT_TYPE, &wrap, lt);
3323 } else {
3324 __ B(&wrap);
3325 }
3326
3327 __ Bind(&cont);
3328 }
3329 __ InvokeFunction(function,
3330 actual,
3331 JUMP_FUNCTION,
3332 NullCallWrapper());
3333
3334 if (NeedsChecks()) {
3335 // Slow-case: Non-function called.
3336 __ Bind(&slow);
3337 if (RecordCallTarget()) {
3338 // If there is a call target cache, mark it megamorphic in the
3339 // non-function case. MegamorphicSentinel is an immortal immovable object
3340 // (undefined) so no write barrier is needed.
3341 ASSERT_EQ(*TypeFeedbackInfo::MegamorphicSentinel(masm->isolate()),
3342 masm->isolate()->heap()->undefined_value());
3343 __ Add(x12, cache_cell, Operand::UntagSmiAndScale(slot,
3344 kPointerSizeLog2));
3345 __ LoadRoot(x11, Heap::kUndefinedValueRootIndex);
3346 __ Str(x11, FieldMemOperand(x12, FixedArray::kHeaderSize));
3347 }
3348 // Check for function proxy.
3349 // x4 : function type.
3350 __ CompareAndBranch(type, JS_FUNCTION_PROXY_TYPE, ne, &non_function);
3351 __ Push(function); // Put the proxy as an additional argument.
3352 __ Mov(x0, argc_ + 1);
3353 __ Mov(x2, 0);
3354 __ GetBuiltinFunction(x1, Builtins::CALL_FUNCTION_PROXY);
3355 {
3356 Handle<Code> adaptor =
3357 masm->isolate()->builtins()->ArgumentsAdaptorTrampoline();
3358 __ Jump(adaptor, RelocInfo::CODE_TARGET);
3359 }
3360
3361 // CALL_NON_FUNCTION expects the non-function callee as receiver (instead
3362 // of the original receiver from the call site).
3363 __ Bind(&non_function);
3364 __ Poke(function, argc_ * kXRegSizeInBytes);
3365 __ Mov(x0, argc_); // Set up the number of arguments.
3366 __ Mov(x2, 0);
3367 __ GetBuiltinFunction(function, Builtins::CALL_NON_FUNCTION);
3368 __ Jump(masm->isolate()->builtins()->ArgumentsAdaptorTrampoline(),
3369 RelocInfo::CODE_TARGET);
3370 }
3371
3372 if (CallAsMethod()) {
3373 __ Bind(&wrap);
3374 // Wrap the receiver and patch it back onto the stack.
3375 { FrameScope frame_scope(masm, StackFrame::INTERNAL);
3376 __ Push(x1, x3);
3377 __ InvokeBuiltin(Builtins::TO_OBJECT, CALL_FUNCTION);
3378 __ Pop(x1);
3379 }
3380 __ Poke(x0, argc_ * kPointerSize);
3381 __ B(&cont);
3382 }
3383 }
3384
3385
3386 void CallConstructStub::Generate(MacroAssembler* masm) {
3387 ASM_LOCATION("CallConstructStub::Generate");
3388 // x0 : number of arguments
3389 // x1 : the function to call
3390 // x2 : feedback vector
3391 // x3 : slot in feedback vector (smi) (if x2 is not undefined)
3392 Register function = x1;
3393 Label slow, non_function_call;
3394
3395 // Check that the function is not a smi.
3396 __ JumpIfSmi(function, &non_function_call);
3397 // Check that the function is a JSFunction.
3398 Register object_type = x10;
3399 __ JumpIfNotObjectType(function, object_type, object_type, JS_FUNCTION_TYPE,
3400 &slow);
3401
3402 if (RecordCallTarget()) {
3403 GenerateRecordCallTarget(masm);
3404 }
3405
3406 // Jump to the function-specific construct stub.
3407 Register jump_reg = x4;
3408 Register shared_func_info = jump_reg;
3409 Register cons_stub = jump_reg;
3410 Register cons_stub_code = jump_reg;
3411 __ Ldr(shared_func_info,
3412 FieldMemOperand(function, JSFunction::kSharedFunctionInfoOffset));
3413 __ Ldr(cons_stub,
3414 FieldMemOperand(shared_func_info,
3415 SharedFunctionInfo::kConstructStubOffset));
3416 __ Add(cons_stub_code, cons_stub, Code::kHeaderSize - kHeapObjectTag);
3417 __ Br(cons_stub_code);
3418
3419 Label do_call;
3420 __ Bind(&slow);
3421 __ Cmp(object_type, JS_FUNCTION_PROXY_TYPE);
3422 __ B(ne, &non_function_call);
3423 __ GetBuiltinFunction(x1, Builtins::CALL_FUNCTION_PROXY_AS_CONSTRUCTOR);
3424 __ B(&do_call);
3425
3426 __ Bind(&non_function_call);
3427 __ GetBuiltinFunction(x1, Builtins::CALL_NON_FUNCTION_AS_CONSTRUCTOR);
3428
3429 __ Bind(&do_call);
3430 // Set expected number of arguments to zero (not changing x0).
3431 __ Mov(x2, 0);
3432 __ Jump(masm->isolate()->builtins()->ArgumentsAdaptorTrampoline(),
3433 RelocInfo::CODE_TARGET);
3434 }
3435
3436
3437 void StringCharCodeAtGenerator::GenerateFast(MacroAssembler* masm) {
3438 // If the receiver is a smi, trigger the non-string case.
3439 __ JumpIfSmi(object_, receiver_not_string_);
3440
3441 // Fetch the instance type of the receiver into result register.
3442 __ Ldr(result_, FieldMemOperand(object_, HeapObject::kMapOffset));
3443 __ Ldrb(result_, FieldMemOperand(result_, Map::kInstanceTypeOffset));
3444
3445 // If the receiver is not a string, trigger the non-string case.
3446 __ TestAndBranchIfAnySet(result_, kIsNotStringMask, receiver_not_string_);
3447
3448 // If the index is not a smi, trigger the non-smi case.
3449 __ JumpIfNotSmi(index_, &index_not_smi_);
3450
3451 __ Bind(&got_smi_index_);
3452 // Check for index out of range.
3453 __ Ldrsw(result_, UntagSmiFieldMemOperand(object_, String::kLengthOffset));
3454 __ Cmp(result_, Operand::UntagSmi(index_));
3455 __ B(ls, index_out_of_range_);
3456
3457 __ SmiUntag(index_);
3458
3459 StringCharLoadGenerator::Generate(masm,
3460 object_,
3461 index_,
3462 result_,
3463 &call_runtime_);
3464 __ SmiTag(result_);
3465 __ Bind(&exit_);
3466 }
3467
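// Illustration only: the bounds check above relies on an unsigned
// comparison. After Cmp(length, untagged index), branching on "ls" rejects
// index >= length and, because a negative index becomes a huge unsigned
// value, any negative index as well. A C++ sketch of the same test:
#if 0
static bool IndexInRange(int64_t length, int64_t index) {
  return static_cast<uint64_t>(index) < static_cast<uint64_t>(length);
}
#endif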
3468
3469 void StringCharCodeAtGenerator::GenerateSlow(
3470 MacroAssembler* masm,
3471 const RuntimeCallHelper& call_helper) {
3472 __ Abort(kUnexpectedFallthroughToCharCodeAtSlowCase);
3473
3474 __ Bind(&index_not_smi_);
3475 // If index is a heap number, try converting it to an integer.
3476 __ CheckMap(index_,
3477 result_,
3478 Heap::kHeapNumberMapRootIndex,
3479 index_not_number_,
3480 DONT_DO_SMI_CHECK);
3481 call_helper.BeforeCall(masm);
3482 // Save object_ on the stack and pass index_ as argument for runtime call.
3483 __ Push(object_, index_);
3484 if (index_flags_ == STRING_INDEX_IS_NUMBER) {
3485 __ CallRuntime(Runtime::kNumberToIntegerMapMinusZero, 1);
3486 } else {
3487 ASSERT(index_flags_ == STRING_INDEX_IS_ARRAY_INDEX);
3488 // NumberToSmi discards numbers that are not exact integers.
3489 __ CallRuntime(Runtime::kNumberToSmi, 1);
3490 }
3491 // Save the conversion result before the pop instructions below
3492 // have a chance to overwrite it.
3493 __ Mov(index_, x0);
3494 __ Pop(object_);
3495 // Reload the instance type.
3496 __ Ldr(result_, FieldMemOperand(object_, HeapObject::kMapOffset));
3497 __ Ldrb(result_, FieldMemOperand(result_, Map::kInstanceTypeOffset));
3498 call_helper.AfterCall(masm);
3499
3500 // If index is still not a smi, it must be out of range.
3501 __ JumpIfNotSmi(index_, index_out_of_range_);
3502 // Otherwise, return to the fast path.
3503 __ B(&got_smi_index_);
3504
3505 // Call runtime. We get here when the receiver is a string and the
3506 // index is a number, but the code for getting the actual character
3507 // is too complex (e.g., when the string needs to be flattened).
3508 __ Bind(&call_runtime_);
3509 call_helper.BeforeCall(masm);
3510 __ SmiTag(index_);
3511 __ Push(object_, index_);
3512 __ CallRuntime(Runtime::kStringCharCodeAt, 2);
3513 __ Mov(result_, x0);
3514 call_helper.AfterCall(masm);
3515 __ B(&exit_);
3516
3517 __ Abort(kUnexpectedFallthroughFromCharCodeAtSlowCase);
3518 }
3519
3520
3521 void StringCharFromCodeGenerator::GenerateFast(MacroAssembler* masm) {
3522 __ JumpIfNotSmi(code_, &slow_case_);
3523 __ Cmp(code_, Operand(Smi::FromInt(String::kMaxOneByteCharCode)));
3524 __ B(hi, &slow_case_);
3525
3526 __ LoadRoot(result_, Heap::kSingleCharacterStringCacheRootIndex);
3527 // At this point the code register contains a smi-tagged ASCII char code.
3528 STATIC_ASSERT(kSmiShift > kPointerSizeLog2);
3529 __ Add(result_, result_, Operand(code_, LSR, kSmiShift - kPointerSizeLog2));
3530 __ Ldr(result_, FieldMemOperand(result_, FixedArray::kHeaderSize));
3531 __ JumpIfRoot(result_, Heap::kUndefinedValueRootIndex, &slow_case_);
3532 __ Bind(&exit_);
3533 }
3534
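// Illustration only: the single LSR above untags the smi char code and
// scales it to a byte offset in one shift. With kSmiShift == 32 and
// kPointerSizeLog2 == 3, Smi(c) >> (32 - 3) == c * 8, the offset of
// element c in the pointer-sized cache array.
#if 0
static uint64_t CacheByteOffset(uint64_t smi_code) {
  return smi_code >> (32 - 3);  // == (smi_code >> 32) << 3.
}
#endif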
3535
3536 void StringCharFromCodeGenerator::GenerateSlow(
3537 MacroAssembler* masm,
3538 const RuntimeCallHelper& call_helper) {
3539 __ Abort(kUnexpectedFallthroughToCharFromCodeSlowCase);
3540
3541 __ Bind(&slow_case_);
3542 call_helper.BeforeCall(masm);
3543 __ Push(code_);
3544 __ CallRuntime(Runtime::kCharFromCode, 1);
3545 __ Mov(result_, x0);
3546 call_helper.AfterCall(masm);
3547 __ B(&exit_);
3548
3549 __ Abort(kUnexpectedFallthroughFromCharFromCodeSlowCase);
3550 }
3551
3552
3553 void ICCompareStub::GenerateSmis(MacroAssembler* masm) {
3554 // Inputs are in x1 (lhs) and x0 (rhs).
3555 ASSERT(state_ == CompareIC::SMI);
3556 ASM_LOCATION("ICCompareStub[Smis]");
3557 Label miss;
3558 // Bail out (to 'miss') unless both x0 and x1 are smis.
3559 __ JumpIfEitherNotSmi(x0, x1, &miss);
3560
3561 // TODO(jbramley): Why do we only set the flags for EQ?
3562 if (GetCondition() == eq) {
3563 // For equality we do not care about the sign of the result.
3564 __ Subs(x0, x0, x1);
3565 } else {
3566 // Untag before subtracting to avoid handling overflow.
3567 __ SmiUntag(x1);
3568 __ Sub(x0, x1, Operand::UntagSmi(x0));
3569 }
3570 __ Ret();
3571
3572 __ Bind(&miss);
3573 GenerateMiss(masm);
3574 }
3575
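// Illustration only (a minimal sketch): why the ordered case above untags
// before subtracting. A tagged smi is value << 32, so the difference of two
// tagged smis can need 65 bits; the untagged difference always fits in
// 64 bits, and its sign is the comparison result.
#if 0
static int64_t CompareSmisSketch(int64_t lhs_tagged, int64_t rhs_tagged) {
  int64_t lhs = lhs_tagged >> 32;  // Arithmetic shift untags.
  int64_t rhs = rhs_tagged >> 32;
  return lhs - rhs;  // < 0, 0 or > 0: LESS, EQUAL or GREATER.
}
#endif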
3576
3577 void ICCompareStub::GenerateNumbers(MacroAssembler* masm) {
3578 ASSERT(state_ == CompareIC::NUMBER);
3579 ASM_LOCATION("ICCompareStub[HeapNumbers]");
3580
3581 Label unordered, maybe_undefined1, maybe_undefined2;
3582 Label miss, handle_lhs, values_in_d_regs;
3583 Label untag_rhs, untag_lhs;
3584
3585 Register result = x0;
3586 Register rhs = x0;
3587 Register lhs = x1;
3588 FPRegister rhs_d = d0;
3589 FPRegister lhs_d = d1;
3590
3591 if (left_ == CompareIC::SMI) {
3592 __ JumpIfNotSmi(lhs, &miss);
3593 }
3594 if (right_ == CompareIC::SMI) {
3595 __ JumpIfNotSmi(rhs, &miss);
3596 }
3597
3598 __ SmiUntagToDouble(rhs_d, rhs, kSpeculativeUntag);
3599 __ SmiUntagToDouble(lhs_d, lhs, kSpeculativeUntag);
3600
3601 // Load rhs if it's a heap number.
3602 __ JumpIfSmi(rhs, &handle_lhs);
3603 __ CheckMap(rhs, x10, Heap::kHeapNumberMapRootIndex, &maybe_undefined1,
3604 DONT_DO_SMI_CHECK);
3605 __ Ldr(rhs_d, FieldMemOperand(rhs, HeapNumber::kValueOffset));
3606
3607 // Load lhs if it's a heap number.
3608 __ Bind(&handle_lhs);
3609 __ JumpIfSmi(lhs, &values_in_d_regs);
3610 __ CheckMap(lhs, x10, Heap::kHeapNumberMapRootIndex, &maybe_undefined2,
3611 DONT_DO_SMI_CHECK);
3612 __ Ldr(lhs_d, FieldMemOperand(lhs, HeapNumber::kValueOffset));
3613
3614 __ Bind(&values_in_d_regs);
3615 __ Fcmp(lhs_d, rhs_d);
3616 __ B(vs, &unordered); // Overflow flag set if either is NaN.
3617 STATIC_ASSERT((LESS == -1) && (EQUAL == 0) && (GREATER == 1));
3618 __ Cset(result, gt); // gt => 1, otherwise (lt, eq) => 0 (EQUAL).
3619 __ Csinv(result, result, xzr, ge); // lt => -1, gt => 1, eq => 0.
3620 __ Ret();
3621
3622 __ Bind(&unordered);
3623 ICCompareStub stub(op_, CompareIC::GENERIC, CompareIC::GENERIC,
3624 CompareIC::GENERIC);
3625 __ Jump(stub.GetCode(masm->isolate()), RelocInfo::CODE_TARGET);
3626
3627 __ Bind(&maybe_undefined1);
3628 if (Token::IsOrderedRelationalCompareOp(op_)) {
3629 __ JumpIfNotRoot(rhs, Heap::kUndefinedValueRootIndex, &miss);
3630 __ JumpIfSmi(lhs, &unordered);
3631 __ JumpIfNotObjectType(lhs, x10, x10, HEAP_NUMBER_TYPE, &maybe_undefined2);
3632 __ B(&unordered);
3633 }
3634
3635 __ Bind(&maybe_undefined2);
3636 if (Token::IsOrderedRelationalCompareOp(op_)) {
3637 __ JumpIfRoot(lhs, Heap::kUndefinedValueRootIndex, &unordered);
3638 }
3639
3640 __ Bind(&miss);
3641 GenerateMiss(masm);
3642 }
3643
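// Illustration only: the Cset/Csinv pair above maps the Fcmp flags to the
// -1 / 0 / 1 result expected by the comparison protocol (NaN was already
// filtered out via the "vs" branch). A C++ sketch of the same mapping:
#if 0
static int DoubleCompareSketch(double lhs, double rhs) {
  int result = (lhs > rhs) ? 1 : 0;  // Cset(result, gt).
  if (!(lhs >= rhs)) result = -1;    // Csinv(result, result, xzr, ge).
  return result;                     // LESS, EQUAL or GREATER.
}
#endif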
3644
3645 void ICCompareStub::GenerateInternalizedStrings(MacroAssembler* masm) {
3646 ASSERT(state_ == CompareIC::INTERNALIZED_STRING);
3647 ASM_LOCATION("ICCompareStub[InternalizedStrings]");
3648 Label miss;
3649
3650 Register result = x0;
3651 Register rhs = x0;
3652 Register lhs = x1;
3653
3654 // Check that both operands are heap objects.
3655 __ JumpIfEitherSmi(lhs, rhs, &miss);
3656
3657 // Check that both operands are internalized strings.
3658 Register rhs_map = x10;
3659 Register lhs_map = x11;
3660 Register rhs_type = x10;
3661 Register lhs_type = x11;
3662 __ Ldr(lhs_map, FieldMemOperand(lhs, HeapObject::kMapOffset));
3663 __ Ldr(rhs_map, FieldMemOperand(rhs, HeapObject::kMapOffset));
3664 __ Ldrb(lhs_type, FieldMemOperand(lhs_map, Map::kInstanceTypeOffset));
3665 __ Ldrb(rhs_type, FieldMemOperand(rhs_map, Map::kInstanceTypeOffset));
3666
3667 STATIC_ASSERT((kInternalizedTag == 0) && (kStringTag == 0));
3668 __ Orr(x12, lhs_type, rhs_type);
3669 __ TestAndBranchIfAnySet(
3670 x12, kIsNotStringMask | kIsNotInternalizedMask, &miss);
3671
3672 // Internalized strings are compared by identity.
3673 STATIC_ASSERT(EQUAL == 0);
3674 __ Cmp(lhs, rhs);
3675 __ Cset(result, ne);
3676 __ Ret();
3677
3678 __ Bind(&miss);
3679 GenerateMiss(masm);
3680 }
3681
3682
3683 void ICCompareStub::GenerateUniqueNames(MacroAssembler* masm) {
3684 ASSERT(state_ == CompareIC::UNIQUE_NAME);
3685 ASM_LOCATION("ICCompareStub[UniqueNames]");
3686 ASSERT(GetCondition() == eq);
3687 Label miss;
3688
3689 Register result = x0;
3690 Register rhs = x0;
3691 Register lhs = x1;
3692
3693 Register lhs_instance_type = w2;
3694 Register rhs_instance_type = w3;
3695
3696 // Check that both operands are heap objects.
3697 __ JumpIfEitherSmi(lhs, rhs, &miss);
3698
3699 // Check that both operands are unique names. This leaves the instance
3700 // types loaded in lhs_instance_type and rhs_instance_type.
3701 __ Ldr(x10, FieldMemOperand(lhs, HeapObject::kMapOffset));
3702 __ Ldr(x11, FieldMemOperand(rhs, HeapObject::kMapOffset));
3703 __ Ldrb(lhs_instance_type, FieldMemOperand(x10, Map::kInstanceTypeOffset));
3704 __ Ldrb(rhs_instance_type, FieldMemOperand(x11, Map::kInstanceTypeOffset));
3705
3706 // To avoid a miss, each instance type should be either SYMBOL_TYPE or it
3707 // should have kInternalizedTag set.
3708 __ JumpIfNotUniqueName(lhs_instance_type, &miss);
3709 __ JumpIfNotUniqueName(rhs_instance_type, &miss);
3710
3711 // Unique names are compared by identity.
3712 STATIC_ASSERT(EQUAL == 0);
3713 __ Cmp(lhs, rhs);
3714 __ Cset(result, ne);
3715 __ Ret();
3716
3717 __ Bind(&miss);
3718 GenerateMiss(masm);
3719 }
3720
3721
3722 void ICCompareStub::GenerateStrings(MacroAssembler* masm) {
3723 ASSERT(state_ == CompareIC::STRING);
3724 ASM_LOCATION("ICCompareStub[Strings]");
3725
3726 Label miss;
3727
3728 bool equality = Token::IsEqualityOp(op_);
3729
3730 Register result = x0;
3731 Register rhs = x0;
3732 Register lhs = x1;
3733
3734 // Check that both operands are heap objects.
3735 __ JumpIfEitherSmi(rhs, lhs, &miss);
3736
3737 // Check that both operands are strings.
3738 Register rhs_map = x10;
3739 Register lhs_map = x11;
3740 Register rhs_type = x10;
3741 Register lhs_type = x11;
3742 __ Ldr(lhs_map, FieldMemOperand(lhs, HeapObject::kMapOffset));
3743 __ Ldr(rhs_map, FieldMemOperand(rhs, HeapObject::kMapOffset));
3744 __ Ldrb(lhs_type, FieldMemOperand(lhs_map, Map::kInstanceTypeOffset));
3745 __ Ldrb(rhs_type, FieldMemOperand(rhs_map, Map::kInstanceTypeOffset));
3746 STATIC_ASSERT(kNotStringTag != 0);
3747 __ Orr(x12, lhs_type, rhs_type);
3748 __ Tbnz(x12, MaskToBit(kIsNotStringMask), &miss);
3749
3750 // Fast check for identical strings.
3751 Label not_equal;
3752 __ Cmp(lhs, rhs);
3753 __ B(ne, &not_equal);
3754 __ Mov(result, EQUAL);
3755 __ Ret();
3756
3757 __ Bind(&not_equal);
3758 // Handle strings that are not identical.
3759
3760 // Check that both strings are internalized strings. If they are, we're done
3761 // because we already know they are not identical. We know they are both
3762 // strings.
3763 if (equality) {
3764 ASSERT(GetCondition() == eq);
3765 STATIC_ASSERT(kInternalizedTag == 0);
3766 Label not_internalized_strings;
3767 __ Orr(x12, lhs_type, rhs_type);
3768 __ TestAndBranchIfAnySet(
3769 x12, kIsNotInternalizedMask, &not_internalized_strings);
3770 // Result is in rhs (x0), and not EQUAL, as rhs is not a smi.
3771 __ Ret();
3772 __ Bind(&not_internalized_strings);
3773 }
3774
3775 // Check that both strings are sequential ASCII.
3776 Label runtime;
3777 __ JumpIfBothInstanceTypesAreNotSequentialAscii(
3778 lhs_type, rhs_type, x12, x13, &runtime);
3779
3780 // Compare flat ASCII strings. Returns when done.
3781 if (equality) {
3782 StringCompareStub::GenerateFlatAsciiStringEquals(
3783 masm, lhs, rhs, x10, x11, x12);
3784 } else {
3785 StringCompareStub::GenerateCompareFlatAsciiStrings(
3786 masm, lhs, rhs, x10, x11, x12, x13);
3787 }
3788
3789 // Handle more complex cases in runtime.
3790 __ Bind(&runtime);
3791 __ Push(lhs, rhs);
3792 if (equality) {
3793 __ TailCallRuntime(Runtime::kStringEquals, 2, 1);
3794 } else {
3795 __ TailCallRuntime(Runtime::kStringCompare, 2, 1);
3796 }
3797
3798 __ Bind(&miss);
3799 GenerateMiss(masm);
3800 }
3801
3802
3803 void ICCompareStub::GenerateObjects(MacroAssembler* masm) {
3804 ASSERT(state_ == CompareIC::OBJECT);
3805 ASM_LOCATION("ICCompareStub[Objects]");
3806
3807 Label miss;
3808
3809 Register result = x0;
3810 Register rhs = x0;
3811 Register lhs = x1;
3812
3813 __ JumpIfEitherSmi(rhs, lhs, &miss);
3814
3815 __ JumpIfNotObjectType(rhs, x10, x10, JS_OBJECT_TYPE, &miss);
3816 __ JumpIfNotObjectType(lhs, x10, x10, JS_OBJECT_TYPE, &miss);
3817
3818 ASSERT(GetCondition() == eq);
3819 __ Sub(result, rhs, lhs);
3820 __ Ret();
3821
3822 __ Bind(&miss);
3823 GenerateMiss(masm);
3824 }
3825
3826
3827 void ICCompareStub::GenerateKnownObjects(MacroAssembler* masm) {
3828 ASM_LOCATION("ICCompareStub[KnownObjects]");
3829
3830 Label miss;
3831
3832 Register result = x0;
3833 Register rhs = x0;
3834 Register lhs = x1;
3835
3836 __ JumpIfEitherSmi(rhs, lhs, &miss);
3837
3838 Register rhs_map = x10;
3839 Register lhs_map = x11;
3840 __ Ldr(rhs_map, FieldMemOperand(rhs, HeapObject::kMapOffset));
3841 __ Ldr(lhs_map, FieldMemOperand(lhs, HeapObject::kMapOffset));
3842 __ Cmp(rhs_map, Operand(known_map_));
3843 __ B(ne, &miss);
3844 __ Cmp(lhs_map, Operand(known_map_));
3845 __ B(ne, &miss);
3846
3847 __ Sub(result, rhs, lhs);
3848 __ Ret();
3849
3850 __ Bind(&miss);
3851 GenerateMiss(masm);
3852 }
3853
3854
3855 // This method handles the case where a compare stub had the wrong
3856 // implementation. It calls a miss handler, which re-writes the stub. All other
3857 // ICCompareStub::Generate* methods should fall back into this one if their
3858 // operands were not the expected types.
3859 void ICCompareStub::GenerateMiss(MacroAssembler* masm) {
3860 ASM_LOCATION("ICCompareStub[Miss]");
3861
3862 Register stub_entry = x11;
3863 {
3864 ExternalReference miss =
3865 ExternalReference(IC_Utility(IC::kCompareIC_Miss), masm->isolate());
3866
3867 FrameScope scope(masm, StackFrame::INTERNAL);
3868 Register op = x10;
3869 Register left = x1;
3870 Register right = x0;
3871 // Preserve some caller-saved registers.
3872 __ Push(x1, x0, lr);
3873 // Push the arguments.
3874 __ Mov(op, Operand(Smi::FromInt(op_)));
3875 __ Push(left, right, op);
3876
3877 // Call the miss handler. This also pops the arguments.
3878 __ CallExternalReference(miss, 3);
3879
3880 // Compute the entry point of the rewritten stub.
3881 __ Add(stub_entry, x0, Code::kHeaderSize - kHeapObjectTag);
3882 // Restore caller-saved registers.
3883 __ Pop(lr, x0, x1);
3884 }
3885
3886 // Tail-call to the new stub.
3887 __ Jump(stub_entry);
3888 }
3889
3890
3891 void StringHelper::GenerateHashInit(MacroAssembler* masm,
3892 Register hash,
3893 Register character) {
3894 ASSERT(!AreAliased(hash, character));
3895
3896 // hash = seed + character;
3897 __ LoadRoot(hash, Heap::kHashSeedRootIndex);
3898 // Untag smi seed and add the character.
3899 __ Add(hash, character, Operand(hash, LSR, kSmiShift));
3900
3901 // Compute hashes modulo 2^32 using a 32-bit W register.
3902 Register hash_w = hash.W();
3903
3904 // hash += hash << 10;
3905 __ Add(hash_w, hash_w, Operand(hash_w, LSL, 10));
3906 // hash ^= hash >> 6;
3907 __ Eor(hash_w, hash_w, Operand(hash_w, LSR, 6));
3908 }
3909
3910
3911 void StringHelper::GenerateHashAddCharacter(MacroAssembler* masm,
3912 Register hash,
3913 Register character) {
3914 ASSERT(!AreAliased(hash, character));
3915
3916 // hash += character;
3917 __ Add(hash, hash, character);
3918
3919 // Compute hashes modulo 2^32 using a 32-bit W register.
3920 Register hash_w = hash.W();
3921
3922 // hash += hash << 10;
3923 __ Add(hash_w, hash_w, Operand(hash_w, LSL, 10));
3924 // hash ^= hash >> 6;
3925 __ Eor(hash_w, hash_w, Operand(hash_w, LSR, 6));
3926 }
3927
3928
3929 void StringHelper::GenerateHashGetHash(MacroAssembler* masm,
3930 Register hash,
3931 Register scratch) {
3932 // Compute hashes modulo 2^32 using a 32-bit W register.
3933 Register hash_w = hash.W();
3934 Register scratch_w = scratch.W();
3935 ASSERT(!AreAliased(hash_w, scratch_w));
3936
3937 // hash += hash << 3;
3938 __ Add(hash_w, hash_w, Operand(hash_w, LSL, 3));
3939 // hash ^= hash >> 11;
3940 __ Eor(hash_w, hash_w, Operand(hash_w, LSR, 11));
3941 // hash += hash << 15;
3942 __ Add(hash_w, hash_w, Operand(hash_w, LSL, 15));
3943
3944 __ Ands(hash_w, hash_w, String::kHashBitMask);
3945
3946 // if (hash == 0) hash = 27;
3947 __ Mov(scratch_w, StringHasher::kZeroHash);
3948 __ Csel(hash_w, scratch_w, hash_w, eq);
3949 }
3950
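// Illustration only (a C++ sketch): the hash that the three generators
// above compute together, assuming the 32-bit seed has already been
// untagged as in GenerateHashInit. The helper name is hypothetical.
#if 0
static uint32_t StringHashSketch(const uint8_t* chars, int length,
                                 uint32_t seed) {
  uint32_t hash = seed + chars[0];    // GenerateHashInit.
  hash += hash << 10;
  hash ^= hash >> 6;
  for (int i = 1; i < length; i++) {  // GenerateHashAddCharacter.
    hash += chars[i];
    hash += hash << 10;
    hash ^= hash >> 6;
  }
  hash += hash << 3;                  // GenerateHashGetHash.
  hash ^= hash >> 11;
  hash += hash << 15;
  hash &= String::kHashBitMask;
  return (hash == 0) ? StringHasher::kZeroHash : hash;  // kZeroHash == 27.
}
#endif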
3951
3952 void SubStringStub::Generate(MacroAssembler* masm) {
3953 ASM_LOCATION("SubStringStub::Generate");
3954 Label runtime;
3955
3956 // Stack frame on entry.
3957 // lr: return address
3958 // jssp[0]: substring "to" offset
3959 // jssp[8]: substring "from" offset
3960 // jssp[16]: pointer to string object
3961
3962 // This stub is called from the native-call %_SubString(...), so
3963 // nothing can be assumed about the arguments. We test that:
3964 // "string" is a sequential string,
3965 // both "from" and "to" are smis, and
3966 // 0 <= from <= to <= string.length (in debug mode).
3967 // If any of these checks fail, we call the runtime system.
3968
3969 static const int kToOffset = 0 * kPointerSize;
3970 static const int kFromOffset = 1 * kPointerSize;
3971 static const int kStringOffset = 2 * kPointerSize;
3972
3973 Register to = x0;
3974 Register from = x15;
3975 Register input_string = x10;
3976 Register input_length = x11;
3977 Register input_type = x12;
3978 Register result_string = x0;
3979 Register result_length = x1;
3980 Register temp = x3;
3981
3982 __ Peek(to, kToOffset);
3983 __ Peek(from, kFromOffset);
3984
3985 // Check that both from and to are smis. If not, jump to runtime.
3986 __ JumpIfEitherNotSmi(from, to, &runtime);
3987 __ SmiUntag(from);
3988 __ SmiUntag(to);
3989
3990 // Calculate the difference between from and to. If to < from, branch to
3991 // runtime.
3991 __ Subs(result_length, to, from);
3992 __ B(mi, &runtime);
3993
3994 // Check that from is non-negative.
3995 __ Tbnz(from, kWSignBit, &runtime);
3996
3997 // Make sure first argument is a string.
3998 __ Peek(input_string, kStringOffset);
3999 __ JumpIfSmi(input_string, &runtime);
4000 __ IsObjectJSStringType(input_string, input_type, &runtime);
4001
4002 Label single_char;
4003 __ Cmp(result_length, 1);
4004 __ B(eq, &single_char);
4005
4006 // Shortcut for the trivial-substring case.
4007 Label return_x0;
4008 __ Ldrsw(input_length,
4009 UntagSmiFieldMemOperand(input_string, String::kLengthOffset));
4010
4011 __ Cmp(result_length, input_length);
4012 __ CmovX(x0, input_string, eq);
4013 // Return original string.
4014 __ B(eq, &return_x0);
4015
4016 // Longer than original string's length or negative: unsafe arguments.
4017 __ B(hi, &runtime);
4018
4019 // Shorter than original string's length: an actual substring.
4020
4021 // x0 to substring end character offset
4022 // x1 result_length length of substring result
4023 // x10 input_string pointer to input string object
4024 // x10 unpacked_string pointer to unpacked string object
4025 // x11 input_length length of input string
4026 // x12 input_type instance type of input string
4027 // x15 from substring start character offset
4028
4029 // Deal with different string types: update the index if necessary and put
4030 // the underlying string into register unpacked_string.
4031 Label underlying_unpacked, sliced_string, seq_or_external_string;
4032 Label update_instance_type;
4033 // If the string is not indirect, it can only be sequential or external.
4034 STATIC_ASSERT(kIsIndirectStringMask == (kSlicedStringTag & kConsStringTag));
4035 STATIC_ASSERT(kIsIndirectStringMask != 0);
4036
4037 // Test for string types, and branch/fall through to appropriate unpacking
4038 // code.
4039 __ Tst(input_type, kIsIndirectStringMask);
4040 __ B(eq, &seq_or_external_string);
4041 __ Tst(input_type, kSlicedNotConsMask);
4042 __ B(ne, &sliced_string);
4043
4044 Register unpacked_string = input_string;
4045
4046 // Cons string. Check whether it is flat, then fetch first part.
4047 __ Ldr(temp, FieldMemOperand(input_string, ConsString::kSecondOffset));
4048 __ JumpIfNotRoot(temp, Heap::kempty_stringRootIndex, &runtime);
4049 __ Ldr(unpacked_string,
4050 FieldMemOperand(input_string, ConsString::kFirstOffset));
4051 __ B(&update_instance_type);
4052
4053 __ Bind(&sliced_string);
4054 // Sliced string. Fetch parent and correct start index by offset.
4055 __ Ldrsw(temp,
4056 UntagSmiFieldMemOperand(input_string, SlicedString::kOffsetOffset));
4057 __ Add(from, from, temp);
4058 __ Ldr(unpacked_string,
4059 FieldMemOperand(input_string, SlicedString::kParentOffset));
4060
4061 __ Bind(&update_instance_type);
4062 __ Ldr(temp, FieldMemOperand(unpacked_string, HeapObject::kMapOffset));
4063 __ Ldrb(input_type, FieldMemOperand(temp, Map::kInstanceTypeOffset));
4064 // TODO(all): This generates "b #+0x4". Can these be optimised out?
4065 __ B(&underlying_unpacked);
4066
4067 __ Bind(&seq_or_external_string);
4068 // Sequential or external string. Registers unpacked_string and input_string
4069 // alias, so there's nothing to do here.
4070
4071 // x0 result_string pointer to result string object (uninit)
4072 // x1 result_length length of substring result
4073 // x10 unpacked_string pointer to unpacked string object
4074 // x11 input_length length of input string
4075 // x12 input_type instance type of input string
4076 // x15 from substring start character offset
4077 __ Bind(&underlying_unpacked);
4078
4079 if (FLAG_string_slices) {
4080 Label copy_routine;
4081 __ Cmp(result_length, SlicedString::kMinLength);
4082 // Short slice. Copy instead of slicing.
4083 __ B(lt, &copy_routine);
4084 // Allocate new sliced string. At this point we do not reload the instance
4085 // type including the string encoding because we simply rely on the info
4086 // provided by the original string. It does not matter if the original
4087 // string's encoding is wrong because we always have to recheck encoding of
4088 // the newly created string's parent anyway due to externalized strings.
4089 Label two_byte_slice, set_slice_header;
4090 STATIC_ASSERT((kStringEncodingMask & kOneByteStringTag) != 0);
4091 STATIC_ASSERT((kStringEncodingMask & kTwoByteStringTag) == 0);
4092 __ Tbz(input_type, MaskToBit(kStringEncodingMask), &two_byte_slice);
4093 __ AllocateAsciiSlicedString(result_string, result_length, x3, x4,
4094 &runtime);
4095 __ B(&set_slice_header);
4096
4097 __ Bind(&two_byte_slice);
4098 __ AllocateTwoByteSlicedString(result_string, result_length, x3, x4,
4099 &runtime);
4100
4101 __ Bind(&set_slice_header);
4102 __ SmiTag(from);
4103 __ Str(from, FieldMemOperand(result_string, SlicedString::kOffsetOffset));
4104 __ Str(unpacked_string,
4105 FieldMemOperand(result_string, SlicedString::kParentOffset));
4106 __ B(&return_x0);
4107
4108 __ Bind(&copy_routine);
4109 }
4110
4111 // x0 result_string pointer to result string object (uninit)
4112 // x1 result_length length of substring result
4113 // x10 unpacked_string pointer to unpacked string object
4114 // x11 input_length length of input string
4115 // x12 input_type instance type of input string
4116 // x13 unpacked_char0 pointer to first char of unpacked string (uninit)
4117 // x13 substring_char0 pointer to first char of substring (uninit)
4118 // x14 result_char0 pointer to first char of result (uninit)
4119 // x15 from substring start character offset
4120 Register unpacked_char0 = x13;
4121 Register substring_char0 = x13;
4122 Register result_char0 = x14;
4123 Label two_byte_sequential, sequential_string, allocate_result;
4124 STATIC_ASSERT(kExternalStringTag != 0);
4125 STATIC_ASSERT(kSeqStringTag == 0);
4126
4127 __ Tst(input_type, kExternalStringTag);
4128 __ B(eq, &sequential_string);
4129
4130 __ Tst(input_type, kShortExternalStringTag);
4131 __ B(ne, &runtime);
4132 __ Ldr(unpacked_char0,
4133 FieldMemOperand(unpacked_string, ExternalString::kResourceDataOffset));
4134 // unpacked_char0 points to the first character of the underlying string.
4135 __ B(&allocate_result);
4136
4137 __ Bind(&sequential_string);
4138 // Locate first character of underlying subject string.
4139 STATIC_ASSERT(SeqTwoByteString::kHeaderSize == SeqOneByteString::kHeaderSize);
4140 __ Add(unpacked_char0, unpacked_string,
4141 SeqOneByteString::kHeaderSize - kHeapObjectTag);
4142
4143 __ Bind(&allocate_result);
4144 // Sequential ASCII string. Allocate the result.
4145 STATIC_ASSERT((kOneByteStringTag & kStringEncodingMask) != 0);
4146 __ Tbz(input_type, MaskToBit(kStringEncodingMask), &two_byte_sequential);
4147
4148 // Allocate and copy the resulting ASCII string.
4149 __ AllocateAsciiString(result_string, result_length, x3, x4, x5, &runtime);
4150
4151 // Locate first character of substring to copy.
4152 __ Add(substring_char0, unpacked_char0, from);
4153
4154 // Locate first character of result.
4155 __ Add(result_char0, result_string,
4156 SeqOneByteString::kHeaderSize - kHeapObjectTag);
4157
4158 STATIC_ASSERT((SeqOneByteString::kHeaderSize & kObjectAlignmentMask) == 0);
4159 __ CopyBytes(result_char0, substring_char0, result_length, x3, kCopyLong);
4160 __ B(&return_x0);
4161
4162 // Allocate and copy the resulting two-byte string.
4163 __ Bind(&two_byte_sequential);
4164 __ AllocateTwoByteString(result_string, result_length, x3, x4, x5, &runtime);
4165
4166 // Locate first character of substring to copy.
4167 __ Add(substring_char0, unpacked_char0, Operand(from, LSL, 1));
4168
4169 // Locate first character of result.
4170 __ Add(result_char0, result_string,
4171 SeqTwoByteString::kHeaderSize - kHeapObjectTag);
4172
4173 STATIC_ASSERT((SeqTwoByteString::kHeaderSize & kObjectAlignmentMask) == 0);
4174 __ Add(result_length, result_length, result_length);
4175 __ CopyBytes(result_char0, substring_char0, result_length, x3, kCopyLong);
4176
4177 __ Bind(&return_x0);
4178 Counters* counters = masm->isolate()->counters();
4179 __ IncrementCounter(counters->sub_string_native(), 1, x3, x4);
4180 __ Drop(3);
4181 __ Ret();
4182
4183 __ Bind(&runtime);
4184 __ TailCallRuntime(Runtime::kSubString, 3, 1);
4185
4186 __ Bind(&single_char);
4187 // x1: result_length
4188 // x10: input_string
4189 // x12: input_type
4190 // x15: from (untagged)
4191 __ SmiTag(from);
4192 StringCharAtGenerator generator(
4193 input_string, from, result_length, x0,
4194 &runtime, &runtime, &runtime, STRING_INDEX_IS_NUMBER);
4195 generator.GenerateFast(masm);
4196 // TODO(jbramley): Why doesn't this jump to return_x0?
4197 __ Drop(3);
4198 __ Ret();
4199 generator.SkipSlow(masm, &runtime);
4200 }
4201
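// Illustration only (a hedged sketch): the result-shape decision made above
// when FLAG_string_slices is on. The helper names are hypothetical; the
// real code also dispatches on the string encoding in each branch.
#if 0
static String* SubStringSketch(String* unpacked, int from, int length) {
  if (length < SlicedString::kMinLength) {
    return AllocateAndCopyChars(unpacked, from, length);  // Short: flat copy.
  }
  return NewSlicedString(unpacked, from, length);  // Long: share the parent.
}
#endif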
4202
4203 void StringCompareStub::GenerateFlatAsciiStringEquals(MacroAssembler* masm,
4204 Register left,
4205 Register right,
4206 Register scratch1,
4207 Register scratch2,
4208 Register scratch3) {
4209 ASSERT(!AreAliased(left, right, scratch1, scratch2, scratch3));
4210 Register result = x0;
4211 Register left_length = scratch1;
4212 Register right_length = scratch2;
4213
4214 // Compare lengths. If lengths differ, strings can't be equal. Lengths are
4215 // smis, and don't need to be untagged.
4216 Label strings_not_equal, check_zero_length;
4217 __ Ldr(left_length, FieldMemOperand(left, String::kLengthOffset));
4218 __ Ldr(right_length, FieldMemOperand(right, String::kLengthOffset));
4219 __ Cmp(left_length, right_length);
4220 __ B(eq, &check_zero_length);
4221
4222 __ Bind(&strings_not_equal);
4223 __ Mov(result, Operand(Smi::FromInt(NOT_EQUAL)));
4224 __ Ret();
4225
4226 // Check if the length is zero. If so, the strings must be equal (and empty).
4227 Label compare_chars;
4228 __ Bind(&check_zero_length);
4229 STATIC_ASSERT(kSmiTag == 0);
4230 __ Cbnz(left_length, &compare_chars);
4231 __ Mov(result, Operand(Smi::FromInt(EQUAL)));
4232 __ Ret();
4233
4234 // Compare characters. Falls through if all characters are equal.
4235 __ Bind(&compare_chars);
4236 GenerateAsciiCharsCompareLoop(masm, left, right, left_length, scratch2,
4237 scratch3, &strings_not_equal);
4238
4239 // Characters in strings are equal.
4240 __ Mov(result, Operand(Smi::FromInt(EQUAL)));
4241 __ Ret();
4242 }
4243
4244
4245 void StringCompareStub::GenerateCompareFlatAsciiStrings(MacroAssembler* masm,
4246 Register left,
4247 Register right,
4248 Register scratch1,
4249 Register scratch2,
4250 Register scratch3,
4251 Register scratch4) {
4252 ASSERT(!AreAliased(left, right, scratch1, scratch2, scratch3, scratch4));
4253 Label result_not_equal, compare_lengths;
4254
4255 // Find minimum length and length difference.
4256 Register length_delta = scratch3;
4257 __ Ldr(scratch1, FieldMemOperand(left, String::kLengthOffset));
4258 __ Ldr(scratch2, FieldMemOperand(right, String::kLengthOffset));
4259 __ Subs(length_delta, scratch1, scratch2);
4260
4261 Register min_length = scratch1;
4262 __ Csel(min_length, scratch2, scratch1, gt);
4263 __ Cbz(min_length, &compare_lengths);
4264
4265 // Compare loop.
4266 GenerateAsciiCharsCompareLoop(masm,
4267 left, right, min_length, scratch2, scratch4,
4268 &result_not_equal);
4269
4270 // Compare lengths; strings up to min_length are equal.
4271 __ Bind(&compare_lengths);
4272
4273 ASSERT(Smi::FromInt(EQUAL) == static_cast<Smi*>(0));
4274
4275 // Use length_delta as result if it's zero.
4276 Register result = x0;
4277 __ Subs(result, length_delta, 0);
4278
4279 __ Bind(&result_not_equal);
4280 Register greater = x10;
4281 Register less = x11;
4282 __ Mov(greater, Operand(Smi::FromInt(GREATER)));
4283 __ Mov(less, Operand(Smi::FromInt(LESS)));
4284 __ CmovX(result, greater, gt);
4285 __ CmovX(result, less, lt);
4286 __ Ret();
4287 }
4288
4289
4290 void StringCompareStub::GenerateAsciiCharsCompareLoop(
4291 MacroAssembler* masm,
4292 Register left,
4293 Register right,
4294 Register length,
4295 Register scratch1,
4296 Register scratch2,
4297 Label* chars_not_equal) {
4298 ASSERT(!AreAliased(left, right, length, scratch1, scratch2));
4299
4300 // Change the index to run from -length to -1 by adding length to the
4301 // string start. This means the loop ends when the index reaches zero,
4302 // which doesn't need an additional compare.
4303 __ SmiUntag(length);
4304 __ Add(scratch1, length, SeqOneByteString::kHeaderSize - kHeapObjectTag);
4305 __ Add(left, left, scratch1);
4306 __ Add(right, right, scratch1);
4307
4308 Register index = length;
4309 __ Neg(index, length); // index = -length;
4310
4311 // Compare loop
4312 Label loop;
4313 __ Bind(&loop);
4314 __ Ldrb(scratch1, MemOperand(left, index));
4315 __ Ldrb(scratch2, MemOperand(right, index));
4316 __ Cmp(scratch1, scratch2);
4317 __ B(ne, chars_not_equal);
4318 __ Add(index, index, 1);
4319 __ Cbnz(index, &loop);
4320 }
4321
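// Illustration only (a C++ sketch): the comparison the two functions above
// implement. Characters are compared up to the shorter length using a
// negated index that counts up to zero (so reaching zero ends the loop
// without an extra bounds compare); ties fall back to the length delta.
#if 0
static int CompareFlatAsciiSketch(const uint8_t* left, int left_length,
                                  const uint8_t* right, int right_length) {
  int min_length = (left_length < right_length) ? left_length : right_length;
  const uint8_t* left_end = left + min_length;
  const uint8_t* right_end = right + min_length;
  for (int index = -min_length; index != 0; index++) {
    if (left_end[index] != right_end[index]) {
      return (left_end[index] > right_end[index]) ? GREATER : LESS;
    }
  }
  int delta = left_length - right_length;
  return (delta == 0) ? EQUAL : ((delta > 0) ? GREATER : LESS);
}
#endif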
4322
4323 void StringCompareStub::Generate(MacroAssembler* masm) {
4324 Label runtime;
4325
4326 Counters* counters = masm->isolate()->counters();
4327
4328 // Stack frame on entry.
4329 // sp[0]: right string
4330 // sp[8]: left string
4331 Register right = x10;
4332 Register left = x11;
4333 Register result = x0;
4334 __ Pop(right, left);
4335
4336 Label not_same;
4337 __ Subs(result, right, left);
4338 __ B(ne, &not_same);
4339 STATIC_ASSERT(EQUAL == 0);
4340 __ IncrementCounter(counters->string_compare_native(), 1, x3, x4);
4341 __ Ret();
4342
4343 __ Bind(&not_same);
4344
4345 // Check that both objects are sequential ASCII strings.
4346 __ JumpIfEitherIsNotSequentialAsciiStrings(left, right, x12, x13, &runtime);
4347
4348 // Compare flat ASCII strings natively. Remove arguments from stack first,
4349 // as this function will generate a return.
4350 __ IncrementCounter(counters->string_compare_native(), 1, x3, x4);
4351 GenerateCompareFlatAsciiStrings(masm, left, right, x12, x13, x14, x15);
4352
4353 __ Bind(&runtime);
4354
4355 // Push arguments back on to the stack.
4356 // sp[0] = right string
4357 // sp[8] = left string.
4358 __ Push(left, right);
4359
4360 // Call the runtime.
4361 // Returns -1 (less), 0 (equal), or 1 (greater) tagged as a small integer.
4362 __ TailCallRuntime(Runtime::kStringCompare, 2, 1);
4363 }
4364
4365
4366 void ArrayPushStub::Generate(MacroAssembler* masm) {
4367 Register receiver = x0;
4368
4369 int argc = arguments_count();
4370
4371 if (argc == 0) {
4372 // Nothing to do, just return the length.
4373 __ Ldr(x0, FieldMemOperand(receiver, JSArray::kLengthOffset));
4374 __ Drop(argc + 1);
4375 __ Ret();
4376 return;
4377 }
4378
4379 Isolate* isolate = masm->isolate();
4380
4381 if (argc != 1) {
4382 __ TailCallExternalReference(
4383 ExternalReference(Builtins::c_ArrayPush, isolate), argc + 1, 1);
4384 return;
4385 }
4386
4387 Label call_builtin, attempt_to_grow_elements, with_write_barrier;
4388
4389 Register elements_length = x8;
4390 Register length = x7;
4391 Register elements = x6;
4392 Register end_elements = x5;
4393 Register value = x4;
4394 // Get the elements array of the object.
4395 __ Ldr(elements, FieldMemOperand(receiver, JSArray::kElementsOffset));
4396
4397 if (IsFastSmiOrObjectElementsKind(elements_kind())) {
4398 // Check that the elements are in fast mode and writable.
4399 __ CheckMap(elements,
4400 x10,
4401 Heap::kFixedArrayMapRootIndex,
4402 &call_builtin,
4403 DONT_DO_SMI_CHECK);
4404 }
4405
4406 // Get the array's length and calculate new length.
4407 __ Ldr(length, FieldMemOperand(receiver, JSArray::kLengthOffset));
4408 STATIC_ASSERT(kSmiTag == 0);
4409 __ Add(length, length, Operand(Smi::FromInt(argc)));
4410
4411 // Check whether the elements array is large enough for the new length.
4412 __ Ldr(elements_length,
4413 FieldMemOperand(elements, FixedArray::kLengthOffset));
4414 __ Cmp(length, elements_length);
4415
4416 const int kEndElementsOffset =
4417 FixedArray::kHeaderSize - kHeapObjectTag - argc * kPointerSize;
4418
4419 if (IsFastSmiOrObjectElementsKind(elements_kind())) {
4420 __ B(gt, &attempt_to_grow_elements);
4421
4422 // Check if value is a smi.
4423 __ Peek(value, (argc - 1) * kPointerSize);
4424 __ JumpIfNotSmi(value, &with_write_barrier);
4425
4426 // Store the value.
4427 // We may need a register containing the address end_elements below, so
4428 // the store uses pre-index writeback to leave that address in end_elements.
4429 __ Add(end_elements, elements,
4430 Operand::UntagSmiAndScale(length, kPointerSizeLog2));
4431 __ Str(value, MemOperand(end_elements, kEndElementsOffset, PreIndex));
4432 } else {
4433 // TODO(all): ARM has a redundant cmp here.
4434 __ B(gt, &call_builtin);
4435
4436 __ Peek(value, (argc - 1) * kPointerSize);
4437 __ StoreNumberToDoubleElements(value, length, elements, x10, d0, d1,
4438 &call_builtin, argc * kDoubleSize);
4439 }
4440
4441 // Save new length.
4442 __ Str(length, FieldMemOperand(receiver, JSArray::kLengthOffset));
4443
4444 // Return length.
4445 __ Drop(argc + 1);
4446 __ Mov(x0, length);
4447 __ Ret();
4448
4449 if (IsFastDoubleElementsKind(elements_kind())) {
4450 __ Bind(&call_builtin);
4451 __ TailCallExternalReference(
4452 ExternalReference(Builtins::c_ArrayPush, isolate), argc + 1, 1);
4453 return;
4454 }
4455
4456 __ Bind(&with_write_barrier);
4457
4458 if (IsFastSmiElementsKind(elements_kind())) {
4459 if (FLAG_trace_elements_transitions) {
4460 __ B(&call_builtin);
4461 }
4462
4463 __ Ldr(x10, FieldMemOperand(value, HeapObject::kMapOffset));
4464 __ JumpIfHeapNumber(x10, &call_builtin);
4465
4466 ElementsKind target_kind = IsHoleyElementsKind(elements_kind())
4467 ? FAST_HOLEY_ELEMENTS : FAST_ELEMENTS;
4468 __ Ldr(x10, GlobalObjectMemOperand());
4469 __ Ldr(x10, FieldMemOperand(x10, GlobalObject::kNativeContextOffset));
4470 __ Ldr(x10, ContextMemOperand(x10, Context::JS_ARRAY_MAPS_INDEX));
4471 const int header_size = FixedArrayBase::kHeaderSize;
4472 // Verify that the object can be transitioned in place.
4473 const int origin_offset = header_size + elements_kind() * kPointerSize;
4474 __ Ldr(x11, FieldMemOperand(receiver, origin_offset));
4475 __ Ldr(x12, FieldMemOperand(x10, HeapObject::kMapOffset));
4476 __ Cmp(x11, x12);
4477 __ B(ne, &call_builtin);
4478
4479 const int target_offset = header_size + target_kind * kPointerSize;
4480 __ Ldr(x10, FieldMemOperand(x10, target_offset));
4481 __ Mov(x11, receiver);
4482 ElementsTransitionGenerator::GenerateMapChangeElementsTransition(
4483 masm, DONT_TRACK_ALLOCATION_SITE, NULL);
4484 }
4485
4486 // Save new length.
4487 __ Str(length, FieldMemOperand(receiver, JSArray::kLengthOffset));
4488
4489 // Store the value.
4490 // We may need a register containing the address end_elements below, so
4491 // the store uses pre-index writeback to leave that address in end_elements.
4492 __ Add(end_elements, elements,
4493 Operand::UntagSmiAndScale(length, kPointerSizeLog2));
4494 __ Str(value, MemOperand(end_elements, kEndElementsOffset, PreIndex));
4495
4496 __ RecordWrite(elements,
4497 end_elements,
4498 value,
4499 kLRHasNotBeenSaved,
4500 kDontSaveFPRegs,
4501 EMIT_REMEMBERED_SET,
4502 OMIT_SMI_CHECK);
4503 __ Drop(argc + 1);
4504 __ Mov(x0, length);
4505 __ Ret();
4506
4507 __ Bind(&attempt_to_grow_elements);
4508
4509 if (!FLAG_inline_new) {
4510 __ B(&call_builtin);
4511 }
4512
4513 Register argument = x2;
4514 __ Peek(argument, (argc - 1) * kPointerSize);
4515 // Growing elements that are SMI-only requires special handling in case
4516 // the new element is non-Smi. For now, delegate to the builtin.
4517 if (IsFastSmiElementsKind(elements_kind())) {
4518 __ JumpIfNotSmi(argument, &call_builtin);
4519 }
4520
4521 // We could be lucky and the elements array could be at the top of new-space.
4522 // In this case we can just grow it in place by moving the allocation pointer
4523 // up.
4524 ExternalReference new_space_allocation_top =
4525 ExternalReference::new_space_allocation_top_address(isolate);
4526 ExternalReference new_space_allocation_limit =
4527 ExternalReference::new_space_allocation_limit_address(isolate);
4528
4529 const int kAllocationDelta = 4;
4530 ASSERT(kAllocationDelta >= argc);
4531 Register allocation_top_addr = x5;
4532 Register allocation_top = x9;
4533 // Load top and check if it is the end of elements.
4534 __ Add(end_elements, elements,
4535 Operand::UntagSmiAndScale(length, kPointerSizeLog2));
4536 __ Add(end_elements, end_elements, kEndElementsOffset);
4537 __ Mov(allocation_top_addr, Operand(new_space_allocation_top));
4538 __ Ldr(allocation_top, MemOperand(allocation_top_addr));
4539 __ Cmp(end_elements, allocation_top);
4540 __ B(ne, &call_builtin);
4541
4542 __ Mov(x10, Operand(new_space_allocation_limit));
4543 __ Ldr(x10, MemOperand(x10));
4544 __ Add(allocation_top, allocation_top, kAllocationDelta * kPointerSize);
4545 __ Cmp(allocation_top, x10);
4546 __ B(hi, &call_builtin);
4547
4548 // We fit and could grow elements.
4549 // Update new_space_allocation_top.
4550 __ Str(allocation_top, MemOperand(allocation_top_addr));
4551 // Push the argument.
4552 __ Str(argument, MemOperand(end_elements));
4553 // Fill the rest with holes.
4554 __ LoadRoot(x10, Heap::kTheHoleValueRootIndex);
4555 for (int i = 1; i < kAllocationDelta; i++) {
4556 // TODO(all): Try to use stp here.
4557 __ Str(x10, MemOperand(end_elements, i * kPointerSize));
4558 }
4559
4560 // Update elements' and array's sizes.
4561 __ Str(length, FieldMemOperand(receiver, JSArray::kLengthOffset));
4562 __ Add(elements_length,
4563 elements_length,
4564 Operand(Smi::FromInt(kAllocationDelta)));
4565 __ Str(elements_length,
4566 FieldMemOperand(elements, FixedArray::kLengthOffset));
4567
4568 // Elements are in new space, so write barrier is not required.
4569 __ Drop(argc + 1);
4570 __ Mov(x0, length);
4571 __ Ret();
4572
4573 __ Bind(&call_builtin);
4574 __ TailCallExternalReference(
4575 ExternalReference(Builtins::c_ArrayPush, isolate), argc + 1, 1);
4576 }
4577
4578
4579 void BinaryOpICWithAllocationSiteStub::Generate(MacroAssembler* masm) {
4580 // ----------- S t a t e -------------
4581 // -- x1 : left
4582 // -- x0 : right
4583 // -- lr : return address
4584 // -----------------------------------
4585 Isolate* isolate = masm->isolate();
4586
4587 // Load x2 with the allocation site. We stick an undefined dummy value here
4588 // and replace it with the real allocation site later when we instantiate this
4589 // stub in BinaryOpICWithAllocationSiteStub::GetCodeCopyFromTemplate().
4590 __ LoadObject(x2, handle(isolate->heap()->undefined_value()));
4591
4592 // Make sure that we actually patched the allocation site.
4593 if (FLAG_debug_code) {
4594 __ AssertNotSmi(x2, kExpectedAllocationSite);
4595 __ Ldr(x10, FieldMemOperand(x2, HeapObject::kMapOffset));
4596 __ AssertRegisterIsRoot(x10, Heap::kAllocationSiteMapRootIndex,
4597 kExpectedAllocationSite);
4598 }
4599
4600 // Tail call into the stub that handles binary operations with allocation
4601 // sites.
4602 BinaryOpWithAllocationSiteStub stub(state_);
4603 __ TailCallStub(&stub);
4604 }
4605
4606
4607 bool CodeStub::CanUseFPRegisters() {
4608 // FP registers are always available on A64.
4609 return true;
4610 }
4611
4612
4613 void RecordWriteStub::GenerateIncremental(MacroAssembler* masm, Mode mode) {
4614 // This stub needs some extra registers. They have already been allocated,
4615 // but we need to save them before using them.
4616 regs_.Save(masm);
4617
4618 if (remembered_set_action_ == EMIT_REMEMBERED_SET) {
4619 Label dont_need_remembered_set;
4620
4621 Register value = regs_.scratch0();
4622 __ Ldr(value, MemOperand(regs_.address()));
4623 __ JumpIfNotInNewSpace(value, &dont_need_remembered_set);
4624
4625 __ CheckPageFlagSet(regs_.object(),
4626 value,
4627 1 << MemoryChunk::SCAN_ON_SCAVENGE,
4628 &dont_need_remembered_set);
4629
4630 // First notify the incremental marker if necessary, then update the
4631 // remembered set.
4632 CheckNeedsToInformIncrementalMarker(
4633 masm, kUpdateRememberedSetOnNoNeedToInformIncrementalMarker, mode);
4634 InformIncrementalMarker(masm, mode);
4635 regs_.Restore(masm); // Restore the extra scratch registers we used.
4636 __ RememberedSetHelper(object_,
4637 address_,
4638 value_,
4639 save_fp_regs_mode_,
4640 MacroAssembler::kReturnAtEnd);
4641
4642 __ Bind(&dont_need_remembered_set);
4643 }
4644
4645 CheckNeedsToInformIncrementalMarker(
4646 masm, kReturnOnNoNeedToInformIncrementalMarker, mode);
4647 InformIncrementalMarker(masm, mode);
4648 regs_.Restore(masm); // Restore the extra scratch registers we used.
4649 __ Ret();
4650 }
4651
4652
4653 void RecordWriteStub::InformIncrementalMarker(MacroAssembler* masm, Mode mode) {
4654 regs_.SaveCallerSaveRegisters(masm, save_fp_regs_mode_);
4655 Register address =
4656 x0.Is(regs_.address()) ? regs_.scratch0() : regs_.address();
4657 ASSERT(!address.Is(regs_.object()));
4658 ASSERT(!address.Is(x0));
4659 __ Mov(address, regs_.address());
4660 __ Mov(x0, regs_.object());
4661 __ Mov(x1, address);
4662 __ Mov(x2, Operand(ExternalReference::isolate_address(masm->isolate())));
4663
4664 AllowExternalCallThatCantCauseGC scope(masm);
4665 ExternalReference function = (mode == INCREMENTAL_COMPACTION)
4666 ? ExternalReference::incremental_evacuation_record_write_function(
4667 masm->isolate())
4668 : ExternalReference::incremental_marking_record_write_function(
4669 masm->isolate());
4670 __ CallCFunction(function, 3, 0);
4671
4672 regs_.RestoreCallerSaveRegisters(masm, save_fp_regs_mode_);
4673 }
4674
4675
4676 void RecordWriteStub::CheckNeedsToInformIncrementalMarker(
4677 MacroAssembler* masm,
4678 OnNoNeedToInformIncrementalMarker on_no_need,
4679 Mode mode) {
4680 Label on_black;
4681 Label need_incremental;
4682 Label need_incremental_pop_scratch;
4683
4684 Register mem_chunk = regs_.scratch0();
4685 Register counter = regs_.scratch1();
4686 __ Bic(mem_chunk, regs_.object(), Page::kPageAlignmentMask);
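// Pages are aligned, so clearing the page-offset bits of the object's address
// yields the address of the MemoryChunk that contains it.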
4687 __ Ldr(counter,
4688 MemOperand(mem_chunk, MemoryChunk::kWriteBarrierCounterOffset));
4689 __ Subs(counter, counter, 1);
4690 __ Str(counter,
4691 MemOperand(mem_chunk, MemoryChunk::kWriteBarrierCounterOffset));
4692 __ B(mi, &need_incremental);
4693
4694 // If the object is not black we don't have to inform the incremental marker.
4695 __ JumpIfBlack(regs_.object(), regs_.scratch0(), regs_.scratch1(), &on_black);
4696
4697 regs_.Restore(masm); // Restore the extra scratch registers we used.
4698 if (on_no_need == kUpdateRememberedSetOnNoNeedToInformIncrementalMarker) {
4699 __ RememberedSetHelper(object_,
4700 address_,
4701 value_,
4702 save_fp_regs_mode_,
4703 MacroAssembler::kReturnAtEnd);
4704 } else {
4705 __ Ret();
4706 }
4707
4708 __ Bind(&on_black);
4709 // Get the value from the slot.
4710 Register value = regs_.scratch0();
4711 __ Ldr(value, MemOperand(regs_.address()));
4712
4713 if (mode == INCREMENTAL_COMPACTION) {
4714 Label ensure_not_white;
4715
4716 __ CheckPageFlagClear(value,
4717 regs_.scratch1(),
4718 MemoryChunk::kEvacuationCandidateMask,
4719 &ensure_not_white);
4720
4721 __ CheckPageFlagClear(regs_.object(),
4722 regs_.scratch1(),
4723 MemoryChunk::kSkipEvacuationSlotsRecordingMask,
4724 &need_incremental);
4725
4726 __ Bind(&ensure_not_white);
4727 }
4728
4729 // We need extra registers for this, so we temporarily push the object and
4730 // the address registers.
4731 __ Push(regs_.address(), regs_.object());
4732 __ EnsureNotWhite(value,
4733 regs_.scratch1(), // Scratch.
4734 regs_.object(), // Scratch.
4735 regs_.address(), // Scratch.
4736 regs_.scratch2(), // Scratch.
4737 &need_incremental_pop_scratch);
4738 __ Pop(regs_.object(), regs_.address());
4739
4740 regs_.Restore(masm); // Restore the extra scratch registers we used.
4741 if (on_no_need == kUpdateRememberedSetOnNoNeedToInformIncrementalMarker) {
4742 __ RememberedSetHelper(object_,
4743 address_,
4744 value_,
4745 save_fp_regs_mode_,
4746 MacroAssembler::kReturnAtEnd);
4747 } else {
4748 __ Ret();
4749 }
4750
4751 __ Bind(&need_incremental_pop_scratch);
4752 __ Pop(regs_.object(), regs_.address());
4753
4754 __ Bind(&need_incremental);
4755 // Fall through when we need to inform the incremental marker.
4756 }
4757
4758
4759 void RecordWriteStub::Generate(MacroAssembler* masm) {
4760 Label skip_to_incremental_noncompacting;
4761 Label skip_to_incremental_compacting;
4762
4763 // We patch these two first instructions back and forth between a nop and
4764 // real branch when we start and stop incremental heap marking.
4765 // Initially the stub is expected to be in STORE_BUFFER_ONLY mode, so 2 nops
4766 // are generated.
4767 // See RecordWriteStub::Patch for details.
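// An 'adr' with xzr as its destination discards the result, so it acts as a
// nop while still encoding the label's offset for the patching code to find.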
4768 {
4769 InstructionAccurateScope scope(masm, 2);
4770 __ adr(xzr, &skip_to_incremental_noncompacting);
4771 __ adr(xzr, &skip_to_incremental_compacting);
4772 }
4773
4774 if (remembered_set_action_ == EMIT_REMEMBERED_SET) {
4775 __ RememberedSetHelper(object_,
4776 address_,
4777 value_,
4778 save_fp_regs_mode_,
4779 MacroAssembler::kReturnAtEnd);
4780 }
4781 __ Ret();
4782
4783 __ Bind(&skip_to_incremental_noncompacting);
4784 GenerateIncremental(masm, INCREMENTAL);
4785
4786 __ Bind(&skip_to_incremental_compacting);
4787 GenerateIncremental(masm, INCREMENTAL_COMPACTION);
4788 }
4789
4790
4791 void StoreArrayLiteralElementStub::Generate(MacroAssembler* masm) {
4792 // TODO(all): Possible optimisations in this function:
4793 // 1. Merge CheckFastElements and CheckFastSmiElements, so that the map
4794 // bitfield is loaded only once.
4795 // 2. Refactor the Ldr/Add sequence at the start of fast_elements and
4796 // smi_element.
4797
4798 // x0 value element value to store
4799 // x3 index_smi element index as smi
4800 // sp[0] array_index_smi array literal index in function as smi
4801 // sp[1] array array literal
4802
4803 Register value = x0;
4804 Register index_smi = x3;
4805
4806 Register array = x1;
4807 Register array_map = x2;
4808 Register array_index_smi = x4;
4809 __ PeekPair(array_index_smi, array, 0);
4810 __ Ldr(array_map, FieldMemOperand(array, JSObject::kMapOffset));
4811
4812 Label double_elements, smi_element, fast_elements, slow_elements;
4813 __ CheckFastElements(array_map, x10, &double_elements);
4814 __ JumpIfSmi(value, &smi_element);
4815 __ CheckFastSmiElements(array_map, x10, &fast_elements);
4816
4817 // A store into the array literal requires an elements transition. Call into
4818 // the runtime.
4819 __ Bind(&slow_elements);
4820 __ Push(array, index_smi, value);
4821 __ Ldr(x10, MemOperand(fp, JavaScriptFrameConstants::kFunctionOffset));
4822 __ Ldr(x11, FieldMemOperand(x10, JSFunction::kLiteralsOffset));
4823 __ Push(x11, array_index_smi);
4824 __ TailCallRuntime(Runtime::kStoreArrayLiteralElement, 5, 1);
4825
4826 // Array literal has ElementsKind of FAST_*_ELEMENTS and value is an object.
4827 __ Bind(&fast_elements);
4828 __ Ldr(x10, FieldMemOperand(array, JSObject::kElementsOffset));
4829 __ Add(x11, x10, Operand::UntagSmiAndScale(index_smi, kPointerSizeLog2));
4830 __ Add(x11, x11, FixedArray::kHeaderSize - kHeapObjectTag);
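// x11 now holds the raw (untagged) address of the element slot.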
4831 __ Str(value, MemOperand(x11));
4832 // Update the write barrier for the array store.
4833 __ RecordWrite(x10, x11, value, kLRHasNotBeenSaved, kDontSaveFPRegs,
4834 EMIT_REMEMBERED_SET, OMIT_SMI_CHECK);
4835 __ Ret();
4836
4837 // Array literal has ElementsKind of FAST_*_SMI_ELEMENTS or FAST_*_ELEMENTS,
4838 // and value is Smi.
4839 __ Bind(&smi_element);
4840 __ Ldr(x10, FieldMemOperand(array, JSObject::kElementsOffset));
4841 __ Add(x11, x10, Operand::UntagSmiAndScale(index_smi, kPointerSizeLog2));
4842 __ Str(value, FieldMemOperand(x11, FixedArray::kHeaderSize));
4843 __ Ret();
4844
4845 __ Bind(&double_elements);
4846 __ Ldr(x10, FieldMemOperand(array, JSObject::kElementsOffset));
4847 __ StoreNumberToDoubleElements(value, index_smi, x10, x11, d0, d1,
4848 &slow_elements);
4849 __ Ret();
4850 }
4851
4852
4853 void StubFailureTrampolineStub::Generate(MacroAssembler* masm) {
4854 // TODO(jbramley): The ARM code leaves the (shifted) offset in r1. Why?
4855 CEntryStub ces(1, kSaveFPRegs);
4856 __ Call(ces.GetCode(masm->isolate()), RelocInfo::CODE_TARGET);
4857 int parameter_count_offset =
4858 StubFailureTrampolineFrame::kCallerStackParameterCountFrameOffset;
4859 __ Ldr(x1, MemOperand(fp, parameter_count_offset));
4860 if (function_mode_ == JS_FUNCTION_STUB_MODE) {
4861 __ Add(x1, x1, 1);
4862 }
4863 masm->LeaveFrame(StackFrame::STUB_FAILURE_TRAMPOLINE);
4864 __ Drop(x1);
4865 // Return to IC Miss stub, continuation still on stack.
4866 __ Ret();
4867 }
4868
4869
4870 void ProfileEntryHookStub::MaybeCallEntryHook(MacroAssembler* masm) {
4871 if (masm->isolate()->function_entry_hook() != NULL) {
4872 // TODO(all): This needs to be reliably consistent with
4873 // kReturnAddressDistanceFromFunctionStart in ::Generate.
4874 Assembler::BlockConstPoolScope no_const_pools(masm);
4875 ProfileEntryHookStub stub;
4876 __ Push(lr);
4877 __ CallStub(&stub);
4878 __ Pop(lr);
4879 }
4880 }
4881
4882
4883 void ProfileEntryHookStub::Generate(MacroAssembler* masm) {
4884 MacroAssembler::NoUseRealAbortsScope no_use_real_aborts(masm);
4885 // The entry hook is a "BumpSystemStackPointer" instruction (sub), followed by
4886 // a "Push lr" instruction, followed by a call.
4887 // TODO(jbramley): Verify that this call is always made with relocation.
4888 static const int kReturnAddressDistanceFromFunctionStart =
4889 Assembler::kCallSizeWithRelocation + (2 * kInstructionSize);
4890
4891 // Save all kCallerSaved registers (including lr), since this can be called
4892 // from anywhere.
4893 // TODO(jbramley): What about FP registers?
4894 __ PushCPURegList(kCallerSaved);
4895 ASSERT(kCallerSaved.IncludesAliasOf(lr));
4896 const int kNumSavedRegs = kCallerSaved.Count();
4897
4898 // Compute the function's address as the first argument.
4899 __ Sub(x0, lr, kReturnAddressDistanceFromFunctionStart);
4900
4901 #if V8_HOST_ARCH_A64
4902 uintptr_t entry_hook =
4903 reinterpret_cast<uintptr_t>(masm->isolate()->function_entry_hook());
4904 __ Mov(x10, entry_hook);
4905 #else
4906 // Under the simulator we need to indirect the entry hook through a trampoline
4907 // function at a known address.
4908 ApiFunction dispatcher(FUNCTION_ADDR(EntryHookTrampoline));
4909 __ Mov(x10, Operand(ExternalReference(&dispatcher,
4910 ExternalReference::BUILTIN_CALL,
4911 masm->isolate())));
4912 // The trampoline additionally takes the isolate as a third parameter.
4913 __ Mov(x2, Operand(ExternalReference::isolate_address(masm->isolate())));
4914 #endif
4915
4916 // The caller's return address is above the saved temporaries.
4917 // Grab its location for the second argument to the hook.
4918 __ Add(x1, __ StackPointer(), kNumSavedRegs * kPointerSize);
4919
4920 {
4921 // Create a dummy frame, as CallCFunction requires this.
4922 FrameScope frame(masm, StackFrame::MANUAL);
4923 __ CallCFunction(x10, 2, 0);
4924 }
4925
4926 __ PopCPURegList(kCallerSaved);
4927 __ Ret();
4928 }
4929
4930
4931 void DirectCEntryStub::Generate(MacroAssembler* masm) {
4932 // When calling into C++ code the stack pointer must be csp.
4933 // Therefore this code must use csp for peek/poke operations when the
4934 // stub is generated. When the stub is called
4935 // (via DirectCEntryStub::GenerateCall), the caller must set up an ExitFrame
4936 // and configure the stack pointer *before* doing the call.
4937 const Register old_stack_pointer = __ StackPointer();
4938 __ SetStackPointer(csp);
4939
4940 // Put return address on the stack (accessible to GC through exit frame pc).
4941 __ Poke(lr, 0);
4942 // Call the C++ function.
4943 __ Blr(x10);
4944 // Return to calling code.
4945 __ Peek(lr, 0);
4946 __ Ret();
4947
4948 __ SetStackPointer(old_stack_pointer);
4949 }
4950
4951 void DirectCEntryStub::GenerateCall(MacroAssembler* masm,
4952 Register target) {
4953 // Make sure the caller configured the stack pointer (see comment in
4954 // DirectCEntryStub::Generate).
4955 ASSERT(csp.Is(__ StackPointer()));
4956
4957 intptr_t code =
4958 reinterpret_cast<intptr_t>(GetCode(masm->isolate()).location());
4959 __ Mov(lr, Operand(code, RelocInfo::CODE_TARGET));
4960 __ Mov(x10, target);
4961 // Branch to the stub.
4962 __ Blr(lr);
4963 }
4964
4965
4966 // Probe the name dictionary in the 'elements' register.
4967 // Jump to the 'done' label if a property with the given name is found.
4968 // Jump to the 'miss' label otherwise.
4969 //
4970 // If the lookup succeeds, 'scratch2' points at the matching entry:
4970 // elements + (index * kEntrySize * kPointerSize).
4971 // 'elements' and 'name' registers are preserved on miss.
4972 void NameDictionaryLookupStub::GeneratePositiveLookup(
4973 MacroAssembler* masm,
4974 Label* miss,
4975 Label* done,
4976 Register elements,
4977 Register name,
4978 Register scratch1,
4979 Register scratch2) {
4980 ASSERT(!AreAliased(elements, name, scratch1, scratch2));
4981
4982 // Assert that 'name' holds a name (symbol or string).
4983 __ AssertName(name);
4984
4985 // Compute the capacity mask.
4986 __ Ldrsw(scratch1, UntagSmiFieldMemOperand(elements, kCapacityOffset));
4987 __ Sub(scratch1, scratch1, 1);
4988
4989 // Generate an unrolled loop that performs a few probes before giving up.
4990 for (int i = 0; i < kInlinedProbes; i++) {
4991 // Compute the masked index: (hash + i + i * i) & mask.
4992 __ Ldr(scratch2, FieldMemOperand(name, Name::kHashFieldOffset));
4993 if (i > 0) {
4994 // Add the probe offset (i + i * i) left shifted to avoid right shifting
4995 // the hash in a separate instruction. The value hash + i + i * i is right
4996 // shifted in the following And instruction.
4997 ASSERT(NameDictionary::GetProbeOffset(i) <
4998 1 << (32 - Name::kHashFieldOffset));
4999 __ Add(scratch2, scratch2, Operand(
5000 NameDictionary::GetProbeOffset(i) << Name::kHashShift));
5001 }
5002 __ And(scratch2, scratch1, Operand(scratch2, LSR, Name::kHashShift));
5003
5004 // Scale the index by multiplying by the entry size.
5005 ASSERT(NameDictionary::kEntrySize == 3);
5006 __ Add(scratch2, scratch2, Operand(scratch2, LSL, 1)); // index *= 3.
5007
5008 // Check if the key is identical to the name.
5009 __ Add(scratch2, elements, Operand(scratch2, LSL, kPointerSizeLog2));
5010 // TODO(jbramley): We need another scratch here, but some callers can't
5011 // provide a scratch3 so we have to use Tmp1(). We should find a clean way
5012 // to make it unavailable to the MacroAssembler for a short time.
5013 __ Ldr(__ Tmp1(), FieldMemOperand(scratch2, kElementsStartOffset));
5014 __ Cmp(name, __ Tmp1());
5015 __ B(eq, done);
5016 }
5017
5018 // The inlined probes didn't find the entry.
5019 // Call the complete stub to scan the whole dictionary.
5020
5021 CPURegList spill_list(CPURegister::kRegister, kXRegSize, 0, 6);
5022 spill_list.Combine(lr);
5023 spill_list.Remove(scratch1);
5024 spill_list.Remove(scratch2);
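// x0-x6 and lr are preserved across the call; the scratch registers are
// excluded because the caller allows them to be clobbered (scratch2 carries
// the result).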
5025
5026 __ PushCPURegList(spill_list);
5027
5028 if (name.Is(x0)) {
5029 ASSERT(!elements.Is(x1));
5030 __ Mov(x1, name);
5031 __ Mov(x0, elements);
5032 } else {
5033 __ Mov(x0, elements);
5034 __ Mov(x1, name);
5035 }
5036
5037 Label not_found;
5038 NameDictionaryLookupStub stub(POSITIVE_LOOKUP);
5039 __ CallStub(&stub);
5040 __ Cbz(x0, &not_found);
5041 __ Mov(scratch2, x2); // Move entry index into scratch2.
5042 __ PopCPURegList(spill_list);
5043 __ B(done);
5044
5045 __ Bind(&not_found);
5046 __ PopCPURegList(spill_list);
5047 __ B(miss);
5048 }
5049
5050
5051 void NameDictionaryLookupStub::GenerateNegativeLookup(MacroAssembler* masm,
5052 Label* miss,
5053 Label* done,
5054 Register receiver,
5055 Register properties,
5056 Handle<Name> name,
5057 Register scratch0) {
5058 ASSERT(!AreAliased(receiver, properties, scratch0));
5059 ASSERT(name->IsUniqueName());
5060 // If the names of the slots in the range from 1 to kProbes - 1 for the hash
5061 // value are not equal to the name, and the kProbes-th slot is not used (its
5062 // name is the undefined value), then the hash table cannot contain the
5063 // property. This holds even if some slots represent deleted properties
5064 // (their names are the hole value).
5065 for (int i = 0; i < kInlinedProbes; i++) {
5066 // scratch0 points to properties hash.
5067 // Compute the masked index: (hash + i + i * i) & mask.
5068 Register index = scratch0;
5069 // The capacity is a smi and a power of two (2^n).
5070 __ Ldrsw(index, UntagSmiFieldMemOperand(properties, kCapacityOffset));
5071 __ Sub(index, index, 1);
5072 __ And(index, index, name->Hash() + NameDictionary::GetProbeOffset(i));
5073
5074 // Scale the index by multiplying by the entry size.
5075 ASSERT(NameDictionary::kEntrySize == 3);
5076 __ Add(index, index, Operand(index, LSL, 1)); // index *= 3.
5077
5078 Register entity_name = scratch0;
5079 // Having undefined at this place means the name is not contained.
5080 Register tmp = index;
5081 __ Add(tmp, properties, Operand(index, LSL, kPointerSizeLog2));
5082 __ Ldr(entity_name, FieldMemOperand(tmp, kElementsStartOffset));
5083
5084 __ JumpIfRoot(entity_name, Heap::kUndefinedValueRootIndex, done);
5085
5086 // Stop if found the property.
5087 __ Cmp(entity_name, Operand(name));
5088 __ B(eq, miss);
5089
5090 Label good;
5091 __ JumpIfRoot(entity_name, Heap::kTheHoleValueRootIndex, &good);
5092
5093 // Check if the entry name is not a unique name.
5094 __ Ldr(entity_name, FieldMemOperand(entity_name, HeapObject::kMapOffset));
5095 __ Ldrb(entity_name,
5096 FieldMemOperand(entity_name, Map::kInstanceTypeOffset));
5097 __ JumpIfNotUniqueName(entity_name, miss);
5098 __ Bind(&good);
5099 }
5100
5101 CPURegList spill_list(CPURegister::kRegister, kXRegSize, 0, 6);
5102 spill_list.Combine(lr);
5103 spill_list.Remove(scratch0); // Scratch registers don't need to be preserved.
5104
5105 __ PushCPURegList(spill_list);
5106
5107 __ Ldr(x0, FieldMemOperand(receiver, JSObject::kPropertiesOffset));
5108 __ Mov(x1, Operand(name));
5109 NameDictionaryLookupStub stub(NEGATIVE_LOOKUP);
5110 __ CallStub(&stub);
5111 // Move stub return value to scratch0. Note that scratch0 is not included in
5112 // spill_list and won't be clobbered by PopCPURegList.
5113 __ Mov(scratch0, x0);
5114 __ PopCPURegList(spill_list);
5115
5116 __ Cbz(scratch0, done);
5117 __ B(miss);
5118 }
5119
5120
5121 void NameDictionaryLookupStub::Generate(MacroAssembler* masm) {
5122 // This stub overrides SometimesSetsUpAFrame() to return false. That means
5123 // we cannot call anything that could cause a GC from this stub.
5124 //
5125 // Arguments are in x0 and x1:
5126 // x0: property dictionary.
5127 // x1: the name of the property we are looking for.
5128 //
5129 // The return value is in x0: zero if the lookup failed, non-zero otherwise.
5130 // If the lookup is successful, x2 will contain the index of the entry.
5131
5132 Register result = x0;
5133 Register dictionary = x0;
5134 Register key = x1;
5135 Register index = x2;
5136 Register mask = x3;
5137 Register hash = x4;
5138 Register undefined = x5;
5139 Register entry_key = x6;
5140
5141 Label in_dictionary, maybe_in_dictionary, not_in_dictionary;
5142
5143 __ Ldrsw(mask, UntagSmiFieldMemOperand(dictionary, kCapacityOffset));
5144 __ Sub(mask, mask, 1);
5145
5146 __ Ldr(hash, FieldMemOperand(key, Name::kHashFieldOffset));
5147 __ LoadRoot(undefined, Heap::kUndefinedValueRootIndex);
5148
5149 for (int i = kInlinedProbes; i < kTotalProbes; i++) {
5150 // Compute the masked index: (hash + i + i * i) & mask.
5151 // The capacity is a smi and a power of two (2^n).
5152 if (i > 0) {
5153 // Add the probe offset (i + i * i) left shifted to avoid right shifting
5154 // the hash in a separate instruction. The value hash + i + i * i is right
5155 // shifted in the following And instruction.
5156 ASSERT(NameDictionary::GetProbeOffset(i) <
5157 1 << (32 - Name::kHashFieldOffset));
5158 __ Add(index, hash,
5159 NameDictionary::GetProbeOffset(i) << Name::kHashShift);
5160 } else {
5161 __ Mov(index, hash);
5162 }
5163 __ And(index, mask, Operand(index, LSR, Name::kHashShift));
5164
5165 // Scale the index by multiplying by the entry size.
5166 ASSERT(NameDictionary::kEntrySize == 3);
5167 __ Add(index, index, Operand(index, LSL, 1)); // index *= 3.
5168
5169 __ Add(index, dictionary, Operand(index, LSL, kPointerSizeLog2));
5170 __ Ldr(entry_key, FieldMemOperand(index, kElementsStartOffset));
5171
5172 // Having undefined at this place means the name is not contained.
5173 __ Cmp(entry_key, undefined);
5174 __ B(eq, &not_in_dictionary);
5175
5176 // Stop if found the property.
5177 __ Cmp(entry_key, key);
5178 __ B(eq, &in_dictionary);
5179
5180 if (i != kTotalProbes - 1 && mode_ == NEGATIVE_LOOKUP) {
5181 // Check if the entry name is not a unique name.
5182 __ Ldr(entry_key, FieldMemOperand(entry_key, HeapObject::kMapOffset));
5183 __ Ldrb(entry_key, FieldMemOperand(entry_key, Map::kInstanceTypeOffset));
5184 __ JumpIfNotUniqueName(entry_key, &maybe_in_dictionary);
5185 }
5186 }
5187
5188 __ Bind(&maybe_in_dictionary);
5189 // If we are doing negative lookup then probing failure should be
5190 // treated as a lookup success. For positive lookup, probing failure
5191 // should be treated as lookup failure.
5192 if (mode_ == POSITIVE_LOOKUP) {
5193 __ Mov(result, 0);
5194 __ Ret();
5195 }
5196
5197 __ Bind(&in_dictionary);
5198 __ Mov(result, 1);
5199 __ Ret();
5200
5201 __ Bind(&not_in_dictionary);
5202 __ Mov(result, 0);
5203 __ Ret();
5204 }
5205
5206
5207 template<class T>
5208 static void CreateArrayDispatch(MacroAssembler* masm,
5209 AllocationSiteOverrideMode mode) {
5210 ASM_LOCATION("CreateArrayDispatch");
5211 if (mode == DISABLE_ALLOCATION_SITES) {
5212 T stub(GetInitialFastElementsKind(), mode);
5213 __ TailCallStub(&stub);
5214
5215 } else if (mode == DONT_OVERRIDE) {
5216 Register kind = x3;
5217 int last_index =
5218 GetSequenceIndexFromFastElementsKind(TERMINAL_FAST_ELEMENTS_KIND);
5219 for (int i = 0; i <= last_index; ++i) {
5220 Label next;
5221 ElementsKind candidate_kind = GetFastElementsKindFromSequenceIndex(i);
5222 // TODO(jbramley): Is this the best way to handle this? Can we make the
5223 // tail calls conditional, rather than hopping over each one?
5224 __ CompareAndBranch(kind, candidate_kind, ne, &next);
5225 T stub(candidate_kind);
5226 __ TailCallStub(&stub);
5227 __ Bind(&next);
5228 }
5229
5230 // If we reached this point there is a problem.
5231 __ Abort(kUnexpectedElementsKindInArrayConstructor);
5232
5233 } else {
5234 UNREACHABLE();
5235 }
5236 }
5237
5238
5239 // TODO(jbramley): If this needs to be a special case, make it a proper template
5240 // specialization, and not a separate function.
5241 static void CreateArrayDispatchOneArgument(MacroAssembler* masm,
5242 AllocationSiteOverrideMode mode) {
5243 ASM_LOCATION("CreateArrayDispatchOneArgument");
5244 // x0 - argc
5245 // x1 - constructor?
5246 // x2 - allocation site (if mode != DISABLE_ALLOCATION_SITES)
5247 // x3 - kind (if mode != DISABLE_ALLOCATION_SITES)
5248 // sp[0] - last argument
5249
5250 Register allocation_site = x2;
5251 Register kind = x3;
5252
5253 Label normal_sequence;
5254 if (mode == DONT_OVERRIDE) {
5255 STATIC_ASSERT(FAST_SMI_ELEMENTS == 0);
5256 STATIC_ASSERT(FAST_HOLEY_SMI_ELEMENTS == 1);
5257 STATIC_ASSERT(FAST_ELEMENTS == 2);
5258 STATIC_ASSERT(FAST_HOLEY_ELEMENTS == 3);
5259 STATIC_ASSERT(FAST_DOUBLE_ELEMENTS == 4);
5260 STATIC_ASSERT(FAST_HOLEY_DOUBLE_ELEMENTS == 5);
5261
5262 // Is the low bit set? If so, the array is holey.
5263 __ Tbnz(kind, 0, &normal_sequence);
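// Per the STATIC_ASSERTs above, the holey kinds are exactly the odd values.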
5264 }
5265
5266 // Look at the last argument.
5267 // TODO(jbramley): What does a 0 argument represent?
5268 __ Peek(x10, 0);
5269 __ Cbz(x10, &normal_sequence);
5270
5271 if (mode == DISABLE_ALLOCATION_SITES) {
5272 ElementsKind initial = GetInitialFastElementsKind();
5273 ElementsKind holey_initial = GetHoleyElementsKind(initial);
5274
5275 ArraySingleArgumentConstructorStub stub_holey(holey_initial,
5276 DISABLE_ALLOCATION_SITES);
5277 __ TailCallStub(&stub_holey);
5278
5279 __ Bind(&normal_sequence);
5280 ArraySingleArgumentConstructorStub stub(initial,
5281 DISABLE_ALLOCATION_SITES);
5282 __ TailCallStub(&stub);
5283 } else if (mode == DONT_OVERRIDE) {
5284 // We are going to create a holey array, but our kind is non-holey.
5285 // Fix kind and retry (only if we have an allocation site in the slot).
5286 __ Orr(kind, kind, 1);
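// Setting the low bit turns the packed kind into its holey counterpart.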
5287
5288 if (FLAG_debug_code) {
5289 __ Ldr(x10, FieldMemOperand(allocation_site, 0));
5290 __ JumpIfNotRoot(x10, Heap::kAllocationSiteMapRootIndex,
5291 &normal_sequence);
5292 __ Assert(eq, kExpectedAllocationSite);
5293 }
5294
5295 // Save the resulting elements kind in type info. We can't just store 'kind'
5296 // in the AllocationSite::transition_info field because elements kind is
5297 // restricted to a portion of the field; upper bits need to be left alone.
5298 STATIC_ASSERT(AllocationSite::ElementsKindBits::kShift == 0);
5299 __ Ldr(x11, FieldMemOperand(allocation_site,
5300 AllocationSite::kTransitionInfoOffset));
5301 __ Add(x11, x11, Operand(Smi::FromInt(kFastElementsKindPackedToHoley)));
5302 __ Str(x11, FieldMemOperand(allocation_site,
5303 AllocationSite::kTransitionInfoOffset));
5304
5305 __ Bind(&normal_sequence);
5306 int last_index =
5307 GetSequenceIndexFromFastElementsKind(TERMINAL_FAST_ELEMENTS_KIND);
5308 for (int i = 0; i <= last_index; ++i) {
5309 Label next;
5310 ElementsKind candidate_kind = GetFastElementsKindFromSequenceIndex(i);
5311 // TODO(jbramley): Is this the best way to handle this? Can we make the
5312 // tail calls conditional, rather than hopping over each one?
5313 __ CompareAndBranch(kind, candidate_kind, ne, &next);
5314 ArraySingleArgumentConstructorStub stub(candidate_kind);
5315 __ TailCallStub(&stub);
5316 __ Bind(&next);
5317 }
5318
5319 // If we reached this point there is a problem.
5320 __ Abort(kUnexpectedElementsKindInArrayConstructor);
5321 } else {
5322 UNREACHABLE();
5323 }
5324 }
5325
5326
5327 template<class T>
5328 static void ArrayConstructorStubAheadOfTimeHelper(Isolate* isolate) {
5329 int to_index = GetSequenceIndexFromFastElementsKind(
5330 TERMINAL_FAST_ELEMENTS_KIND);
5331 for (int i = 0; i <= to_index; ++i) {
5332 ElementsKind kind = GetFastElementsKindFromSequenceIndex(i);
5333 T stub(kind);
5334 stub.GetCode(isolate);
5335 if (AllocationSite::GetMode(kind) != DONT_TRACK_ALLOCATION_SITE) {
5336 T stub1(kind, DISABLE_ALLOCATION_SITES);
5337 stub1.GetCode(isolate);
5338 }
5339 }
5340 }
5341
5342
5343 void ArrayConstructorStubBase::GenerateStubsAheadOfTime(Isolate* isolate) {
5344 ArrayConstructorStubAheadOfTimeHelper<ArrayNoArgumentConstructorStub>(
5345 isolate);
5346 ArrayConstructorStubAheadOfTimeHelper<ArraySingleArgumentConstructorStub>(
5347 isolate);
5348 ArrayConstructorStubAheadOfTimeHelper<ArrayNArgumentsConstructorStub>(
5349 isolate);
5350 }
5351
5352
5353 void InternalArrayConstructorStubBase::GenerateStubsAheadOfTime(
5354 Isolate* isolate) {
5355 ElementsKind kinds[2] = { FAST_ELEMENTS, FAST_HOLEY_ELEMENTS };
5356 for (int i = 0; i < 2; i++) {
5357 // For internal arrays we only need a few things
5358 InternalArrayNoArgumentConstructorStub stubh1(kinds[i]);
5359 stubh1.GetCode(isolate);
5360 InternalArraySingleArgumentConstructorStub stubh2(kinds[i]);
5361 stubh2.GetCode(isolate);
5362 InternalArrayNArgumentsConstructorStub stubh3(kinds[i]);
5363 stubh3.GetCode(isolate);
5364 }
5365 }
5366
5367
5368 void ArrayConstructorStub::GenerateDispatchToArrayStub(
5369 MacroAssembler* masm,
5370 AllocationSiteOverrideMode mode) {
5371 Register argc = x0;
5372 if (argument_count_ == ANY) {
5373 Label zero_case, n_case;
5374 __ Cbz(argc, &zero_case);
5375 __ Cmp(argc, 1);
5376 __ B(ne, &n_case);
5377
5378 // One argument.
5379 CreateArrayDispatchOneArgument(masm, mode);
5380
5381 __ Bind(&zero_case);
5382 // No arguments.
5383 CreateArrayDispatch<ArrayNoArgumentConstructorStub>(masm, mode);
5384
5385 __ Bind(&n_case);
5386 // N arguments.
5387 CreateArrayDispatch<ArrayNArgumentsConstructorStub>(masm, mode);
5388
5389 } else if (argument_count_ == NONE) {
5390 CreateArrayDispatch<ArrayNoArgumentConstructorStub>(masm, mode);
5391 } else if (argument_count_ == ONE) {
5392 CreateArrayDispatchOneArgument(masm, mode);
5393 } else if (argument_count_ == MORE_THAN_ONE) {
5394 CreateArrayDispatch<ArrayNArgumentsConstructorStub>(masm, mode);
5395 } else {
5396 UNREACHABLE();
5397 }
5398 }
5399
5400
5401 void ArrayConstructorStub::Generate(MacroAssembler* masm) {
5402 ASM_LOCATION("ArrayConstructorStub::Generate");
5403 // ----------- S t a t e -------------
5404 // -- x0 : argc (only if argument_count_ == ANY)
5405 // -- x1 : constructor
5406 // -- x2 : feedback vector (fixed array or undefined)
5407 // -- x3 : slot index (if x2 is fixed array)
5408 // -- sp[0] : return address
5409 // -- sp[4] : last argument
5410 // -----------------------------------
5411 Register constructor = x1;
5412 Register feedback_vector = x2;
5413 Register slot_index = x3;
5414
5415 if (FLAG_debug_code) {
5416 // The array construct code is only set for the global and natives
5417 // builtin Array functions which always have maps.
5418
5419 Label unexpected_map, map_ok;
5420 // Initial map for the builtin Array function should be a map.
5421 __ Ldr(x10, FieldMemOperand(constructor,
5422 JSFunction::kPrototypeOrInitialMapOffset));
5423 // A Smi check catches both a NULL pointer and a Smi.
5424 __ JumpIfSmi(x10, &unexpected_map);
5425 __ JumpIfObjectType(x10, x10, x11, MAP_TYPE, &map_ok);
5426 __ Bind(&unexpected_map);
5427 __ Abort(kUnexpectedInitialMapForArrayFunction);
5428 __ Bind(&map_ok);
5429
5430 // In feedback_vector, we expect either undefined or a valid fixed array.
5431 Label okay_here;
5432 Handle<Map> fixed_array_map = masm->isolate()->factory()->fixed_array_map();
5433 __ JumpIfRoot(feedback_vector, Heap::kUndefinedValueRootIndex, &okay_here);
5434 __ Ldr(x10, FieldMemOperand(feedback_vector, FixedArray::kMapOffset));
5435 __ Cmp(x10, Operand(fixed_array_map));
5436 __ Assert(eq, kExpectedFixedArrayInFeedbackVector);
5437
5438 // slot_index should be a smi if we don't have undefined in feedback_vector.
5439 __ AssertSmi(slot_index);
5440
5441 __ Bind(&okay_here);
5442 }
5443
5444 Register allocation_site = x2; // Overwrites feedback_vector.
5445 Register kind = x3;
5446 Label no_info;
5447 // Get the elements kind and case on that.
5448 __ JumpIfRoot(feedback_vector, Heap::kUndefinedValueRootIndex, &no_info);
5449 __ Add(feedback_vector, feedback_vector,
5450 Operand::UntagSmiAndScale(slot_index, kPointerSizeLog2));
5451 __ Ldr(allocation_site, FieldMemOperand(feedback_vector,
5452 FixedArray::kHeaderSize));
5453
5454 // If the feedback vector is undefined, or contains anything other than an
5455 // AllocationSite, call an array constructor that doesn't use AllocationSites.
5456 __ Ldr(x10, FieldMemOperand(allocation_site, AllocationSite::kMapOffset));
5457 __ JumpIfNotRoot(x10, Heap::kAllocationSiteMapRootIndex, &no_info);
5458
5459 __ Ldrsw(kind,
5460 UntagSmiFieldMemOperand(allocation_site,
5461 AllocationSite::kTransitionInfoOffset));
5462 __ And(kind, kind, AllocationSite::ElementsKindBits::kMask);
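// Only the ElementsKindBits portion of transition_info encodes the kind; the
// remaining bits carry other AllocationSite state and must be masked off.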
5463 GenerateDispatchToArrayStub(masm, DONT_OVERRIDE);
5464
5465 __ Bind(&no_info);
5466 GenerateDispatchToArrayStub(masm, DISABLE_ALLOCATION_SITES);
5467 }
5468
5469
5470 void InternalArrayConstructorStub::GenerateCase(
5471 MacroAssembler* masm, ElementsKind kind) {
5472 Label zero_case, n_case;
5473 Register argc = x0;
5474
5475 __ Cbz(argc, &zero_case);
5476 __ CompareAndBranch(argc, 1, ne, &n_case);
5477
5478 // One argument.
5479 if (IsFastPackedElementsKind(kind)) {
5480 Label packed_case;
5481
5482 // We might need to create a holey array; look at the first argument.
5483 __ Peek(x10, 0);
5484 __ Cbz(x10, &packed_case);
5485
5486 InternalArraySingleArgumentConstructorStub
5487 stub1_holey(GetHoleyElementsKind(kind));
5488 __ TailCallStub(&stub1_holey);
5489
5490 __ Bind(&packed_case);
5491 }
5492 InternalArraySingleArgumentConstructorStub stub1(kind);
5493 __ TailCallStub(&stub1);
5494
5495 __ Bind(&zero_case);
5496 // No arguments.
5497 InternalArrayNoArgumentConstructorStub stub0(kind);
5498 __ TailCallStub(&stub0);
5499
5500 __ Bind(&n_case);
5501 // N arguments.
5502 InternalArrayNArgumentsConstructorStub stubN(kind);
5503 __ TailCallStub(&stubN);
5504 }
5505
5506
5507 void InternalArrayConstructorStub::Generate(MacroAssembler* masm) {
5508 // ----------- S t a t e -------------
5509 // -- x0 : argc
5510 // -- x1 : constructor
5511 // -- sp[0] : return address
5512 // -- sp[4] : last argument
5513 // -----------------------------------
5514 Handle<Object> undefined_sentinel(
5515 masm->isolate()->heap()->undefined_value(), masm->isolate());
5516
5517 Register constructor = x1;
5518
5519 if (FLAG_debug_code) {
5520 // The array construct code is only set for the global and natives
5521 // builtin Array functions which always have maps.
5522
5523 Label unexpected_map, map_ok;
5524 // Initial map for the builtin Array function should be a map.
5525 __ Ldr(x10, FieldMemOperand(constructor,
5526 JSFunction::kPrototypeOrInitialMapOffset));
5527 // A Smi check catches both a NULL pointer and a Smi.
5528 __ JumpIfSmi(x10, &unexpected_map);
5529 __ JumpIfObjectType(x10, x10, x11, MAP_TYPE, &map_ok);
5530 __ Bind(&unexpected_map);
5531 __ Abort(kUnexpectedInitialMapForArrayFunction);
5532 __ Bind(&map_ok);
5533 }
5534
5535 Register kind = w3;
5536 // Figure out the right elements kind
5537 __ Ldr(x10, FieldMemOperand(constructor,
5538 JSFunction::kPrototypeOrInitialMapOffset));
5539
5540 // TODO(jbramley): Add a helper function to read elements kind from an
5541 // existing map.
5542 // Load the map's "bit field 2" into kind.
5543 __ Ldr(kind, FieldMemOperand(x10, Map::kBitField2Offset));
5544 // Retrieve elements_kind from bit field 2.
5545 __ Ubfx(kind, kind, Map::kElementsKindShift, Map::kElementsKindBitCount);
5546
5547 if (FLAG_debug_code) {
5548 // kind must be FAST_ELEMENTS or FAST_HOLEY_ELEMENTS.
5549 __ Cmp(kind, FAST_ELEMENTS);
5550 __ Ccmp(kind, FAST_HOLEY_ELEMENTS, ZFlag, ne);
5551 __ Assert(eq, kInvalidElementsKindForInternalArrayOrInternalPackedArray);
5552 }
5553
5554 Label fast_elements_case;
5555 __ CompareAndBranch(kind, FAST_ELEMENTS, eq, &fast_elements_case);
5556 GenerateCase(masm, FAST_HOLEY_ELEMENTS);
5557
5558 __ Bind(&fast_elements_case);
5559 GenerateCase(masm, FAST_ELEMENTS);
5560 }
5561
5562
5563 void CallApiFunctionStub::Generate(MacroAssembler* masm) {
5564 // ----------- S t a t e -------------
5565 // -- x0 : callee
5566 // -- x4 : call_data
5567 // -- x2 : holder
5568 // -- x1 : api_function_address
5569 // -- cp : context
5570 // --
5571 // -- sp[0] : last argument
5572 // -- ...
5573 // -- sp[(argc - 1) * 8] : first argument
5574 // -- sp[argc * 8] : receiver
5575 // -----------------------------------
5576
5577 Register callee = x0;
5578 Register call_data = x4;
5579 Register holder = x2;
5580 Register api_function_address = x1;
5581 Register context = cp;
5582
5583 int argc = ArgumentBits::decode(bit_field_);
5584 bool restore_context = RestoreContextBits::decode(bit_field_);
5585 bool call_data_undefined = CallDataUndefinedBits::decode(bit_field_);
5586
5587 typedef FunctionCallbackArguments FCA;
5588
5589 STATIC_ASSERT(FCA::kContextSaveIndex == 6);
5590 STATIC_ASSERT(FCA::kCalleeIndex == 5);
5591 STATIC_ASSERT(FCA::kDataIndex == 4);
5592 STATIC_ASSERT(FCA::kReturnValueOffset == 3);
5593 STATIC_ASSERT(FCA::kReturnValueDefaultValueIndex == 2);
5594 STATIC_ASSERT(FCA::kIsolateIndex == 1);
5595 STATIC_ASSERT(FCA::kHolderIndex == 0);
5596 STATIC_ASSERT(FCA::kArgsLength == 7);
5597
5598 Isolate* isolate = masm->isolate();
5599
5600 // FunctionCallbackArguments: context, callee and call data.
5601 __ Push(context, callee, call_data);
5602
5603 // Load context from callee
5604 __ Ldr(context, FieldMemOperand(callee, JSFunction::kContextOffset));
5605
5606 if (call_data_undefined) {
5607 __ LoadRoot(call_data, Heap::kUndefinedValueRootIndex);
5608 }
5609 Register isolate_reg = x5;
5610 __ Mov(isolate_reg, Operand(ExternalReference::isolate_address(isolate)));
5611
5612 // FunctionCallbackArguments:
5613 // return value, return value default, isolate, holder.
5614 __ Push(call_data, call_data, isolate_reg, holder);
5615
5616 // Prepare arguments.
5617 Register args = x6;
5618 __ Mov(args, masm->StackPointer());
5619
5620 // Allocate the v8::Arguments structure in the arguments' space, since it's
5621 // not controlled by GC.
5622 const int kApiStackSpace = 4;
5623
5624 // Allocate space in which CallApiFunctionAndReturn can store some scratch
5625 // registers on the stack.
5626 const int kCallApiFunctionSpillSpace = 4;
5627
5628 FrameScope frame_scope(masm, StackFrame::MANUAL);
5629 __ EnterExitFrame(false, x10, kApiStackSpace + kCallApiFunctionSpillSpace);
5630
5631 // TODO(all): Optimize this with stp and suchlike.
5632 ASSERT(!AreAliased(x0, api_function_address));
5633 // x0 = FunctionCallbackInfo&
5634 // The Arguments structure is located just above the return address.
5635 __ Add(x0, masm->StackPointer(), 1 * kPointerSize);
5636 // FunctionCallbackInfo::implicit_args_
5637 __ Str(args, MemOperand(x0, 0 * kPointerSize));
5638 // FunctionCallbackInfo::values_
5639 __ Add(x10, args, Operand((FCA::kArgsLength - 1 + argc) * kPointerSize));
5640 __ Str(x10, MemOperand(x0, 1 * kPointerSize));
5641 // FunctionCallbackInfo::length_ = argc
5642 __ Mov(x10, argc);
5643 __ Str(x10, MemOperand(x0, 2 * kPointerSize));
5644 // FunctionCallbackInfo::is_construct_call = 0
5645 __ Str(xzr, MemOperand(x0, 3 * kPointerSize));
5646
5647 const int kStackUnwindSpace = argc + FCA::kArgsLength + 1;
5648 Address thunk_address = FUNCTION_ADDR(&InvokeFunctionCallback);
5649 ExternalReference::Type thunk_type = ExternalReference::PROFILING_API_CALL;
5650 ApiFunction thunk_fun(thunk_address);
5651 ExternalReference thunk_ref = ExternalReference(&thunk_fun, thunk_type,
5652 masm->isolate());
5653
5654 AllowExternalCallThatCantCauseGC scope(masm);
5655 MemOperand context_restore_operand(
5656 fp, (2 + FCA::kContextSaveIndex) * kPointerSize);
5657 MemOperand return_value_operand(fp,
5658 (2 + FCA::kReturnValueOffset) * kPointerSize);
5659
5660 const int spill_offset = 1 + kApiStackSpace;
5661 __ CallApiFunctionAndReturn(api_function_address,
5662 thunk_ref,
5663 kStackUnwindSpace,
5664 spill_offset,
5665 return_value_operand,
5666 restore_context ?
5667 &context_restore_operand : NULL);
5668 }
5669
5670
5671 void CallApiGetterStub::Generate(MacroAssembler* masm) {
5672 // ----------- S t a t e -------------
5673 // -- sp[0] : name
5674 // -- sp[8 - kArgsLength*8] : PropertyCallbackArguments object
5675 // -- ...
5676 // -- x2 : api_function_address
5677 // -----------------------------------
5678
5679 Register api_function_address = x2;
5680
5681 __ Mov(x0, masm->StackPointer()); // x0 = Handle<Name>
5682 __ Add(x1, x0, 1 * kPointerSize); // x1 = PCA
5683
5684 const int kApiStackSpace = 1;
5685
5686 // Allocate space in which CallApiFunctionAndReturn can store some scratch
5687 // registers on the stack.
5688 const int kCallApiFunctionSpillSpace = 4;
5689
5690 FrameScope frame_scope(masm, StackFrame::MANUAL);
5691 __ EnterExitFrame(false, x10, kApiStackSpace + kCallApiFunctionSpillSpace);
5692
5693 // Create PropertyAccessorInfo instance on the stack above the exit frame with
5694 // x1 (internal::Object** args_) as the data.
5695 __ Poke(x1, 1 * kPointerSize);
5696 __ Add(x1, masm->StackPointer(), 1 * kPointerSize); // x1 = AccessorInfo&
5697
5698 const int kStackUnwindSpace = PropertyCallbackArguments::kArgsLength + 1;
5699
5700 Address thunk_address = FUNCTION_ADDR(&InvokeAccessorGetterCallback);
5701 ExternalReference::Type thunk_type =
5702 ExternalReference::PROFILING_GETTER_CALL;
5703 ApiFunction thunk_fun(thunk_address);
5704 ExternalReference thunk_ref = ExternalReference(&thunk_fun, thunk_type,
5705 masm->isolate());
5706
5707 const int spill_offset = 1 + kApiStackSpace;
5708 __ CallApiFunctionAndReturn(api_function_address,
5709 thunk_ref,
5710 kStackUnwindSpace,
5711 spill_offset,
5712 MemOperand(fp, 6 * kPointerSize),
5713 NULL);
5714 }
5715
5716
5717 #undef __
5718
5719 } } // namespace v8::internal
5720
5721 #endif // V8_TARGET_ARCH_A64