Chromium Code Reviews

Side by Side Diff: src/a64/lithium-codegen-a64.cc

Issue 144963003: A64: add missing files. (Closed) Base URL: https://v8.googlecode.com/svn/branches/experimental/a64
Patch Set: Created 6 years, 11 months ago
1 // Copyright 2013 the V8 project authors. All rights reserved.
2 // Redistribution and use in source and binary forms, with or without
3 // modification, are permitted provided that the following conditions are
4 // met:
5 //
6 // * Redistributions of source code must retain the above copyright
7 // notice, this list of conditions and the following disclaimer.
8 // * Redistributions in binary form must reproduce the above
9 // copyright notice, this list of conditions and the following
10 // disclaimer in the documentation and/or other materials provided
11 // with the distribution.
12 // * Neither the name of Google Inc. nor the names of its
13 // contributors may be used to endorse or promote products derived
14 // from this software without specific prior written permission.
15 //
16 // THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
17 // "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
18 // LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
19 // A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
20 // OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
21 // SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
22 // LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
23 // DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
24 // THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
25 // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
26 // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
27
28 #include "v8.h"
29
30 #include "a64/lithium-codegen-a64.h"
31 #include "a64/lithium-gap-resolver-a64.h"
32 #include "code-stubs.h"
33 #include "stub-cache.h"
34
35 namespace v8 {
36 namespace internal {
37
38
39 class SafepointGenerator : public CallWrapper {
40 public:
41 SafepointGenerator(LCodeGen* codegen,
42 LPointerMap* pointers,
43 Safepoint::DeoptMode mode)
44 : codegen_(codegen),
45 pointers_(pointers),
46 deopt_mode_(mode) { }
47 virtual ~SafepointGenerator() { }
48
49 virtual void BeforeCall(int call_size) const { }
50
51 virtual void AfterCall() const {
52 codegen_->RecordSafepoint(pointers_, deopt_mode_);
53 }
54
55 private:
56 LCodeGen* codegen_;
57 LPointerMap* pointers_;
58 Safepoint::DeoptMode deopt_mode_;
59 };
60
61
62 #define __ masm()->
63
64 // Emit code to branch if the given condition holds.
65 // The code generated here doesn't modify the flags, so they must have
66 // been set by prior instructions.
67 //
68 // The EmitInverted function simply inverts the condition.
69 class BranchOnCondition : public BranchGenerator {
70 public:
71 BranchOnCondition(LCodeGen* codegen, Condition cond)
72 : BranchGenerator(codegen),
73 cond_(cond) { }
74
75 virtual void Emit(Label* label) const {
76 __ B(cond_, label);
77 }
78
79 virtual void EmitInverted(Label* label) const {
80 __ B(InvertCondition(cond_), label);
81 }
82
83 private:
84 Condition cond_;
85 };
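A minimal usage sketch (illustrative only, not part of this patch), assuming
the flags were set by a preceding comparison:

  __ Cmp(x0, x1);
  BranchOnCondition branch(codegen, eq);
  branch.Emit(&taken);          // emits: b.eq taken
  branch.EmitInverted(&other);  // emits: b.ne other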
86
87
88 // Emit code to compare lhs and rhs and branch if the condition holds.
89 // This uses MacroAssembler's CompareAndBranch function so it will handle
90 // converting the comparison to Cbz/Cbnz if the right-hand side is 0.
91 //
92 // EmitInverted still compares the two operands but inverts the condition.
93 class CompareAndBranch : public BranchGenerator {
94 public:
95 CompareAndBranch(LCodeGen* codegen,
96 Condition cond,
97 const Register& lhs,
98 const Operand& rhs)
99 : BranchGenerator(codegen),
100 cond_(cond),
101 lhs_(lhs),
102 rhs_(rhs) { }
103
104 virtual void Emit(Label* label) const {
105 __ CompareAndBranch(lhs_, rhs_, cond_, label);
106 }
107
108 virtual void EmitInverted(Label* label) const {
109 __ CompareAndBranch(lhs_, rhs_, InvertCondition(cond_), label);
110 }
111
112 private:
113 Condition cond_;
114 const Register& lhs_;
115 const Operand& rhs_;
116 };
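Sketch of the Cbz/Cbnz conversion described above (illustrative only):

  CompareAndBranch(codegen, eq, x0, 0).Emit(&l);   // emits: cbz x0, l
  CompareAndBranch(codegen, ne, x0, 0).Emit(&l);   // emits: cbnz x0, l
  CompareAndBranch(codegen, lt, x0, 42).Emit(&l);  // emits: cmp x0, #42; b.lt l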
117
118
119 // Test the input with the given mask and branch if the condition holds.
120 // If the condition is 'eq' or 'ne' this will use MacroAssembler's
121 // TestAndBranchIfAllClear and TestAndBranchIfAnySet so it will handle the
122 // conversion to Tbz/Tbnz when possible.
123 class TestAndBranch : public BranchGenerator {
124 public:
125 TestAndBranch(LCodeGen* codegen,
126 Condition cond,
127 const Register& value,
128 uint64_t mask)
129 : BranchGenerator(codegen),
130 cond_(cond),
131 value_(value),
132 mask_(mask) { }
133
134 virtual void Emit(Label* label) const {
135 switch (cond_) {
136 case eq:
137 __ TestAndBranchIfAllClear(value_, mask_, label);
138 break;
139 case ne:
140 __ TestAndBranchIfAnySet(value_, mask_, label);
141 break;
142 default:
143 __ Tst(value_, mask_);
144 __ B(cond_, label);
145 }
146 }
147
148 virtual void EmitInverted(Label* label) const {
149 // The inverse of "all clear" is "any set" and vice versa.
150 switch (cond_) {
151 case eq:
152 __ TestAndBranchIfAnySet(value_, mask_, label);
153 break;
154 case ne:
155 __ TestAndBranchIfAllClear(value_, mask_, label);
156 break;
157 default:
158 __ Tst(value_, mask_);
159 __ B(InvertCondition(cond_), label);
160 }
161 }
162
163 private:
164 Condition cond_;
165 const Register& value_;
166 uint64_t mask_;
167 };
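Sketch of the Tbz/Tbnz conversion (illustrative only; the single-bit cases
come from the macro assembler, which falls back to tst/b.cond otherwise):

  TestAndBranch(codegen, eq, x0, 1 << 3).Emit(&l);  // emits: tbz x0, #3, l
  TestAndBranch(codegen, ne, x0, 1 << 3).Emit(&l);  // emits: tbnz x0, #3, l
  TestAndBranch(codegen, mi, x0, 0xff).Emit(&l);    // emits: tst x0, #0xff; b.mi l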
168
169
170 void LCodeGen::WriteTranslation(LEnvironment* environment,
171 Translation* translation,
172 int* pushed_arguments_index,
173 int* pushed_arguments_count) {
174 if (environment == NULL) return;
175
176 // The translation includes one command per value in the environment.
177 int translation_size = environment->values()->length();
178 // The output frame height does not include the parameters.
179 int height = translation_size - environment->parameter_count();
180
181 // Function parameters are arguments to the outermost environment. The
182 // arguments index points to the first element of a sequence of tagged
183 // values on the stack that represent the arguments. This needs to be
184 // kept in sync with the LArgumentsElements implementation.
185 *pushed_arguments_index = -environment->parameter_count();
186 *pushed_arguments_count = environment->parameter_count();
187
188 WriteTranslation(environment->outer(),
189 translation,
190 pushed_arguments_index,
191 pushed_arguments_count);
192 bool has_closure_id = !info()->closure().is_null() &&
193 !info()->closure().is_identical_to(environment->closure());
194 int closure_id = has_closure_id
195 ? DefineDeoptimizationLiteral(environment->closure())
196 : Translation::kSelfLiteralId;
197
198 switch (environment->frame_type()) {
199 case JS_FUNCTION:
200 translation->BeginJSFrame(environment->ast_id(), closure_id, height);
201 break;
202 case JS_CONSTRUCT:
203 translation->BeginConstructStubFrame(closure_id, translation_size);
204 break;
205 case JS_GETTER:
206 ASSERT(translation_size == 1);
207 ASSERT(height == 0);
208 translation->BeginGetterStubFrame(closure_id);
209 break;
210 case JS_SETTER:
211 ASSERT(translation_size == 2);
212 ASSERT(height == 0);
213 translation->BeginSetterStubFrame(closure_id);
214 break;
215 case STUB:
216 translation->BeginCompiledStubFrame();
217 break;
218 case ARGUMENTS_ADAPTOR:
219 translation->BeginArgumentsAdaptorFrame(closure_id, translation_size);
220 break;
221 default:
222 UNREACHABLE();
223 }
224
225 // Inlined frames which push their arguments cause the index to be
226 // bumped and another stack area to be used for materialization;
227 // otherwise, actual argument values are unknown for inlined frames.
228 bool arguments_known = true;
229 int arguments_index = *pushed_arguments_index;
230 int arguments_count = *pushed_arguments_count;
231 if (environment->entry() != NULL) {
232 arguments_known = environment->entry()->arguments_pushed();
233 arguments_index = arguments_index < 0
234 ? GetStackSlotCount() : arguments_index + arguments_count;
235 arguments_count = environment->entry()->arguments_count() + 1;
236 if (environment->entry()->arguments_pushed()) {
237 *pushed_arguments_index = arguments_index;
238 *pushed_arguments_count = arguments_count;
239 }
240 }
241
242 for (int i = 0; i < translation_size; ++i) {
243 LOperand* value = environment->values()->at(i);
244 // spilled_registers_ and spilled_double_registers_ are either
245 // both NULL or both set.
246 if ((environment->spilled_registers() != NULL) && (value != NULL)) {
247 if (value->IsRegister() &&
248 (environment->spilled_registers()[value->index()] != NULL)) {
249 translation->MarkDuplicate();
250 AddToTranslation(translation,
251 environment->spilled_registers()[value->index()],
252 environment->HasTaggedValueAt(i),
253 environment->HasUint32ValueAt(i),
254 arguments_known,
255 arguments_index,
256 arguments_count);
257 } else if (
258 value->IsDoubleRegister() &&
259 (environment->spilled_double_registers()[value->index()] != NULL)) {
260 translation->MarkDuplicate();
261 AddToTranslation(
262 translation,
263 environment->spilled_double_registers()[value->index()],
264 false,
265 false,
266 arguments_known,
267 arguments_index,
268 arguments_count);
269 }
270 }
271
272 AddToTranslation(translation,
273 value,
274 environment->HasTaggedValueAt(i),
275 environment->HasUint32ValueAt(i),
276 arguments_known,
277 arguments_index,
278 arguments_count);
279 }
280 }
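Worked example of the bookkeeping above: the outermost frame starts with
*pushed_arguments_index == -parameter_count() and *pushed_arguments_count ==
parameter_count() (e.g. -2 and 2 for a function with two parameters); the
first inlined frame that pushed its arguments rebases the index to
GetStackSlotCount(), and each subsequent one bumps it past the previous
materialization area.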
281
282
283 void LCodeGen::AddToTranslation(Translation* translation,
284 LOperand* op,
285 bool is_tagged,
286 bool is_uint32,
287 bool arguments_known,
288 int arguments_index,
289 int arguments_count) {
290 if (op == NULL) {
291 // TODO(twuerthinger): Introduce marker operands to indicate that this value
292 // is not present and must be reconstructed by the deoptimizer. Currently
293 // this is only used for the arguments object.
294 translation->StoreArgumentsObject(
295 arguments_known, arguments_index, arguments_count);
296 } else if (op->IsStackSlot()) {
297 if (is_tagged) {
298 translation->StoreStackSlot(op->index());
299 } else if (is_uint32) {
300 translation->StoreUint32StackSlot(op->index());
301 } else {
302 translation->StoreInt32StackSlot(op->index());
303 }
304 } else if (op->IsDoubleStackSlot()) {
305 translation->StoreDoubleStackSlot(op->index());
306 } else if (op->IsArgument()) {
307 ASSERT(is_tagged);
308 int src_index = GetStackSlotCount() + op->index();
309 translation->StoreStackSlot(src_index);
310 } else if (op->IsRegister()) {
311 Register reg = ToRegister(op);
312 if (is_tagged) {
313 translation->StoreRegister(reg);
314 } else if (is_uint32) {
315 translation->StoreUint32Register(reg);
316 } else {
317 translation->StoreInt32Register(reg);
318 }
319 } else if (op->IsDoubleRegister()) {
320 DoubleRegister reg = ToDoubleRegister(op);
321 translation->StoreDoubleRegister(reg);
322 } else if (op->IsConstantOperand()) {
323 HConstant* constant = chunk()->LookupConstant(LConstantOperand::cast(op));
324 int src_index = DefineDeoptimizationLiteral(constant->handle());
325 translation->StoreLiteral(src_index);
326 } else {
327 UNREACHABLE();
328 }
329 }
330
331
332 int LCodeGen::DefineDeoptimizationLiteral(Handle<Object> literal) {
333 int result = deoptimization_literals_.length();
334 for (int i = 0; i < deoptimization_literals_.length(); ++i) {
335 if (deoptimization_literals_[i].is_identical_to(literal)) return i;
336 }
337 deoptimization_literals_.Add(literal, zone());
338 return result;
339 }
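Illustrative behaviour: literals are deduplicated by handle identity, so
repeated registrations return the same index (names here are hypothetical):

  int a = DefineDeoptimizationLiteral(closure);  // e.g. returns 0
  int b = DefineDeoptimizationLiteral(closure);  // returns 0 again
  int c = DefineDeoptimizationLiteral(other);    // returns 1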
340
341
342 void LCodeGen::RegisterEnvironmentForDeoptimization(LEnvironment* environment,
343 Safepoint::DeoptMode mode) {
344 if (!environment->HasBeenRegistered()) {
345 int frame_count = 0;
346 int jsframe_count = 0;
347 int args_index = 0;
348 int args_count = 0;
349 for (LEnvironment* e = environment; e != NULL; e = e->outer()) {
350 ++frame_count;
351 if (e->frame_type() == JS_FUNCTION) {
352 ++jsframe_count;
353 }
354 }
355 Translation translation(&translations_, frame_count, jsframe_count, zone());
356 WriteTranslation(environment, &translation, &args_index, &args_count);
357 int deoptimization_index = deoptimizations_.length();
358 int pc_offset = masm()->pc_offset();
359 environment->Register(deoptimization_index,
360 translation.index(),
361 (mode == Safepoint::kLazyDeopt) ? pc_offset : -1);
362 deoptimizations_.Add(environment, zone());
363 }
364 }
365
366
367 void LCodeGen::CallCode(Handle<Code> code,
368 RelocInfo::Mode mode,
369 LInstruction* instr) {
370 CallCodeGeneric(code, mode, instr, RECORD_SIMPLE_SAFEPOINT);
371 }
372
373
374 void LCodeGen::CallCodeGeneric(Handle<Code> code,
375 RelocInfo::Mode mode,
376 LInstruction* instr,
377 SafepointMode safepoint_mode) {
378 ASSERT(instr != NULL);
379
380 Assembler::BlockConstPoolScope scope(masm_);
381 LPointerMap* pointers = instr->pointer_map();
382 RecordPosition(pointers->position());
383 __ Call(code, mode);
384 RecordSafepointWithLazyDeopt(instr, safepoint_mode);
385
386 if ((code->kind() == Code::BINARY_OP_IC) ||
387 (code->kind() == Code::COMPARE_IC)) {
388 // Signal that we don't inline smi code before these stubs in the
389 // optimizing code generator.
390 InlineSmiCheckInfo::EmitNotInlined(masm());
391 }
392 }
393
394
395 void LCodeGen::DoCallFunction(LCallFunction* instr) {
396 ASSERT(ToRegister(instr->function()).Is(x1));
397 ASSERT(ToRegister(instr->result()).Is(x0));
398
399 int arity = instr->arity();
400 CallFunctionStub stub(arity, NO_CALL_FUNCTION_FLAGS);
401 CallCode(stub.GetCode(isolate()), RelocInfo::CODE_TARGET, instr);
402 __ Ldr(cp, MemOperand(fp, StandardFrameConstants::kContextOffset));
403 }
404
405
406 void LCodeGen::DoCallNew(LCallNew* instr) {
407 ASSERT(instr->IsMarkedAsCall());
408 ASSERT(ToRegister(instr->constructor()).is(x1));
409
410 __ Mov(x0, instr->arity());
411 if (FLAG_optimize_constructed_arrays) {
412 // No cell in x2 for construct type feedback in optimized code.
413 Handle<Object> undefined_value(isolate()->heap()->undefined_value(),
414 isolate());
415 __ Mov(x2, Operand(undefined_value));
416 }
417
418 CallConstructStub stub(NO_CALL_FUNCTION_FLAGS);
419 CallCode(stub.GetCode(isolate()), RelocInfo::CONSTRUCT_CALL, instr);
420
421 ASSERT(ToRegister(instr->result()).is(x0));
422 }
423
424
425 void LCodeGen::DoCallNewArray(LCallNewArray* instr) {
426 ASSERT(instr->IsMarkedAsCall());
427 ASSERT(ToRegister(instr->constructor()).is(x1));
428 ASSERT(FLAG_optimize_constructed_arrays);
429
430 __ Mov(x0, Operand(instr->arity()));
431 __ Mov(x2, Operand(instr->hydrogen()->property_cell()));
432
433 ElementsKind kind = instr->hydrogen()->elements_kind();
434 bool disable_allocation_sites =
435 (AllocationSiteInfo::GetMode(kind) == TRACK_ALLOCATION_SITE);
436
437 if (instr->arity() == 0) {
438 ArrayNoArgumentConstructorStub stub(kind, disable_allocation_sites);
439 CallCode(stub.GetCode(isolate()), RelocInfo::CONSTRUCT_CALL, instr);
440 } else if (instr->arity() == 1) {
441 ArraySingleArgumentConstructorStub stub(kind, disable_allocation_sites);
442 CallCode(stub.GetCode(isolate()), RelocInfo::CONSTRUCT_CALL, instr);
443 } else {
444 ArrayNArgumentsConstructorStub stub(kind, disable_allocation_sites);
445 CallCode(stub.GetCode(isolate()), RelocInfo::CONSTRUCT_CALL, instr);
446 }
447
448 ASSERT(ToRegister(instr->result()).is(x0));
449 }
450
451
452 void LCodeGen::CallRuntime(const Runtime::Function* function,
453 int num_arguments,
454 LInstruction* instr) {
455 ASSERT(instr != NULL);
456 LPointerMap* pointers = instr->pointer_map();
457 ASSERT(pointers != NULL);
458 RecordPosition(pointers->position());
459
460 __ CallRuntime(function, num_arguments);
461 RecordSafepointWithLazyDeopt(instr, RECORD_SIMPLE_SAFEPOINT);
462 }
463
464
465 void LCodeGen::CallRuntimeFromDeferred(Runtime::FunctionId id,
466 int argc,
467 LInstruction* instr) {
468 __ CallRuntimeSaveDoubles(id);
469 RecordSafepointWithRegisters(
470 instr->pointer_map(), argc, Safepoint::kNoLazyDeopt);
471 }
472
473
474 void LCodeGen::RecordPosition(int position) {
475 if (position == RelocInfo::kNoPosition) return;
476 masm()->positions_recorder()->RecordPosition(position);
477 }
478
479
480 void LCodeGen::RecordSafepointWithLazyDeopt(LInstruction* instr,
481 SafepointMode safepoint_mode) {
482 if (safepoint_mode == RECORD_SIMPLE_SAFEPOINT) {
483 RecordSafepoint(instr->pointer_map(), Safepoint::kLazyDeopt);
484 } else {
485 ASSERT(safepoint_mode == RECORD_SAFEPOINT_WITH_REGISTERS_AND_NO_ARGUMENTS);
486 RecordSafepointWithRegisters(
487 instr->pointer_map(), 0, Safepoint::kLazyDeopt);
488 }
489 }
490
491
492 void LCodeGen::RecordSafepoint(LPointerMap* pointers,
493 Safepoint::Kind kind,
494 int arguments,
495 Safepoint::DeoptMode deopt_mode) {
496 ASSERT(expected_safepoint_kind_ == kind);
497
498 const ZoneList<LOperand*>* operands = pointers->GetNormalizedOperands();
499 Safepoint safepoint = safepoints_.DefineSafepoint(
500 masm(), kind, arguments, deopt_mode);
501
502 for (int i = 0; i < operands->length(); i++) {
503 LOperand* pointer = operands->at(i);
504 if (pointer->IsStackSlot()) {
505 safepoint.DefinePointerSlot(pointer->index(), zone());
506 } else if (pointer->IsRegister() && (kind & Safepoint::kWithRegisters)) {
507 safepoint.DefinePointerRegister(ToRegister(pointer), zone());
508 }
509 }
510
511 if (kind & Safepoint::kWithRegisters) {
512 // Register cp always contains a pointer to the context.
513 safepoint.DefinePointerRegister(cp, zone());
514 }
515 }
516
517 void LCodeGen::RecordSafepoint(LPointerMap* pointers,
518 Safepoint::DeoptMode deopt_mode) {
519 RecordSafepoint(pointers, Safepoint::kSimple, 0, deopt_mode);
520 }
521
522
523 void LCodeGen::RecordSafepoint(Safepoint::DeoptMode deopt_mode) {
524 LPointerMap empty_pointers(RelocInfo::kNoPosition, zone());
525 RecordSafepoint(&empty_pointers, deopt_mode);
526 }
527
528
529 void LCodeGen::RecordSafepointWithRegisters(LPointerMap* pointers,
530 int arguments,
531 Safepoint::DeoptMode deopt_mode) {
532 RecordSafepoint(
533 pointers, Safepoint::kWithRegisters, arguments, deopt_mode);
534 }
535
536
537 bool LCodeGen::GenerateCode() {
538 HPhase phase("Z_Code generation", chunk());
539 ASSERT(is_unused());
540 status_ = GENERATING;
541
542 // Open a frame scope to indicate that there is a frame on the stack. The
543 // NONE indicates that the scope shouldn't actually generate code to set up
544 // the frame (that is done in GeneratePrologue).
545 FrameScope frame_scope(masm_, StackFrame::NONE);
546
547 return GeneratePrologue() &&
548 GenerateBody() &&
549 GenerateDeferredCode() &&
550 GenerateDeoptJumpTable() &&
551 GenerateSafepointTable();
552 }
553
554
555 bool LCodeGen::GeneratePrologue() {
556 ASSERT(is_generating());
557
558 if (info()->IsOptimizing()) {
559 ProfileEntryHookStub::MaybeCallEntryHook(masm_);
560
561 // TODO(all): Add support for stop_t FLAG in DEBUG mode.
562
563 // Strict mode functions and builtins need to replace the receiver
564 // with undefined when called as functions (without an explicit
565 // receiver object).
566 // x5 holds the call kind and is zero for method calls and non-zero for
567 // function calls.
568 if (!info_->is_classic_mode() || info_->is_native()) {
569 Label ok;
570 __ Cbz(x5, &ok);
571 int receiver_offset = scope()->num_parameters() * kPointerSize;
572 __ LoadRoot(x10, Heap::kUndefinedValueRootIndex);
573 __ Poke(x10, receiver_offset);
574 __ Bind(&ok);
575 }
576 }
577
578 ASSERT(__ StackPointer().Is(jssp));
579 info()->set_prologue_offset(masm_->pc_offset());
580 if (NeedsEagerFrame()) {
581 if (info()->IsStub()) {
582 // TODO(jbramley): Does x1 contain a JSFunction here, or does it already
583 // have the special STUB smi?
584 __ Mov(x10, Operand(Smi::FromInt(StackFrame::STUB)));
585 // Compiled stubs don't age, and so they don't need the predictable code
586 // ageing sequence.
587 __ Push(lr, fp, cp, x10);
588 __ Add(fp, jssp, 2 * kPointerSize);
589 } else {
590 // This call emits the following sequence in a way that can be patched for
591 // code ageing support:
592 // Push(lr, fp, cp, x1);
593 // Add(fp, jssp, 2 * kPointerSize);
594 __ EmitFrameSetupForCodeAgePatching();
595 }
596 frame_is_built_ = true;
597 info_->AddNoFrameRange(0, masm_->pc_offset());
598 }
599
600 // Reserve space for the stack slots needed by the code.
601 int slots = GetStackSlotCount();
602 if (slots > 0) {
603 __ Claim(slots, kPointerSize);
604 }
605
606 if (info()->saves_caller_doubles()) {
607 Comment(";;; Save clobbered callee double registers");
608 ASSERT(NeedsEagerFrame());
609 BitVector* doubles = chunk()->allocated_double_registers();
610 BitVector::Iterator iterator(doubles);
611 int count = 0;
612 while (!iterator.Done()) {
613 FPRegister value = FPRegister::FromAllocationIndex(iterator.Current());
614 // TODO(jbramley): Make Poke support FPRegisters.
615 __ Str(value, MemOperand(__ StackPointer(), count * kDoubleSize));
616 iterator.Advance();
617 count++;
618 }
619 }
620
621 // Allocate a local context if needed.
622 int heap_slots = info()->num_heap_slots() - Context::MIN_CONTEXT_SLOTS;
623 if (heap_slots > 0) {
624 Comment(";;; Allocate local context");
625 // Argument to NewContext is the function, which is in x1.
626 __ Push(x1);
627 if (heap_slots <= FastNewContextStub::kMaximumSlots) {
628 FastNewContextStub stub(heap_slots);
629 __ CallStub(&stub);
630 } else {
631 __ CallRuntime(Runtime::kNewFunctionContext, 1);
632 }
633 RecordSafepoint(Safepoint::kNoLazyDeopt);
634 // Context is returned in both x0 and cp. It replaces the context passed to
635 // us. It's saved on the stack and kept live in cp.
636 __ Str(cp, MemOperand(fp, StandardFrameConstants::kContextOffset));
637 // Copy any necessary parameters into the context.
638 int num_parameters = scope()->num_parameters();
639 for (int i = 0; i < num_parameters; i++) {
640 Variable* var = scope()->parameter(i);
641 if (var->IsContextSlot()) {
642 Register value = x0;
643 Register scratch = x3;
644
645 int parameter_offset = StandardFrameConstants::kCallerSPOffset +
646 (num_parameters - 1 - i) * kPointerSize;
647 // Load parameter from stack.
648 __ Ldr(value, MemOperand(fp, parameter_offset));
649 // Store it in the context.
650 MemOperand target = ContextMemOperand(cp, var->index());
651 __ Str(value, target);
652 // Update the write barrier. This clobbers value and scratch.
653 __ RecordWriteContextSlot(cp, target.offset(), value, scratch,
654 GetLinkRegisterState(), kSaveFPRegs);
655 }
656 }
657 Comment(";;; End allocate local context");
658 }
659
660 // Trace the call.
661 if (FLAG_trace && info()->IsOptimizing()) {
662 __ CallRuntime(Runtime::kTraceEnter, 0);
663 }
664
665 return !is_aborted();
666 }
667
668
669 bool LCodeGen::GenerateBody() {
670 ASSERT(is_generating());
671 bool emit_instructions = true;
672
673 for (current_instruction_ = 0;
674 !is_aborted() && (current_instruction_ < instructions_->length());
675 current_instruction_++) {
676 LInstruction* instr = instructions_->at(current_instruction_);
677
678 // Don't emit code for basic blocks with a replacement.
679 if (instr->IsLabel()) {
680 emit_instructions = !LLabel::cast(instr)->HasReplacement();
681 }
682 if (!emit_instructions) continue;
683
684 if (FLAG_code_comments && instr->HasInterestingComment(this)) {
685 Comment(";;; <@%d,#%d> %s",
686 current_instruction_,
687 instr->hydrogen_value()->id(),
688 instr->Mnemonic());
689 }
690
691 instr->CompileToNative(this);
692 }
693 EnsureSpaceForLazyDeopt();
694 return !is_aborted();
695 }
696
697
698 bool LCodeGen::GenerateDeferredCode() {
699 ASSERT(is_generating());
700 if (deferred_.length() > 0) {
701 for (int i = 0; !is_aborted() && (i < deferred_.length()); i++) {
702 LDeferredCode* code = deferred_[i];
703
704 Comment(";;; <@%d,#%d> "
705 "-------------------- Deferred %s --------------------",
706 code->instruction_index(),
707 code->instr()->hydrogen_value()->id(),
708 code->instr()->Mnemonic());
709
710 __ Bind(code->entry());
711
712 if (NeedsDeferredFrame()) {
713 Comment(";;; Build frame");
714 ASSERT(!frame_is_built_);
715 ASSERT(info()->IsStub());
716 frame_is_built_ = true;
717 __ Push(lr, fp, cp);
718 __ Mov(fp, Operand(Smi::FromInt(StackFrame::STUB)));
719 __ Push(fp);
720 __ Add(fp, __ StackPointer(), 2 * kPointerSize);
721 Comment(";;; Deferred code");
722 }
723
724 code->Generate();
725
726 if (NeedsDeferredFrame()) {
727 Comment(";;; Destroy frame");
728 ASSERT(frame_is_built_);
729 __ Pop(xzr, cp, fp, lr);
730 frame_is_built_ = false;
731 }
732
733 __ B(code->exit());
734 }
735 }
736
737 // Force constant pool emission at the end of the deferred code to make
738 // sure that no constant pools are emitted after deferred code, because
739 // deferred code generation is the last step that generates code. The two
740 // following steps only output data used by Crankshaft.
741 masm()->CheckConstPool(true, false);
742
743 return !is_aborted();
744 }
745
746
747 bool LCodeGen::GenerateDeoptJumpTable() {
748 TODO_UNIMPLEMENTED("generate level 1 deopt table");
749
750 // TODO(jbramley): On ARM, the deopt entry for stubs is different in that it
751 // inserts a special marker instead of a function pointer. We need to do the
752 // same on A64, but since we don't use the jump table, we have to do it
753 // in LCodeGen::Deoptimize().
754
755 // The deoptimization jump table is the last part of the instruction
756 // sequence. Mark the generated code as done unless we bailed out.
757 if (!is_aborted()) status_ = DONE;
758 return !is_aborted();
759 }
760
761
762 bool LCodeGen::GenerateSafepointTable() {
763 ASSERT(is_done());
764 safepoints_.Emit(masm(), GetStackSlotCount());
765 return !is_aborted();
766 }
767
768
769 void LCodeGen::FinishCode(Handle<Code> code) {
770 ASSERT(is_done());
771 code->set_stack_slots(GetStackSlotCount());
772 code->set_safepoint_table_offset(safepoints_.GetCodeOffset());
773 if (FLAG_weak_embedded_maps_in_optimized_code) {
774 RegisterDependentCodeForEmbeddedMaps(code);
775 }
776 PopulateDeoptimizationData(code);
777 info()->CommitDependentMaps(code);
778 }
779
780
781 void LCodeGen::Abort(const char* reason) {
782 info()->set_bailout_reason(reason);
783 status_ = ABORTED;
784 }
785
786
787 void LCodeGen::Comment(const char* format, ...) {
788 if (!FLAG_code_comments) return;
789 char buffer[4 * KB];
790 StringBuilder builder(buffer, ARRAY_SIZE(buffer));
791 va_list arguments;
792 va_start(arguments, format);
793 builder.AddFormattedList(format, arguments);
794 va_end(arguments);
795
796 // Copy the string before recording it in the assembler to avoid
797 // issues when the stack allocated buffer goes out of scope.
798 size_t length = builder.position();
799 Vector<char> copy = Vector<char>::New(length + 1);
800 memcpy(copy.start(), builder.Finalize(), copy.length());
801 masm()->RecordComment(copy.start());
802 }
803
804
805 void LCodeGen::RegisterDependentCodeForEmbeddedMaps(Handle<Code> code) {
806 ZoneList<Handle<Map> > maps(1, zone());
807 int mode_mask = RelocInfo::ModeMask(RelocInfo::EMBEDDED_OBJECT);
808 for (RelocIterator it(*code, mode_mask); !it.done(); it.next()) {
809 RelocInfo::Mode mode = it.rinfo()->rmode();
810 if (mode == RelocInfo::EMBEDDED_OBJECT &&
811 it.rinfo()->target_object()->IsMap()) {
812 Handle<Map> map(Map::cast(it.rinfo()->target_object()));
813 if (map->CanTransition()) {
814 maps.Add(map, zone());
815 }
816 }
817 }
818 #ifdef VERIFY_HEAP
819 // This disables verification of weak embedded maps after full GC.
820 // AddDependentCode can cause a GC, which would observe the state where
821 // this code is not yet in the dependent code lists of the embedded maps.
822 NoWeakEmbeddedMapsVerificationScope disable_verification_of_embedded_maps;
823 #endif
824 for (int i = 0; i < maps.length(); i++) {
825 maps.at(i)->AddDependentCode(DependentCode::kWeaklyEmbeddedGroup, code);
826 }
827 }
828
829
830 void LCodeGen::PopulateDeoptimizationData(Handle<Code> code) {
831 int length = deoptimizations_.length();
832 if (length == 0) return;
833
834 Handle<DeoptimizationInputData> data =
835 factory()->NewDeoptimizationInputData(length, TENURED);
836
837 Handle<ByteArray> translations =
838 translations_.CreateByteArray(isolate()->factory());
839 data->SetTranslationByteArray(*translations);
840 data->SetInlinedFunctionCount(Smi::FromInt(inlined_function_count_));
841
842 Handle<FixedArray> literals =
843 factory()->NewFixedArray(deoptimization_literals_.length(), TENURED);
844 { AllowDeferredHandleDereference copy_handles;
845 for (int i = 0; i < deoptimization_literals_.length(); i++) {
846 literals->set(i, *deoptimization_literals_[i]);
847 }
848 data->SetLiteralArray(*literals);
849 }
850
851 data->SetOsrAstId(Smi::FromInt(info_->osr_ast_id().ToInt()));
852 data->SetOsrPcOffset(Smi::FromInt(osr_pc_offset_));
853
854 // Populate the deoptimization entries.
855 for (int i = 0; i < length; i++) {
856 LEnvironment* env = deoptimizations_[i];
857 data->SetAstId(i, env->ast_id());
858 data->SetTranslationIndex(i, Smi::FromInt(env->translation_index()));
859 data->SetArgumentsStackHeight(i,
860 Smi::FromInt(env->arguments_stack_height()));
861 data->SetPc(i, Smi::FromInt(env->pc_offset()));
862 }
863
864 code->set_deoptimization_data(*data);
865 }
866
867
868 void LCodeGen::PopulateDeoptimizationLiteralsWithInlinedFunctions() {
869 ASSERT(deoptimization_literals_.length() == 0);
870
871 const ZoneList<Handle<JSFunction> >* inlined_closures =
872 chunk()->inlined_closures();
873
874 for (int i = 0, length = inlined_closures->length(); i < length; i++) {
875 DefineDeoptimizationLiteral(inlined_closures->at(i));
876 }
877
878 inlined_function_count_ = deoptimization_literals_.length();
879 }
880
881
882 void LCodeGen::Deoptimize(LEnvironment* environment,
883 Deoptimizer::BailoutType bailout_type) {
884 RegisterEnvironmentForDeoptimization(environment, Safepoint::kNoLazyDeopt);
885 ASSERT(environment->HasBeenRegistered());
886 ASSERT(info()->IsOptimizing() || info()->IsStub());
887 int id = environment->deoptimization_index();
888 Address entry =
889 Deoptimizer::GetDeoptimizationEntry(isolate(), id, bailout_type);
890
891 if (entry == NULL) {
892 Abort("bailout was not prepared");
893 return;
894 }
895
896 TODO_UNIMPLEMENTED("Add support for deopt_every_n_times flag.");
897 TODO_UNIMPLEMENTED("Add support for trap_on_deopt flag.");
898
899 // TODO(all): Currently this code jumps directly to the second-level deopt
900 // table entry. It needs to be updated if we decide to use the two-level
901 // table.
902 ASSERT(info()->IsStub() || frame_is_built_);
903 bool needs_lazy_deopt = info()->IsStub();
904 if (frame_is_built_) {
905 if (needs_lazy_deopt) {
906 __ Call(entry, RelocInfo::RUNTIME_ENTRY);
907 } else {
908 __ Jump(entry, RelocInfo::RUNTIME_ENTRY);
909 }
910 } else {
911 // We need to build a frame to deoptimize a stub. Because stubs don't have a
912 // function pointer to put in the frame, put a special marker there instead.
913 // TODO(jbramley): In other architectures, this happens in the jump table.
914 // This is a temporary hack until we implement jump tables in A64.
915 __ Mov(__ Tmp1(), Operand(Smi::FromInt(StackFrame::STUB)));
916 __ Push(lr, fp, cp, __ Tmp1());
917 __ Add(fp, __ StackPointer(), 2 * kPointerSize);
918 // TODO(jbramley): Can this be a jump, rather than a call?
919 __ Call(entry, RelocInfo::RUNTIME_ENTRY);
920 }
921 }
922
923
924 void LCodeGen::Deoptimize(LEnvironment* environment) {
925 Deoptimizer::BailoutType bailout_type = info()->IsStub() ? Deoptimizer::LAZY
926 : Deoptimizer::EAGER;
927 Deoptimize(environment, bailout_type);
928 }
929
930
931 void LCodeGen::SoftDeoptimize(LEnvironment* environment) {
932 ASSERT(!info()->IsStub());
933 Deoptimize(environment, Deoptimizer::SOFT);
934 }
935
936
937 void LCodeGen::DeoptimizeIf(Condition cond, LEnvironment* environment) {
938 Label dont_deopt;
939 __ B(InvertCondition(cond), &dont_deopt);
940 Deoptimize(environment);
941 __ Bind(&dont_deopt);
942 }
943
944
945 void LCodeGen::DeoptimizeIfZero(Register rt, LEnvironment* environment) {
946 Label dont_deopt;
947 __ Cbnz(rt, &dont_deopt);
948 Deoptimize(environment);
949 __ Bind(&dont_deopt);
950 }
951
952
953 void LCodeGen::DeoptimizeIfNegative(Register rt, LEnvironment* environment) {
954 Label dont_deopt;
955 __ Tbz(rt, rt.Is64Bits() ? kXSignBit : kWSignBit, &dont_deopt);
956 Deoptimize(environment);
957 __ Bind(&dont_deopt);
958 }
959
960
961 void LCodeGen::DeoptimizeIfSmi(Register rt,
962 LEnvironment* environment) {
963 Label dont_deopt;
964 __ JumpIfNotSmi(rt, &dont_deopt);
965 Deoptimize(environment);
966 __ Bind(&dont_deopt);
967 }
968
969
970 void LCodeGen::DeoptimizeIfNotSmi(Register rt, LEnvironment* environment) {
971 Label dont_deopt;
972 __ JumpIfSmi(rt, &dont_deopt);
973 Deoptimize(environment);
974 __ Bind(&dont_deopt);
975 }
976
977
978 void LCodeGen::DeoptimizeIfRoot(Register rt,
979 Heap::RootListIndex index,
980 LEnvironment* environment) {
981 Label dont_deopt;
982 __ JumpIfNotRoot(rt, index, &dont_deopt);
983 Deoptimize(environment);
984 __ Bind(&dont_deopt);
985 }
986
987
988 void LCodeGen::DeoptimizeIfNotRoot(Register rt,
989 Heap::RootListIndex index,
990 LEnvironment* environment) {
991 Label dont_deopt;
992 __ JumpIfRoot(rt, index, &dont_deopt);
993 Deoptimize(environment);
994 __ Bind(&dont_deopt);
995 }
996
997
998 void LCodeGen::EnsureSpaceForLazyDeopt() {
999 if (info()->IsStub()) return;
1000 // Ensure that we have enough space after the previous lazy-bailout
1001 // instruction for patching the code here.
1002 intptr_t current_pc = masm()->pc_offset();
1003 int patch_size = Deoptimizer::patch_size();
1004
1005 if (current_pc < (last_lazy_deopt_pc_ + patch_size)) {
1006 intptr_t padding_size = last_lazy_deopt_pc_ + patch_size - current_pc;
1007 ASSERT((padding_size % kInstructionSize) == 0);
1008 InstructionAccurateScope instruction_accurate(
1009 masm(), padding_size / kInstructionSize);
1010
1011 while (padding_size > 0) {
1012 __ nop();
1013 padding_size -= kInstructionSize;
1014 }
1015 }
1016 last_lazy_deopt_pc_ = masm()->pc_offset();
1017 }
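Worked example (assuming, for illustration, Deoptimizer::patch_size() == 16
and kInstructionSize == 4): if the previous lazy-bailout point ended at pc 0
and the current pc is 8, padding_size == 8, so two nops are emitted and
last_lazy_deopt_pc_ moves to 16.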
1018
1019
1020 Register LCodeGen::ToRegister(LOperand* op) const {
1021 // TODO(all): support zero register results, as ToRegister32 does.
1022 ASSERT((op != NULL) && op->IsRegister());
1023 return Register::FromAllocationIndex(op->index());
1024 }
1025
1026
1027 Register LCodeGen::ToRegister32(LOperand* op) const {
1028 ASSERT(op != NULL);
1029 if (op->IsConstantOperand()) {
1030 // If this is a constant operand, the result must be the zero register.
1031 ASSERT(ToInteger32(LConstantOperand::cast(op)) == 0);
1032 return wzr;
1033 } else {
1034 return ToRegister(op).W();
1035 }
1036 }
1037
1038
1039 Smi* LCodeGen::ToSmi(LConstantOperand* op) const {
1040 HConstant* constant = chunk_->LookupConstant(op);
1041 return Smi::FromInt(constant->Integer32Value());
1042 }
1043
1044
1045 DoubleRegister LCodeGen::ToDoubleRegister(LOperand* op) const {
1046 ASSERT((op != NULL) && op->IsDoubleRegister());
1047 return DoubleRegister::FromAllocationIndex(op->index());
1048 }
1049
1050
1051 Operand LCodeGen::ToOperand(LOperand* op) {
1052 ASSERT(op != NULL);
1053 if (op->IsConstantOperand()) {
1054 LConstantOperand* const_op = LConstantOperand::cast(op);
1055 HConstant* constant = chunk()->LookupConstant(const_op);
1056 Representation r = chunk_->LookupLiteralRepresentation(const_op);
1057 if (r.IsInteger32()) {
1058 ASSERT(constant->HasInteger32Value());
1059 return Operand(constant->Integer32Value());
1060 } else if (r.IsDouble()) {
1061 Abort("ToOperand unsupported double immediate.");
1062 }
1063 ASSERT(r.IsTagged());
1064 return Operand(constant->handle());
1065 } else if (op->IsRegister()) {
1066 return Operand(ToRegister(op));
1067 } else if (op->IsDoubleRegister()) {
1068 Abort("ToOperand IsDoubleRegister unimplemented");
1069 return Operand(0);
1070 }
1071 // Stack slots not implemented, use ToMemOperand instead.
1072 UNREACHABLE();
1073 return Operand(0);
1074 }
1075
1076
1077 Operand LCodeGen::ToOperand32(LOperand* op) {
1078 ASSERT(op != NULL);
1079 if (op->IsRegister()) {
1080 return Operand(ToRegister32(op));
1081 } else if (op->IsConstantOperand()) {
1082 LConstantOperand* const_op = LConstantOperand::cast(op);
1083 HConstant* constant = chunk()->LookupConstant(const_op);
1084 Representation r = chunk_->LookupLiteralRepresentation(const_op);
1085 if (r.IsInteger32()) {
1086 ASSERT(constant->HasInteger32Value());
1087 return Operand(constant->Integer32Value());
1088 } else {
1089 // Other constants not implemented.
1090 Abort("ToOperand32 unsupported immediate.");
1091 }
1092 }
1093 // Other cases are not implemented.
1094 UNREACHABLE();
1095 return Operand(0);
1096 }
1097
1098
1099 MemOperand LCodeGen::ToMemOperand(LOperand* op) const {
1100 ASSERT(op != NULL);
1101 ASSERT(!op->IsRegister());
1102 ASSERT(!op->IsDoubleRegister());
1103 ASSERT(op->IsStackSlot() || op->IsDoubleStackSlot());
1104 return MemOperand(fp, StackSlotOffset(op->index()));
1105 }
1106
1107
1108 Handle<Object> LCodeGen::ToHandle(LConstantOperand* op) const {
1109 HConstant* constant = chunk_->LookupConstant(op);
1110 ASSERT(chunk_->LookupLiteralRepresentation(op).IsSmiOrTagged());
1111 return constant->handle();
1112 }
1113
1114
1115 bool LCodeGen::IsSmi(LConstantOperand* op) const {
1116 return chunk_->LookupLiteralRepresentation(op).IsSmi();
1117 }
1118
1119
1120 bool LCodeGen::IsInteger32Constant(LConstantOperand* op) const {
1121 return op->IsConstantOperand() &&
1122 chunk_->LookupLiteralRepresentation(op).IsSmiOrInteger32();
1123 }
1124
1125
1126 int32_t LCodeGen::ToInteger32(LConstantOperand* op) const {
1127 HConstant* constant = chunk_->LookupConstant(op);
1128 return constant->Integer32Value();
1129 }
1130
1131
1132 double LCodeGen::ToDouble(LConstantOperand* op) const {
1133 HConstant* constant = chunk_->LookupConstant(op);
1134 ASSERT(constant->HasDoubleValue());
1135 return constant->DoubleValue();
1136 }
1137
1138
1139 Condition LCodeGen::TokenToCondition(Token::Value op, bool is_unsigned) {
1140 Condition cond = nv;
1141 switch (op) {
1142 case Token::EQ:
1143 case Token::EQ_STRICT:
1144 cond = eq;
1145 break;
1146 case Token::LT:
1147 cond = is_unsigned ? lo : lt;
1148 break;
1149 case Token::GT:
1150 cond = is_unsigned ? hi : gt;
1151 break;
1152 case Token::LTE:
1153 cond = is_unsigned ? ls : le;
1154 break;
1155 case Token::GTE:
1156 cond = is_unsigned ? hs : ge;
1157 break;
1158 case Token::IN:
1159 case Token::INSTANCEOF:
1160 default:
1161 UNREACHABLE();
1162 }
1163 return cond;
1164 }
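For example (conditions from the mapping above):

  TokenToCondition(Token::LT, false);  // lt (signed)
  TokenToCondition(Token::LT, true);   // lo (unsigned)
  TokenToCondition(Token::EQ, true);   // eq, regardless of signedness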
1165
1166
1167 template<class InstrType>
1168 void LCodeGen::EmitBranchGeneric(InstrType instr,
1169 const BranchGenerator& branch) {
1170 int left_block = instr->TrueDestination(chunk_);
1171 int right_block = instr->FalseDestination(chunk_);
1172
1173 int next_block = GetNextEmittedBlock();
1174
1175 if (right_block == left_block) {
1176 EmitGoto(left_block);
1177 } else if (left_block == next_block) {
1178 branch.EmitInverted(chunk_->GetAssemblyLabel(right_block));
1179 } else if (right_block == next_block) {
1180 branch.Emit(chunk_->GetAssemblyLabel(left_block));
1181 } else {
1182 branch.Emit(chunk_->GetAssemblyLabel(left_block));
1183 __ B(chunk_->GetAssemblyLabel(right_block));
1184 }
1185 }
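Concrete fall-through example (illustrative): with condition eq, a true
block #3, a false block #4 and next_block == 4, only "b.eq <block 3>" is
emitted and execution falls through into block 4; if instead next_block == 3,
the inverted form "b.ne <block 4>" is emitted and execution falls through
into block 3.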
1186
1187
1188 template<class InstrType>
1189 void LCodeGen::EmitBranch(InstrType instr, Condition condition) {
1190 BranchOnCondition branch(this, condition);
1191 EmitBranchGeneric(instr, branch);
1192 }
1193
1194
1195 template<class InstrType>
1196 void LCodeGen::EmitCompareAndBranch(InstrType instr,
1197 Condition condition,
1198 const Register& lhs,
1199 const Operand& rhs) {
1200 CompareAndBranch branch(this, condition, lhs, rhs);
1201 EmitBranchGeneric(instr, branch);
1202 }
1203
1204
1205 template<class InstrType>
1206 void LCodeGen::EmitTestAndBranch(InstrType instr,
1207 Condition condition,
1208 const Register& value,
1209 uint64_t mask) {
1210 TestAndBranch branch(this, condition, value, mask);
1211 EmitBranchGeneric(instr, branch);
1212 }
1213
1214
1215 void LCodeGen::DoGap(LGap* gap) {
1216 for (int i = LGap::FIRST_INNER_POSITION;
1217 i <= LGap::LAST_INNER_POSITION;
1218 i++) {
1219 LGap::InnerPosition inner_pos = static_cast<LGap::InnerPosition>(i);
1220 LParallelMove* move = gap->GetParallelMove(inner_pos);
1221 if (move != NULL) {
1222 resolver_.Resolve(move);
1223 }
1224 }
1225 }
1226
1227
1228 void LCodeGen::DoAccessArgumentsAt(LAccessArgumentsAt* instr) {
1229 Register arguments = ToRegister(instr->arguments());
1230 Register result = ToRegister(instr->result());
1231
1232 if (instr->length()->IsConstantOperand() &&
1233 instr->index()->IsConstantOperand()) {
1234 ASSERT(instr->temp() == NULL);
1235 int index = ToInteger32(LConstantOperand::cast(instr->index()));
1236 int length = ToInteger32(LConstantOperand::cast(instr->length()));
1237 int offset = ((length - index) + 1) * kPointerSize;
1238 __ Ldr(result, MemOperand(arguments, offset));
1239 } else {
1240 ASSERT(instr->temp() != NULL);
1241 Register temp = ToRegister32(instr->temp());
1242 Register length = ToRegister32(instr->length());
1243 Operand index = ToOperand32(instr->index());
1244 // There are two words between the frame pointer and the last argument.
1245 // Subtracting from length accounts for only one, so we add one more.
1246 __ Sub(temp, length, index);
1247 __ Add(temp, temp, 1);
1248 __ Ldr(result, MemOperand(arguments, temp, UXTW, kPointerSizeLog2));
1249 }
1250 }
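Worked example for the constant case above: the last argument
(index == length - 1) yields offset == 2 * kPointerSize, skipping the two
words (the saved lr and fp) that sit between the arguments pointer and the
last argument.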
1251
1252
1253 void LCodeGen::DoAddI(LAddI* instr) {
1254 bool can_overflow = instr->hydrogen()->CheckFlag(HValue::kCanOverflow);
1255 Register result = ToRegister32(instr->result());
1256 Register left = ToRegister32(instr->left());
1257 Operand right = ToOperand32(instr->right());
1258 if (can_overflow) {
1259 __ Adds(result, left, right);
1260 DeoptimizeIf(vs, instr->environment());
1261 } else {
1262 __ Add(result, left, right);
1263 }
1264 }
1265
1266
1267 void LCodeGen::DoAllocate(LAllocate* instr) {
1268 class DeferredAllocate: public LDeferredCode {
1269 public:
1270 DeferredAllocate(LCodeGen* codegen, LAllocate* instr)
1271 : LDeferredCode(codegen), instr_(instr) { }
1272 virtual void Generate() { codegen()->DoDeferredAllocate(instr_); }
1273 virtual LInstruction* instr() { return instr_; }
1274 private:
1275 LAllocate* instr_;
1276 };
1277
1278 DeferredAllocate* deferred = new(zone()) DeferredAllocate(this, instr);
1279
1280 Register result = ToRegister(instr->result());
1281 Register temp1 = ToRegister(instr->temp1());
1282 Register temp2 = ToRegister(instr->temp2());
1283
1284 // Allocate memory for the object.
1285 AllocationFlags flags = TAG_OBJECT;
1286 if (instr->hydrogen()->MustAllocateDoubleAligned()) {
1287 flags = static_cast<AllocationFlags>(flags | DOUBLE_ALIGNMENT);
1288 }
1289
1290 if (instr->hydrogen()->CanAllocateInOldPointerSpace()) {
1291 ASSERT(!instr->hydrogen()->CanAllocateInOldDataSpace());
1292 flags = static_cast<AllocationFlags>(flags | PRETENURE_OLD_POINTER_SPACE);
1293 } else if (instr->hydrogen()->CanAllocateInOldDataSpace()) {
1294 flags = static_cast<AllocationFlags>(flags | PRETENURE_OLD_DATA_SPACE);
1295 }
1296
1297 if (instr->size()->IsConstantOperand()) {
1298 int32_t size = ToInteger32(LConstantOperand::cast(instr->size()));
1299 __ Allocate(size, result, temp1, temp2, deferred->entry(), flags);
1300 } else {
1301 Register size = ToRegister(instr->size());
1302 __ Allocate(size, result, temp1, temp2, deferred->entry(), flags);
1303 }
1304
1305 __ Bind(deferred->exit());
1306 }
1307
1308
1309 void LCodeGen::DoDeferredAllocate(LAllocate* instr) {
1310 Register result = ToRegister(instr->result());
1311
1312 // TODO(3095996): Get rid of this. For now, we need to make the
1313 // result register contain a valid pointer because it is already
1314 // contained in the register pointer map.
1315 __ Mov(result, Operand(Smi::FromInt(0)));
1316
1317 PushSafepointRegistersScope scope(this, Safepoint::kWithRegisters);
1318 if (instr->size()->IsConstantOperand()) {
1319 int32_t size = ToInteger32(LConstantOperand::cast(instr->size()));
1320 // Use result as a scratch register.
1321 __ Mov(result, Operand(Smi::FromInt(size)));
1322 __ Push(result);
1323 } else {
1324 Register size = ToRegister(instr->size());
1325 __ SmiTag(size);
1326 __ Push(size);
1327 }
1328 if (instr->hydrogen()->CanAllocateInOldPointerSpace()) {
1329 ASSERT(!instr->hydrogen()->CanAllocateInOldDataSpace());
1330 CallRuntimeFromDeferred(Runtime::kAllocateInOldPointerSpace, 1, instr);
1333 } else if (instr->hydrogen()->CanAllocateInOldDataSpace()) {
1334 CallRuntimeFromDeferred(Runtime::kAllocateInOldDataSpace, 1, instr);
1335 } else {
1336 CallRuntimeFromDeferred(Runtime::kAllocateInNewSpace, 1, instr);
1337 }
1338 __ StoreToSafepointRegisterSlot(x0, result);
1339 }
1340
1341
1342 void LCodeGen::DoApplyArguments(LApplyArguments* instr) {
1343 Register receiver = ToRegister(instr->receiver());
1344 Register function = ToRegister(instr->function());
1345 Register length = ToRegister(instr->length());
1346 Register elements = ToRegister(instr->elements());
1347 Register scratch = x5;
1348 ASSERT(receiver.Is(x0)); // Used for parameter count.
1349 ASSERT(function.Is(x1)); // Required by InvokeFunction.
1350 ASSERT(ToRegister(instr->result()).Is(x0));
1351 ASSERT(instr->IsMarkedAsCall());
1352
1353 // Copy the arguments to this function possibly from the
1354 // adaptor frame below it.
1355 const uint32_t kArgumentsLimit = 1 * KB;
1356 __ Cmp(length, kArgumentsLimit);
1357 DeoptimizeIf(hi, instr->environment());
1358
1359 // Push the receiver and use the register to keep the original
1360 // number of arguments.
1361 __ Push(receiver);
1362 Register argc = receiver;
1363 receiver = NoReg;
1364 __ Mov(argc, length);
1365 // The arguments are at a one pointer size offset from elements.
1366 __ Add(elements, elements, 1 * kPointerSize);
1367
1368 // Loop through the arguments pushing them onto the execution
1369 // stack.
1370 Label invoke, loop;
1371 // length is a small non-negative integer, due to the test above.
1372 __ Cbz(length, &invoke);
1373 __ Bind(&loop);
1374 __ Ldr(scratch, MemOperand(elements, length, LSL, kPointerSizeLog2));
1375 __ Push(scratch);
1376 __ Subs(length, length, 1);
1377 __ B(ne, &loop);
1378
1379 __ Bind(&invoke);
1380 ASSERT(instr->HasPointerMap());
1381 LPointerMap* pointers = instr->pointer_map();
1382 RecordPosition(pointers->position());
1383 SafepointGenerator safepoint_generator(this, pointers, Safepoint::kLazyDeopt);
1384 // The number of arguments is stored in argc (receiver) which is x0, as
1385 // expected by InvokeFunction.
1386 ParameterCount actual(argc);
1387 __ InvokeFunction(function, actual, CALL_FUNCTION,
1388 safepoint_generator, CALL_AS_METHOD);
1389 __ Ldr(cp, MemOperand(fp, StandardFrameConstants::kContextOffset));
1390 }
1391
1392
1393 void LCodeGen::DoArgumentsElements(LArgumentsElements* instr) {
1394 Register result = ToRegister(instr->result());
1395
1396 if (instr->hydrogen()->from_inlined()) {
1397 // When we are inside an inlined function, the arguments are the last things
1398 // that have been pushed on the stack. Therefore the arguments array can be
1399 // accessed directly from jssp.
1400 // However, in the normal case it is accessed via fp, and there are two words
1401 // on the stack between fp and the arguments (the saved lr and fp), which the
1402 // LAccessArgumentsAt implementation takes into account.
1403 // In the inlined case we need to subtract the size of two words from jssp to
1404 // get a pointer which works with LAccessArgumentsAt.
1405 ASSERT(masm()->StackPointer().Is(jssp));
1406 __ Sub(result, jssp, 2 * kPointerSize);
1407 } else {
1408 ASSERT(instr->temp() != NULL);
1409 Register previous_fp = ToRegister(instr->temp());
1410
1411 __ Ldr(previous_fp,
1412 MemOperand(fp, StandardFrameConstants::kCallerFPOffset));
1413 __ Ldr(result,
1414 MemOperand(previous_fp, StandardFrameConstants::kContextOffset));
1415 __ Cmp(result, Operand(Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR)));
1416 __ Csel(result, fp, previous_fp, ne);
1417 }
1418 }
1419
1420
1421 void LCodeGen::DoArgumentsLength(LArgumentsLength* instr) {
1422 Register elements = ToRegister(instr->elements());
1423 Register result = ToRegister(instr->result());
1424 Label done;
1425
1426 // If there is no arguments adaptor frame, the number of arguments is fixed.
1427 __ Cmp(fp, elements);
1428 __ Mov(result, scope()->num_parameters());
1429 __ B(eq, &done);
1430
1431 // Arguments adaptor frame present. Get argument length from there.
1432 __ Ldr(result, MemOperand(fp, StandardFrameConstants::kCallerFPOffset));
1433 __ Ldrsw(result,
1434 UntagSmiMemOperand(result,
1435 ArgumentsAdaptorFrameConstants::kLengthOffset));
1436
1437 // Argument length is in result register.
1438 __ Bind(&done);
1439 }
1440
1441
1442 void LCodeGen::DoArithmeticD(LArithmeticD* instr) {
1443 DoubleRegister left = ToDoubleRegister(instr->left());
1444 DoubleRegister right = ToDoubleRegister(instr->right());
1445 DoubleRegister result = ToDoubleRegister(instr->result());
1446
1447 switch (instr->op()) {
1448 case Token::ADD: __ Fadd(result, left, right); break;
1449 case Token::SUB: __ Fsub(result, left, right); break;
1450 case Token::MUL: __ Fmul(result, left, right); break;
1451 case Token::DIV: __ Fdiv(result, left, right); break;
1452 case Token::MOD: {
1453 // The ECMA-262 remainder operator is the remainder from a truncating
1454 // (round-towards-zero) division. Note that this differs from IEEE-754.
1455 //
1456 // TODO(jbramley): See if it's possible to do this inline, rather than by
1457 // calling a helper function. With frintz (to produce the intermediate
1458 // quotient) and fmsub (to calculate the remainder without loss of
1459 // precision), it should be possible. However, we would need support for
1460 // fdiv in round-towards-zero mode, and the A64 simulator doesn't support
1461 // that yet.
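// A sketch of that inline sequence, assuming a round-towards-zero fdiv
// were available (illustrative only; d2 used as a scratch register):
//   __ Fdiv(d2, left, right);           // raw quotient
//   __ Frintz(d2, d2);                  // truncate towards zero
//   __ Fmsub(result, d2, right, left);  // left - (d2 * right)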
1462 ASSERT(left.Is(d0));
1463 ASSERT(right.Is(d1));
1464 __ CallCFunction(
1465 ExternalReference::double_fp_operation(Token::MOD, isolate()),
1466 0, 2);
1467 ASSERT(result.Is(d0));
1468 break;
1469 }
1470 default:
1471 UNREACHABLE();
1472 break;
1473 }
1474 }
1475
1476
1477 void LCodeGen::DoArithmeticT(LArithmeticT* instr) {
1478 ASSERT(ToRegister(instr->left()).is(x1));
1479 ASSERT(ToRegister(instr->right()).is(x0));
1480 ASSERT(ToRegister(instr->result()).is(x0));
1481
1482 BinaryOpStub stub(instr->op(), NO_OVERWRITE);
1483 CallCode(stub.GetCode(isolate()), RelocInfo::CODE_TARGET, instr);
1484 }
1485
1486
1487 void LCodeGen::DoBitI(LBitI* instr) {
1488 LOperand* left_op = instr->left();
1489 LOperand* right_op = instr->right();
1490 Register left = ToRegister(left_op);
1491 Register result = ToRegister(instr->result());
1492
1493 ASSERT(right_op->IsRegister() || right_op->IsConstantOperand());
1494 Operand right = ToOperand(right_op);
1495
1496 switch (instr->op()) {
1497 case Token::BIT_AND: __ And(result, left, right); break;
1498 case Token::BIT_OR: __ Orr(result, left, right); break;
1499 case Token::BIT_XOR: __ Eor(result, left, right); break;
1500 default:
1501 UNREACHABLE();
1502 break;
1503 }
1504 }
1505
1506
1507 void LCodeGen::DoBitNotI(LBitNotI* instr) {
1508 Register input = ToRegister(instr->value()).W();
1509 Register result = ToRegister(instr->result()).W();
1510 __ Mvn(result, input);
1511 }
1512
1513
1514 void LCodeGen::DoBoundsCheck(LBoundsCheck *instr) {
1515 if (instr->hydrogen()->skip_check()) return;
1516
1517 Register length = ToRegister(instr->length());
1518
1519 if (instr->index()->IsConstantOperand()) {
1520 int constant_index =
1521 ToInteger32(LConstantOperand::cast(instr->index()));
1522
1523 if (instr->hydrogen()->length()->representation().IsSmi()) {
1524 __ Cmp(length, Operand(Smi::FromInt(constant_index)));
1525 } else {
1526 __ Cmp(length, Operand(constant_index));
1527 }
1528 } else {
1529 __ Cmp(length, ToRegister(instr->index()));
1530 }
1531 DeoptimizeIf(ls, instr->environment());
1532 }
1533
1534
1535 void LCodeGen::DoBranch(LBranch* instr) {
1536 Representation r = instr->hydrogen()->value()->representation();
1537 Label* true_label = instr->TrueLabel(chunk_);
1538 Label* false_label = instr->FalseLabel(chunk_);
1539
1540 if (r.IsInteger32()) {
1541 ASSERT(!info()->IsStub());
1542 EmitCompareAndBranch(instr, ne, ToRegister32(instr->value()), 0);
1543 } else if (r.IsSmi()) {
1544 ASSERT(!info()->IsStub());
1545 STATIC_ASSERT(kSmiTag == 0);
1546 EmitCompareAndBranch(instr, ne, ToRegister(instr->value()), 0);
1547 } else if (r.IsDouble()) {
1548 DoubleRegister value = ToDoubleRegister(instr->value());
1549 __ Fcmp(value, 0.0);
1550 // If we got a NaN, jump to the false branch.
1551 __ B(vs, false_label);
1552 EmitBranch(instr, ne);
1553 } else {
1554 ASSERT(r.IsTagged());
1555 Register value = ToRegister(instr->value());
1556 HType type = instr->hydrogen()->value()->type();
1557
1558 if (type.IsBoolean()) {
1559 ASSERT(!info()->IsStub());
1560 __ CompareRoot(value, Heap::kTrueValueRootIndex);
1561 EmitBranch(instr, eq);
1562 } else if (type.IsSmi()) {
1563 ASSERT(!info()->IsStub());
1564 EmitCompareAndBranch(instr, ne, value, Operand(Smi::FromInt(0)));
1565 } else {
1566 ToBooleanStub::Types expected = instr->hydrogen()->expected_input_types();
1567 // Avoid deopts in the case where we've never executed this path before.
1568 if (expected.IsEmpty()) expected = ToBooleanStub::all_types();
1569
1570 if (expected.Contains(ToBooleanStub::UNDEFINED)) {
1571 // undefined -> false.
1572 __ JumpIfRoot(
1573 value, Heap::kUndefinedValueRootIndex, false_label);
1574 }
1575
1576 if (expected.Contains(ToBooleanStub::BOOLEAN)) {
1577 // Boolean -> its value.
1578 __ JumpIfRoot(
1579 value, Heap::kTrueValueRootIndex, true_label);
1580 __ JumpIfRoot(
1581 value, Heap::kFalseValueRootIndex, false_label);
1582 }
1583
1584 if (expected.Contains(ToBooleanStub::NULL_TYPE)) {
1585 // 'null' -> false.
1586 __ JumpIfRoot(
1587 value, Heap::kNullValueRootIndex, false_label);
1588 }
1589
1590 if (expected.Contains(ToBooleanStub::SMI)) {
1591 // Smis: 0 -> false, all other -> true.
1592 ASSERT(Smi::FromInt(0) == 0);
1593 __ Cbz(value, false_label);
1594 __ JumpIfSmi(value, true_label);
1595 } else if (expected.NeedsMap()) {
1596 // If we need a map later and have a smi, deopt.
1597 DeoptimizeIfSmi(value, instr->environment());
1598 }
1599
1600 Register map = NoReg;
1601 Register scratch = NoReg;
1602
1603 if (expected.NeedsMap()) {
1604 ASSERT((instr->temp1() != NULL) && (instr->temp2() != NULL));
1605 map = ToRegister(instr->temp1());
1606 scratch = ToRegister(instr->temp2());
1607
1608 __ Ldr(map, FieldMemOperand(value, HeapObject::kMapOffset));
1609
1610 if (expected.CanBeUndetectable()) {
1611 // Undetectable -> false.
1612 __ Ldrb(scratch, FieldMemOperand(map, Map::kBitFieldOffset));
1613 __ TestAndBranchIfAnySet(
1614 scratch, 1 << Map::kIsUndetectable, false_label);
1615 }
1616 }
1617
1618 if (expected.Contains(ToBooleanStub::SPEC_OBJECT)) {
1619 // spec object -> true.
1620 __ CompareInstanceType(map, scratch, FIRST_SPEC_OBJECT_TYPE);
1621 __ B(ge, true_label);
1622 }
1623
1624 if (expected.Contains(ToBooleanStub::STRING)) {
1625 // String value -> false iff empty.
1626 Label not_string;
1627 __ CompareInstanceType(map, scratch, FIRST_NONSTRING_TYPE);
1628 __ B(ge, &not_string);
1629 __ Ldr(scratch, FieldMemOperand(value, String::kLengthOffset));
1630 __ Cbz(scratch, false_label);
1631 __ B(true_label);
1632 __ Bind(&not_string);
1633 }
1634
1635 if (expected.Contains(ToBooleanStub::SYMBOL)) {
1636 // Symbol value -> true.
1637 __ CompareInstanceType(map, scratch, SYMBOL_TYPE);
1638 __ B(eq, true_label);
1639 }
1640
1641 if (expected.Contains(ToBooleanStub::HEAP_NUMBER)) {
1642 Label not_heap_number;
1643 __ JumpIfNotRoot(map, Heap::kHeapNumberMapRootIndex, &not_heap_number);
1644
1645 __ Ldr(double_scratch(),
1646 FieldMemOperand(value, HeapNumber::kValueOffset));
1647 __ Fcmp(double_scratch(), 0.0);
1648 // If we got a NaN (overflow bit is set), jump to the false branch.
1649 __ B(vs, false_label);
1650 __ B(eq, false_label);
1651 __ B(true_label);
1652 __ Bind(&not_heap_number);
1653 }
1654
1655 // We've seen something for the first time -> deopt.
1656 Deoptimize(instr->environment());
1657 }
1658 }
1659 }
1660
1661
1662 void LCodeGen::CallKnownFunction(Handle<JSFunction> function,
1663 int formal_parameter_count,
1664 int arity,
1665 LInstruction* instr,
1666 CallKind call_kind,
1667 Register function_reg) {
1668 bool dont_adapt_arguments =
1669 formal_parameter_count == SharedFunctionInfo::kDontAdaptArgumentsSentinel;
1670 bool can_invoke_directly =
1671 dont_adapt_arguments || formal_parameter_count == arity;
1672
1673 // The function interface relies on the following register assignments.
1674 ASSERT(function_reg.Is(x1) || function_reg.IsNone());
1675 Register arity_reg = x0;
1676 Register call_kind_reg = x5;
1677
1678 LPointerMap* pointers = instr->pointer_map();
1679 RecordPosition(pointers->position());
1680
1681 // If necessary, load the function object.
1682 if (function_reg.IsNone()) {
1683 function_reg = x1;
1684 __ LoadHeapObject(function_reg, function);
1685 }
1686
1687 if (FLAG_debug_code) {
1688 Label is_not_smi;
1689 // Try to confirm that function_reg (x1) is a tagged pointer.
1690 __ JumpIfNotSmi(function_reg, &is_not_smi);
1691 __ Abort("In CallKnownFunction, a function object is expected in x1.");
1692 __ Bind(&is_not_smi);
1693 }
1694
1695 if (can_invoke_directly) {
1696 // Change context.
1697 __ Ldr(cp, FieldMemOperand(function_reg, JSFunction::kContextOffset));
1698
1699 // Set the arguments count if adaptation is not needed. Assumes that x0 is
1700 // available to write to at this point.
1701 if (dont_adapt_arguments) {
1702 __ Mov(arity_reg, arity);
1703 }
1704
1705 // Invoke function.
1706 __ SetCallKind(call_kind_reg, call_kind);
1707 __ Ldr(x10, FieldMemOperand(function_reg, JSFunction::kCodeEntryOffset));
1708 __ Call(x10);
1709
1710 // Set up deoptimization.
1711 RecordSafepointWithLazyDeopt(instr, RECORD_SIMPLE_SAFEPOINT);
1712 } else {
1713 SafepointGenerator generator(this, pointers, Safepoint::kLazyDeopt);
1714 ParameterCount count(arity);
1715 ParameterCount expected(formal_parameter_count);
1716 __ InvokeFunction(function, expected, count, CALL_FUNCTION, generator,
1717 call_kind, function_reg);
1718 }
1719
1720 // Restore context.
1721 __ Ldr(cp, MemOperand(fp, StandardFrameConstants::kContextOffset));
1722 }
1723
1724
1725 void LCodeGen::DoCallConstantFunction(LCallConstantFunction* instr) {
1726 ASSERT(ToRegister(instr->result()).is(x0));
1727 CallKnownFunction(instr->hydrogen()->function(),
1728 instr->hydrogen()->formal_parameter_count(),
1729 instr->arity(), instr, CALL_AS_METHOD);
1730 }
1731
1732
1733 void LCodeGen::DoCallKnownGlobal(LCallKnownGlobal* instr) {
1734 ASSERT(ToRegister(instr->result()).is(x0));
1735 CallKnownFunction(instr->hydrogen()->target(),
1736 instr->hydrogen()->formal_parameter_count(),
1737 instr->arity(), instr, CALL_AS_FUNCTION);
1738 }
1739
1740
1741 void LCodeGen::DoCallGlobal(LCallGlobal* instr) {
1742 ASSERT(ToRegister(instr->result()).is(x0));
1743
1744 int arity = instr->arity();
1745 RelocInfo::Mode mode = RelocInfo::CODE_TARGET_CONTEXT;
1746 Handle<Code> ic =
1747 isolate()->stub_cache()->ComputeCallInitialize(arity, mode);
1748 __ Mov(x2, Operand(instr->name()));
1749 CallCode(ic, mode, instr);
1750 __ Ldr(cp, MemOperand(fp, StandardFrameConstants::kContextOffset));
1751 }
1752
1753
1754 void LCodeGen::DoCallKeyed(LCallKeyed* instr) {
1755 ASSERT(ToRegister(instr->key()).Is(x2));
1756 ASSERT(ToRegister(instr->result()).Is(x0));
1757
1758 int arity = instr->arity();
1759 Handle<Code> ic =
1760 isolate()->stub_cache()->ComputeKeyedCallInitialize(arity);
1761 CallCode(ic, RelocInfo::CODE_TARGET, instr);
1762 __ Ldr(cp, MemOperand(fp, StandardFrameConstants::kContextOffset));
1763 }
1764
1765
1766 void LCodeGen::DoCallNamed(LCallNamed* instr) {
1767 ASSERT(ToRegister(instr->result()).is(x0));
1768
1769 int arity = instr->arity();
1770 RelocInfo::Mode mode = RelocInfo::CODE_TARGET;
1771 Handle<Code> ic =
1772 isolate()->stub_cache()->ComputeCallInitialize(arity, mode);
1773
1774 // IC needs a pointer to the name of the function to be called in x2.
1775 __ Mov(x2, Operand(instr->name()));
1776 CallCode(ic, mode, instr);
1777 // Restore context register.
1778 __ Ldr(cp, MemOperand(fp, StandardFrameConstants::kContextOffset));
1779 }
1780
1781
1782 void LCodeGen::DoCallRuntime(LCallRuntime* instr) {
1783 CallRuntime(instr->function(), instr->arity(), instr);
1784 }
1785
1786
1787 void LCodeGen::DoCallStub(LCallStub* instr) {
1788 ASSERT(ToRegister(instr->result()).is(x0));
1789 switch (instr->hydrogen()->major_key()) {
1790 case CodeStub::RegExpConstructResult: {
1791 RegExpConstructResultStub stub;
1792 CallCode(stub.GetCode(isolate()), RelocInfo::CODE_TARGET, instr);
1793 break;
1794 }
1795 case CodeStub::RegExpExec: {
1796 RegExpExecStub stub;
1797 CallCode(stub.GetCode(isolate()), RelocInfo::CODE_TARGET, instr);
1798 break;
1799 }
1800 case CodeStub::SubString: {
1801 SubStringStub stub;
1802 CallCode(stub.GetCode(isolate()), RelocInfo::CODE_TARGET, instr);
1803 break;
1804 }
1805 case CodeStub::NumberToString: {
1806 NumberToStringStub stub;
1807 CallCode(stub.GetCode(isolate()), RelocInfo::CODE_TARGET, instr);
1808 break;
1809 }
1810 case CodeStub::StringAdd: {
1811 // TODO(jbramley): In bleeding_edge, there is no StringAdd case here.
1812 StringAddStub stub(NO_STRING_ADD_FLAGS);
1813 CallCode(stub.GetCode(isolate()), RelocInfo::CODE_TARGET, instr);
1814 break;
1815 }
1816 case CodeStub::StringCompare: {
1817 StringCompareStub stub;
1818 CallCode(stub.GetCode(isolate()), RelocInfo::CODE_TARGET, instr);
1819 break;
1820 }
1821 case CodeStub::TranscendentalCache: {
1822 __ Peek(x0, 0);
1823 TranscendentalCacheStub stub(instr->transcendental_type(),
1824 TranscendentalCacheStub::TAGGED);
1825 CallCode(stub.GetCode(isolate()), RelocInfo::CODE_TARGET, instr);
1826 break;
1827 }
1828 default:
1829 UNREACHABLE();
1830 }
1831 }
1832
1833
1834 void LCodeGen::DoCheckMaps(LCheckMaps* instr) {
1835 Register object = ToRegister(instr->value());
1836 Register map_reg = ToRegister(instr->temp());
1837
1838 Label success;
1839 SmallMapList* map_set = instr->hydrogen()->map_set();
1840 __ Ldr(map_reg, FieldMemOperand(object, HeapObject::kMapOffset));
1841 for (int i = 0; i < map_set->length(); i++) {
1842 Handle<Map> map = map_set->at(i);
1843 __ CompareMap(map_reg, map, &success);
1844 __ B(eq, &success);
1845 }
1846
1847 // If we didn't match a map, deoptimize.
1848 Deoptimize(instr->environment());
1849
1850 __ Bind(&success);
1851 }
1852
1853
1854 void LCodeGen::DoCheckNonSmi(LCheckNonSmi* instr) {
1855 // TODO(all): Depending on how we choose to implement the deopt, if we could
1856 // guarantee that we have a deopt handler reachable by a tbz instruction,
1857 // we could use tbz here and produce less code to support this instruction.
1858 DeoptimizeIfSmi(ToRegister(instr->value()), instr->environment());
1859 }
1860
1861
1862 void LCodeGen::DoCheckPrototypeMaps(LCheckPrototypeMaps* instr) {
1863 ZoneList<Handle<JSObject> >* prototypes = instr->prototypes();
1864 ZoneList<Handle<Map> >* maps = instr->maps();
1865 ASSERT(prototypes->length() == maps->length());
1866
1867 if (!instr->hydrogen()->CanOmitPrototypeChecks()) {
1868 // TODO(jbramley): The temp registers are only needed in this case.
1869 Label success, deopt;
1870 Register temp1 = ToRegister(instr->temp1());
1871 Register temp2 = ToRegister(instr->temp2());
1872 for (int i = 0; i < prototypes->length(); i++) {
1873 __ LoadHeapObject(temp1, prototypes->at(i));
1874 __ Ldr(temp2, FieldMemOperand(temp1, HeapObject::kMapOffset));
1875 __ CompareMap(temp2, maps->at(i), &success);
1876 __ B(eq, &success);
1877 }
1878 // If we didn't match a map, deoptimize.
1879 Deoptimize(instr->environment());
1880 __ Bind(&success);
1881 }
1882 }
1883
1884
1885 void LCodeGen::DoCheckSmi(LCheckSmi* instr) {
1886 Register value = ToRegister(instr->value());
1887 ASSERT(ToRegister(instr->result()).Is(value));
1888 // TODO(all): See DoCheckNonSmi for comments on use of tbz.
1889 DeoptimizeIfNotSmi(value, instr->environment());
1890 }
1891
1892
1893 void LCodeGen::DoCheckInstanceType(LCheckInstanceType* instr) {
1894 Register input = ToRegister(instr->value());
1895 Register scratch = ToRegister(instr->temp());
1896
1897 __ Ldr(scratch, FieldMemOperand(input, HeapObject::kMapOffset));
1898 __ Ldrb(scratch, FieldMemOperand(scratch, Map::kInstanceTypeOffset));
1899
1900 if (instr->hydrogen()->is_interval_check()) {
1901 InstanceType first, last;
1902 instr->hydrogen()->GetCheckInterval(&first, &last);
1903
1904 __ Cmp(scratch, first);
1905 if (first == last) {
1906 // If there is only one type in the interval, check for equality.
1907 DeoptimizeIf(ne, instr->environment());
1908 } else if (last == LAST_TYPE) {
1909 // We don't need to compare with the higher bound of the interval.
1910 DeoptimizeIf(lo, instr->environment());
1911 } else {
1912 // If we are below the lower bound, set the C flag and clear the Z flag
1913 // to force a deopt.
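// A sketch of the conditional compare that follows: if the previous Cmp
// set hs (scratch >= first), the flags are replaced by those of
// (scratch - last), so hi then means scratch > last. Otherwise the flags
// are forced to CFlag (C set, Z clear), which also satisfies hi. Either
// way, hi holds exactly when scratch is outside [first, last].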
1914 __ Ccmp(scratch, last, CFlag, hs);
1915 DeoptimizeIf(hi, instr->environment());
1916 }
1917 } else {
1918 uint8_t mask;
1919 uint8_t tag;
1920 instr->hydrogen()->GetCheckMaskAndTag(&mask, &tag);
1921
1922 if (IsPowerOf2(mask)) {
1923 ASSERT((tag == 0) || (tag == mask));
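// With a power-of-2 mask, the tag is either 0 or equal to the mask, so a
// single Tst of that bit suffices: deopt on ne when the bit must be
// clear (tag == 0), and on eq when the bit must be set (tag == mask).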
1924 // TODO(all): We might be able to use tbz/tbnz if we can guarantee that
1925 // the deopt handler is reachable by a tbz instruction.
1926 __ Tst(scratch, mask);
1927 DeoptimizeIf(tag == 0 ? ne : eq, instr->environment());
1928 } else {
1929 if (tag == 0) {
1930 __ Tst(scratch, mask);
1931 } else {
1932 __ And(scratch, scratch, mask);
1933 __ Cmp(scratch, tag);
1934 }
1935 DeoptimizeIf(ne, instr->environment());
1936 }
1937 }
1938 }
1939
1940
1941 void LCodeGen::DoClampDToUint8(LClampDToUint8* instr) {
1942 DoubleRegister input = ToDoubleRegister(instr->unclamped());
1943 Register result = ToRegister(instr->result());
1944 __ ClampDoubleToUint8(result, input, double_scratch());
1945 }
1946
1947
1948 void LCodeGen::DoClampIToUint8(LClampIToUint8* instr) {
1949 Register input = ToRegister32(instr->unclamped());
1950 Register result = ToRegister32(instr->result());
1951 __ ClampInt32ToUint8(result, input);
1952 }
1953
1954
1955 void LCodeGen::DoClampTToUint8(LClampTToUint8* instr) {
1956 Register input = ToRegister(instr->unclamped());
1957 Register result = ToRegister(instr->result());
1958 Register scratch = ToRegister(instr->temp1());
1959 Label done;
1960
1961 // Smi, heap number and undefined cases are handled; anything else deopts.
1962 Label is_not_smi;
1963 __ JumpIfNotSmi(input, &is_not_smi);
1964 __ SmiUntag(result, input);
1965 __ ClampInt32ToUint8(result);
1966 __ B(&done);
1967
1968 __ Bind(&is_not_smi);
1969
1970 // Check for heap number.
1971 Label is_heap_number;
1972 __ Ldr(scratch, FieldMemOperand(input, HeapObject::kMapOffset));
1973 __ JumpIfRoot(scratch, Heap::kHeapNumberMapRootIndex, &is_heap_number);
1974
1975 // Check for undefined. Undefined is converted to zero for the clamping conversion.
1976 DeoptimizeIfNotRoot(input, Heap::kUndefinedValueRootIndex,
1977 instr->environment());
1978 __ Mov(result, 0);
1979 __ B(&done);
1980
1981 // Heap number case.
1982 __ Bind(&is_heap_number);
1983 DoubleRegister dbl_scratch = double_scratch();
1984 DoubleRegister dbl_scratch2 = ToDoubleRegister(instr->temp2());
1985 __ Ldr(dbl_scratch, FieldMemOperand(input, HeapNumber::kValueOffset));
1986 __ ClampDoubleToUint8(result, dbl_scratch, dbl_scratch2);
1987
1988 __ Bind(&done);
1989 }
1990
1991
1992 void LCodeGen::DoClassOfTestAndBranch(LClassOfTestAndBranch* instr) {
1993 Handle<String> class_name = instr->hydrogen()->class_name();
1994 Label* true_label = instr->TrueLabel(chunk_);
1995 Label* false_label = instr->FalseLabel(chunk_);
1996 Register input = ToRegister(instr->value());
1997 Register scratch1 = ToRegister(instr->temp1());
1998 Register scratch2 = ToRegister(instr->temp2());
1999
2000 __ JumpIfSmi(input, false_label);
2001
2002 Register map = scratch2;
2003 if (class_name->IsUtf8EqualTo(CStrVector("Function"))) {
2004 // Assuming the following assertions hold, we can use the same compares to test
2005 // for both being a function type and being in the object type range.
2006 STATIC_ASSERT(NUM_OF_CALLABLE_SPEC_OBJECT_TYPES == 2);
2007 STATIC_ASSERT(FIRST_NONCALLABLE_SPEC_OBJECT_TYPE ==
2008 FIRST_SPEC_OBJECT_TYPE + 1);
2009 STATIC_ASSERT(LAST_NONCALLABLE_SPEC_OBJECT_TYPE ==
2010 LAST_SPEC_OBJECT_TYPE - 1);
2011 STATIC_ASSERT(LAST_SPEC_OBJECT_TYPE == LAST_TYPE);
2012
2013 // We expect CompareObjectType to load the object instance type into scratch1.
2014 __ CompareObjectType(input, map, scratch1, FIRST_SPEC_OBJECT_TYPE);
2015 __ B(lt, false_label);
2016 __ B(eq, true_label);
2017 __ Cmp(scratch1, LAST_SPEC_OBJECT_TYPE);
2018 __ B(eq, true_label);
2019 } else {
2020 __ IsObjectJSObjectType(input, map, scratch1, false_label);
2021 }
2022
2023 // Now we are in the FIRST-LAST_NONCALLABLE_SPEC_OBJECT_TYPE range.
2024 // Check if the constructor in the map is a function.
2025 __ Ldr(scratch1, FieldMemOperand(map, Map::kConstructorOffset));
2026
2027 // Objects with a non-function constructor have class 'Object'.
2028 if (class_name->IsUtf8EqualTo(CStrVector("Object"))) {
2029 __ JumpIfNotObjectType(
2030 scratch1, scratch2, scratch2, JS_FUNCTION_TYPE, true_label);
2031 } else {
2032 __ JumpIfNotObjectType(
2033 scratch1, scratch2, scratch2, JS_FUNCTION_TYPE, false_label);
2034 }
2035
2036 // The constructor function is in scratch1. Get its instance class name.
2037 __ Ldr(scratch1,
2038 FieldMemOperand(scratch1, JSFunction::kSharedFunctionInfoOffset));
2039 __ Ldr(scratch1,
2040 FieldMemOperand(scratch1,
2041 SharedFunctionInfo::kInstanceClassNameOffset));
2042
2043 // The class name we are testing against is internalized since it's a literal.
2044 // The name in the constructor is internalized because of the way the context
2045 // is booted. This routine isn't expected to work for random API-created
2046 // classes and it doesn't have to because you can't access it with natives
2047 // syntax. Since both sides are internalized it is sufficient to use an
2048 // identity comparison.
2049 EmitCompareAndBranch(instr, eq, scratch1, Operand(class_name));
2050 }
2051
2052
2053 void LCodeGen::DoCmpMapAndBranch(LCmpMapAndBranch* instr) {
2054 Register value = ToRegister(instr->value());
2055 Register map = ToRegister(instr->temp());
2056
2057 __ Ldr(map, FieldMemOperand(value, HeapObject::kMapOffset));
2058 EmitCompareAndBranch(instr, eq, map, Operand(instr->map()));
2059 }
2060
2061
2062 void LCodeGen::DoCmpIDAndBranch(LCmpIDAndBranch* instr) {
2063 LOperand* left = instr->left();
2064 LOperand* right = instr->right();
2065 Condition cond = TokenToCondition(instr->op(), false);
2066
2067 if (left->IsConstantOperand() && right->IsConstantOperand()) {
2068 // We can statically evaluate the comparison.
2069 double left_val = ToDouble(LConstantOperand::cast(left));
2070 double right_val = ToDouble(LConstantOperand::cast(right));
2071 int next_block = EvalComparison(instr->op(), left_val, right_val) ?
2072 instr->TrueDestination(chunk_) : instr->FalseDestination(chunk_);
2073 EmitGoto(next_block);
2074 } else {
2075 if (instr->is_double()) {
2076 if (right->IsConstantOperand()) {
2077 __ Fcmp(ToDoubleRegister(left),
2078 ToDouble(LConstantOperand::cast(right)));
2079 } else if (left->IsConstantOperand()) {
2080 // Transpose the operands and reverse the condition.
2081 __ Fcmp(ToDoubleRegister(right),
2082 ToDouble(LConstantOperand::cast(left)));
2083 cond = ReverseConditionForCmp(cond);
2084 } else {
2085 __ Fcmp(ToDoubleRegister(left), ToDoubleRegister(right));
2086 }
2087
2088 // If a NaN is involved, i.e. the result is unordered (V set),
2089 // jump to the false block label.
2090 __ B(vs, instr->FalseLabel(chunk_));
2091 EmitBranch(instr, cond);
2092 } else {
2093 if (instr->hydrogen_value()->representation().IsInteger32()) {
2094 if (right->IsConstantOperand()) {
2095 EmitCompareAndBranch(instr,
2096 cond,
2097 ToRegister32(left),
2098 ToOperand32(right));
2099 } else {
2100 // Transpose the operands and reverse the condition.
2101 EmitCompareAndBranch(instr,
2102 ReverseConditionForCmp(cond),
2103 ToRegister32(right),
2104 ToOperand32(left));
2105 }
2106 } else {
2107 ASSERT(instr->hydrogen_value()->representation().IsSmi());
2108 if (right->IsConstantOperand()) {
2109 int32_t value = ToInteger32(LConstantOperand::cast(right));
2110 EmitCompareAndBranch(instr,
2111 cond,
2112 ToRegister(left),
2113 Operand(Smi::FromInt(value)));
2114 } else if (left->IsConstantOperand()) {
2115 // Transpose the operands and reverse the condition.
2116 int32_t value = ToInteger32(LConstantOperand::cast(left));
2117 EmitCompareAndBranch(instr,
2118 ReverseConditionForCmp(cond),
2119 ToRegister(right),
2120 Operand(Smi::FromInt(value)));
2121 } else {
2122 EmitCompareAndBranch(instr,
2123 cond,
2124 ToRegister(left),
2125 ToRegister(right));
2126 }
2127 }
2128 }
2129 }
2130 }
2131
2132
2133 void LCodeGen::DoCmpObjectEqAndBranch(LCmpObjectEqAndBranch* instr) {
2134 Register left = ToRegister(instr->left());
2135 Register right = ToRegister(instr->right());
2136 EmitCompareAndBranch(instr, eq, left, right);
2137 }
2138
2139
2140 void LCodeGen::DoCmpT(LCmpT* instr) {
2141 Token::Value op = instr->op();
2142 Condition cond = TokenToCondition(op, false);
2143
2144 ASSERT(ToRegister(instr->left()).Is(x1));
2145 ASSERT(ToRegister(instr->right()).Is(x0));
2146 Handle<Code> ic = CompareIC::GetUninitialized(isolate(), op);
2147 CallCode(ic, RelocInfo::CODE_TARGET, instr);
2148 // Signal that we don't inline smi code before this stub.
2149 InlineSmiCheckInfo::EmitNotInlined(masm());
2150
2151 // Return true or false depending on CompareIC result.
2152 // This instruction is marked as call. We can clobber any register.
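// The CompareIC result in x0 is strcmp-style: negative, zero or positive.
// As a sketch with an assumed Token::LT comparison (cond == lt), the
// Cmp/Csel pair below yields the true root exactly when x0 < 0.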
2153 ASSERT(instr->IsMarkedAsCall());
2154 __ LoadTrueFalseRoots(x1, x2);
2155 __ Cmp(x0, 0);
2156 __ Csel(ToRegister(instr->result()), x1, x2, cond);
2157 }
2158
2159
2160 void LCodeGen::DoConstantD(LConstantD* instr) {
2161 ASSERT(instr->result()->IsDoubleRegister());
2162 DoubleRegister result = ToDoubleRegister(instr->result());
2163 __ Fmov(result, instr->value());
2164 }
2165
2166
2167 void LCodeGen::DoConstantI(LConstantI* instr) {
2168 __ Mov(ToRegister(instr->result()), instr->value());
2169 }
2170
2171
2172 void LCodeGen::DoConstantS(LConstantS* instr) {
2173 __ Mov(ToRegister(instr->result()), Operand(instr->value()));
2174 }
2175
2176
2177 void LCodeGen::DoConstantT(LConstantT* instr) {
2178 Handle<Object> value = instr->value();
2179 AllowDeferredHandleDereference smi_check;
2180 if (value->IsSmi()) {
2181 __ Mov(ToRegister(instr->result()), Operand(value));
2182 } else {
2183 __ LoadHeapObject(ToRegister(instr->result()),
2184 Handle<HeapObject>::cast(value));
2185 }
2186 }
2187
2188
2189 void LCodeGen::DoContext(LContext* instr) {
2190 // If there is a non-return use, the context must be moved to a register.
2191 Register result = ToRegister(instr->result());
2192 // TODO(jbramley): LContext is only generated if it meets this condition, so
2193 // why not move cp unconditionally?
2194 for (HUseIterator it(instr->hydrogen()->uses()); !it.Done(); it.Advance()) {
2195 if (!it.value()->IsReturn()) {
2196 __ Mov(result, cp);
2197 return;
2198 }
2199 }
2200 }
2201
2202
2203 void LCodeGen::DoCheckFunction(LCheckFunction* instr) {
2204 Register reg = ToRegister(instr->value());
2205 Handle<JSFunction> target = instr->hydrogen()->target();
2206 AllowDeferredHandleDereference smi_check;
2207 if (isolate()->heap()->InNewSpace(*target)) {
2208 Register temp = ToRegister(instr->temp());
2209 Handle<JSGlobalPropertyCell> cell =
2210 isolate()->factory()->NewJSGlobalPropertyCell(target);
2211 __ Mov(temp, Operand(Handle<Object>(cell)));
2212 __ Ldr(temp, FieldMemOperand(temp, JSGlobalPropertyCell::kValueOffset));
2213 __ Cmp(reg, temp);
2214 } else {
2215 __ Cmp(reg, Operand(target));
2216 }
2217 DeoptimizeIf(ne, instr->environment());
2218 }
2219
2220
2221 void LCodeGen::DoLazyBailout(LLazyBailout* instr) {
2222 EnsureSpaceForLazyDeopt();
2223 ASSERT(instr->HasEnvironment());
2224 LEnvironment* env = instr->environment();
2225 RegisterEnvironmentForDeoptimization(env, Safepoint::kLazyDeopt);
2226 safepoints_.RecordLazyDeoptimizationIndex(env->deoptimization_index());
2227 }
2228
2229
2230 void LCodeGen::DoDateField(LDateField* instr) {
2231 Register object = ToRegister(instr->date());
2232 Register result = ToRegister(instr->result());
2233 Register temp1 = x10;
2234 Register temp2 = x11;
2235 Smi* index = instr->index();
2236 Label runtime, done, deopt, obj_ok;
2237
2238 ASSERT(object.is(result) && object.Is(x0));
2239 ASSERT(instr->IsMarkedAsCall());
2240
2241 __ JumpIfSmi(object, &deopt);
2242 __ CompareObjectType(object, temp1, temp1, JS_DATE_TYPE);
2243 __ B(eq, &obj_ok);
2244
2245 __ Bind(&deopt);
2246 Deoptimize(instr->environment());
2247
2248 __ Bind(&obj_ok);
2249 if (index->value() == 0) {
2250 __ Ldr(result, FieldMemOperand(object, JSDate::kValueOffset));
2251 } else {
2252 if (index->value() < JSDate::kFirstUncachedField) {
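// Cached date fields are only valid while the object's cache stamp
// matches the isolate's date cache stamp; otherwise fall through to the
// runtime call below.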
2253 ExternalReference stamp = ExternalReference::date_cache_stamp(isolate());
2254 __ Mov(temp1, Operand(stamp));
2255 __ Ldr(temp1, MemOperand(temp1));
2256 __ Ldr(temp2, FieldMemOperand(object, JSDate::kCacheStampOffset));
2257 __ Cmp(temp1, temp2);
2258 __ B(ne, &runtime);
2259 __ Ldr(result, FieldMemOperand(object, JSDate::kValueOffset +
2260 kPointerSize * index->value()));
2261 __ B(&done);
2262 }
2263
2264 __ Bind(&runtime);
2265 __ Mov(x1, Operand(index));
2266 __ CallCFunction(ExternalReference::get_date_field_function(isolate()), 2);
2267 }
2268
2269 __ Bind(&done);
2270 }
2271
2272
2273 void LCodeGen::DoDeoptimize(LDeoptimize* instr) {
2274 if (instr->hydrogen_value()->IsSoftDeoptimize()) {
2275 SoftDeoptimize(instr->environment());
2276 } else {
2277 Deoptimize(instr->environment());
2278 }
2279 }
2280
2281
2282 void LCodeGen::DoDivI(LDivI* instr) {
2283 Register dividend = ToRegister32(instr->left());
2284 Register result = ToRegister32(instr->result());
2285
2286 bool has_power_of_2_divisor = instr->hydrogen()->HasPowerOf2Divisor();
2287 bool can_overflow = instr->hydrogen()->CheckFlag(HValue::kCanOverflow);
2288 bool bailout_on_minus_zero =
2289 instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero);
2290 bool can_be_div_by_zero =
2291 instr->hydrogen()->CheckFlag(HValue::kCanBeDivByZero);
2292 bool all_uses_truncating_to_int32 =
2293 instr->hydrogen()->CheckFlag(HInstruction::kAllUsesTruncatingToInt32);
2294
2295 if (has_power_of_2_divisor) {
2296 ASSERT(instr->temp() == NULL);
2297 int32_t divisor = ToInteger32(LConstantOperand::cast(instr->right()));
2298 int32_t power;
2299 int32_t power_mask;
2300 Label deopt, done;
2301
2302 ASSERT(divisor != 0);
2303 if (divisor > 0) {
2304 power = WhichPowerOf2(divisor);
2305 power_mask = divisor - 1;
2306 } else {
2307 // Check for (0 / -x) as that will produce negative zero.
2308 if (bailout_on_minus_zero) {
2309 if (all_uses_truncating_to_int32) {
2310 // If all uses truncate, and the dividend is zero, the truncated
2311 // result is zero.
2312 __ Mov(result, 0);
2313 __ Cbz(dividend, &done);
2314 } else {
2315 __ Cbz(dividend, &deopt);
2316 }
2317 }
2318 // Check for (kMinInt / -1).
2319 if ((divisor == -1) && can_overflow && !all_uses_truncating_to_int32) {
2320 // Check for kMinInt by subtracting one and checking for overflow.
2321 __ Cmp(dividend, 1);
2322 __ B(vs, &deopt);
2323 }
2324 power = WhichPowerOf2(-divisor);
2325 power_mask = -divisor - 1;
2326 }
2327
2328 if (power_mask != 0) {
2329 if (all_uses_truncating_to_int32) {
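// This computes a truncating (round towards zero) division: negate a
// negative dividend, shift, then restore the sign. A worked example with
// assumed values dividend = -7 and divisor = 2 (power = 1):
// Cneg(lt) gives 7, Asr #1 gives 3, Cneg(lt) gives -3, matching C-style
// -7 / 2.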
2330 __ Cmp(dividend, 0);
2331 __ Cneg(result, dividend, lt);
2332 __ Asr(result, result, power);
2333 if (divisor > 0) __ Cneg(result, result, lt);
2334 if (divisor < 0) __ Cneg(result, result, gt);
2335 return; // Don't fall through to negation below.
2336 } else {
2337 // Deoptimize if the remainder is not 0. If the 'power'
2338 // least-significant bits aren't 0, the dividend is not a multiple of
2339 // 2^power, and therefore there will be a remainder.
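// For example, with an assumed power of 3 (divisor +/-8), any of the
// three least-significant bits being set means the division would leave
// a remainder.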
2340 __ TestAndBranchIfAnySet(dividend, power_mask, &deopt);
2341 __ Asr(result, dividend, power);
2342 if (divisor < 0) __ Neg(result, result);
2343 }
2344 } else {
2345 ASSERT((divisor == 1) || (divisor == -1));
2346 if (divisor < 0) {
2347 __ Neg(result, dividend);
2348 } else {
2349 __ Mov(result, dividend);
2350 }
2351 }
2352 __ B(&done);
2353 __ Bind(&deopt);
2354 Deoptimize(instr->environment());
2355 __ Bind(&done);
2356 } else {
2357 Register divisor = ToRegister32(instr->right());
2358
2359 // Issue the division first, and then check for any deopt cases whilst the
2360 // result is computed.
2361 __ Sdiv(result, dividend, divisor);
2362
2363 if (!all_uses_truncating_to_int32) {
2364 Label deopt;
2365 // Check for x / 0.
2366 if (can_be_div_by_zero) {
2367 __ Cbz(divisor, &deopt);
2368 }
2369
2370 // Check for (0 / -x) as that will produce negative zero.
2371 if (bailout_on_minus_zero) {
2372 __ Cmp(divisor, 0);
2373
2374 // If the divisor < 0 (mi), compare the dividend, and deopt if it is
2375 // zero, i.e. a zero dividend with a negative divisor deopts.
2376 // If the divisor >= 0 (pl, the opposite of mi), set the flags to
2377 // condition ne so we don't deopt, i.e. a positive divisor doesn't deopt.
2378 __ Ccmp(dividend, 0, NoFlag, mi);
2379 __ B(eq, &deopt);
2380 }
2381
2382 // Check for (kMinInt / -1).
2383 if (can_overflow) {
2384 // Test dividend for kMinInt by subtracting one (cmp) and checking for
2385 // overflow.
2386 __ Cmp(dividend, 1);
2387 // If overflow is set, i.e. dividend = kMinInt, compare the divisor with
2388 // -1. If overflow is clear, set the flags for condition ne, as the
2389 // dividend isn't kMinInt, and thus we shouldn't deopt.
2390 __ Ccmp(divisor, -1, NoFlag, vs);
2391 __ B(eq, &deopt);
2392 }
2393
2394 // Compute remainder and deopt if it's not zero.
2395 Register remainder = ToRegister32(instr->temp());
2396 __ Msub(remainder, result, divisor, dividend);
2397 __ Cbnz(remainder, &deopt);
2398
2399 Label div_ok;
2400 __ B(&div_ok);
2401 __ Bind(&deopt);
2402 Deoptimize(instr->environment());
2403 __ Bind(&div_ok);
2404 } else {
2405 ASSERT(instr->temp() == NULL);
2406 }
2407 }
2408 }
2409
2410
2411 void LCodeGen::DoDoubleToI(LDoubleToI* instr) {
2412 DoubleRegister input = ToDoubleRegister(instr->value());
2413
2414 if (instr->truncating()) {
2415 Register result = ToRegister(instr->result());
2416 Register scratch1 = ToRegister(instr->temp1());
2417 Register scratch2 = ToRegister(instr->temp2());
2418 __ ECMA262ToInt32(result, input, scratch1, scratch2);
2419 } else {
2420 Register result = ToRegister32(instr->result());
2421 ASSERT((instr->temp1() == NULL) && (instr->temp2() == NULL));
2422 Label done, deopt;
2423
2424 if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) {
2425 // Check for an input of -0.0, using the result register as a scratch.
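// A sketch of the bit trick used here: Fmov copies the raw IEEE-754 bits,
// so -0.0 becomes 0x8000000000000000, the minimum int64. Cmp with 1
// subtracts 1, which overflows (sets V) for that value only, so the vs
// branch fires exactly when the input was -0.0.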
2426 __ Fmov(result, input);
2427 __ Cmp(result, 1);
2428 __ B(&deopt, vs);
2429 }
2430
2431 __ TryConvertDoubleToInt32(result, input, double_scratch(), &done);
2432 __ Bind(&deopt);
2433 Deoptimize(instr->environment());
2434 __ Bind(&done);
2435 }
2436 }
2437
2438
2439 // TODO(jbramley): This is almost the same as DoDoubleToI. Can we merge them?
2440 void LCodeGen::DoDoubleToSmi(LDoubleToSmi* instr) {
2441 DoubleRegister input = ToDoubleRegister(instr->value());
2442
2443 if (instr->truncating()) {
2444 Register result = ToRegister(instr->result());
2445 Register scratch1 = ToRegister(instr->temp1());
2446 Register scratch2 = ToRegister(instr->temp2());
2447 __ ECMA262ToInt32(result, input, scratch1, scratch2);
2448 } else {
2449 Register result = ToRegister32(instr->result());
2450 ASSERT((instr->temp1() == NULL) && (instr->temp2() == NULL));
2451 Label done, deopt;
2452
2453 if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) {
2454 // Check for an input of -0.0, using the result register as a scratch.
2455 __ Fmov(result, input);
2456 __ Cmp(result, 1);
2457 __ B(&deopt, vs);
2458 }
2459
2460 __ TryConvertDoubleToInt32(result, input, double_scratch(), &done);
2461 __ Bind(&deopt);
2462 Deoptimize(instr->environment());
2463 __ Bind(&done);
2464 }
2465 __ SmiTag(ToRegister(instr->result()));
2466 }
2467
2468
2469 void LCodeGen::DoDrop(LDrop* instr) {
2470 TODO_UNIMPLEMENTED("DoDrop is untested.");
2471 __ Drop(instr->count());
2472 }
2473
2474
2475 void LCodeGen::DoDummyUse(LDummyUse* instr) {
2476 // Nothing to see here, move on!
2477 }
2478
2479
2480 void LCodeGen::DoElementsKind(LElementsKind* instr) {
2481 Register result = ToRegister(instr->result());
2482 Register input = ToRegister(instr->value());
2483
2484 // Load map into result.
2485 __ Ldr(result, FieldMemOperand(input, HeapObject::kMapOffset));
2486
2487 // Load the map's "bit field 2" into result.
2488 ASSERT((Map::kElementsKindBitCount + Map::kElementsKindShift) <= kByteSize);
2489 __ Ldrb(result.W(), FieldMemOperand(result, Map::kBitField2Offset));
2490
2491 // Retrieve elements_kind from bit field 2.
2492 __ Ubfx(result.W(), result.W(), Map::kElementsKindShift,
2493 Map::kElementsKindBitCount);
2494 }
2495
2496
2497 void LCodeGen::DoFixedArrayBaseLength(LFixedArrayBaseLength* instr) {
2498 Register result = ToRegister(instr->result());
2499 Register array = ToRegister(instr->value());
2500 __ Ldr(result, FieldMemOperand(array, FixedArrayBase::kLengthOffset));
2501 }
2502
2503
2504 void LCodeGen::DoFunctionLiteral(LFunctionLiteral* instr) {
2505 // FunctionLiteral instruction is marked as call, we can trash any register.
2506 ASSERT(instr->IsMarkedAsCall());
2507
2508 // Use the fast case closure allocation code that allocates in new
2509 // space for nested functions that don't need literals cloning.
2510 bool pretenure = instr->hydrogen()->pretenure();
2511 if (!pretenure && instr->hydrogen()->has_no_literals()) {
2512 FastNewClosureStub stub(instr->hydrogen()->language_mode(),
2513 instr->hydrogen()->is_generator());
2514 __ Mov(x1, Operand(instr->hydrogen()->shared_info()));
2515 __ Push(x1);
2516 CallCode(stub.GetCode(isolate()), RelocInfo::CODE_TARGET, instr);
2517 } else {
2518 __ Mov(x2, Operand(instr->hydrogen()->shared_info()));
2519 __ Mov(x1, Operand(pretenure ? factory()->true_value()
2520 : factory()->false_value()));
2521 __ Push(cp, x2, x1);
2522 CallRuntime(Runtime::kNewClosure, 3, instr);
2523 }
2524 }
2525
2526
2527 void LCodeGen::DoForInCacheArray(LForInCacheArray* instr) {
2528 Register map = ToRegister(instr->map());
2529 Register result = ToRegister(instr->result());
2530 Label load_cache, done;
2531
2532 __ EnumLengthUntagged(result, map);
2533 __ Cbnz(result, &load_cache);
2534
2535 __ Mov(result, Operand(isolate()->factory()->empty_fixed_array()));
2536 __ B(&done);
2537
2538 __ Bind(&load_cache);
2539 __ LoadInstanceDescriptors(map, result);
2540 __ Ldr(result, FieldMemOperand(result, DescriptorArray::kEnumCacheOffset));
2541 __ Ldr(result, FieldMemOperand(result, FixedArray::SizeFor(instr->idx())));
2542 DeoptimizeIfZero(result, instr->environment());
2543
2544 __ Bind(&done);
2545 }
2546
2547
2548 void LCodeGen::DoForInPrepareMap(LForInPrepareMap* instr) {
2549 Register object = ToRegister(instr->object());
2550 Register null_value = x5;
2551
2552 ASSERT(instr->IsMarkedAsCall());
2553 ASSERT(object.Is(x0));
2554
2555 Label deopt;
2556
2557 __ JumpIfRoot(object, Heap::kUndefinedValueRootIndex, &deopt);
2558
2559 __ LoadRoot(null_value, Heap::kNullValueRootIndex);
2560 __ Cmp(object, null_value);
2561 __ B(eq, &deopt);
2562
2563 __ JumpIfSmi(object, &deopt);
2564
2565 STATIC_ASSERT(FIRST_JS_PROXY_TYPE == FIRST_SPEC_OBJECT_TYPE);
2566 __ CompareObjectType(object, x1, x1, LAST_JS_PROXY_TYPE);
2567 __ B(le, &deopt);
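// Since FIRST_JS_PROXY_TYPE == FIRST_SPEC_OBJECT_TYPE, an instance type
// less than or equal to LAST_JS_PROXY_TYPE means the object is either a
// proxy or not a spec object at all; both cases deopt.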
2568
2569 Label use_cache, call_runtime;
2570 __ CheckEnumCache(object, null_value, x1, x2, x3, x4, &call_runtime);
2571
2572 __ Ldr(object, FieldMemOperand(object, HeapObject::kMapOffset));
2573 __ B(&use_cache);
2574
2575 __ Bind(&deopt);
2576 Deoptimize(instr->environment());
2577
2578 // Get the set of properties to enumerate.
2579 __ Bind(&call_runtime);
2580 __ Push(object);
2581 CallRuntime(Runtime::kGetPropertyNamesFast, 1, instr);
2582
2583 __ Ldr(x1, FieldMemOperand(object, HeapObject::kMapOffset));
2584 __ JumpIfNotRoot(x1, Heap::kMetaMapRootIndex, &deopt);
2585
2586 __ Bind(&use_cache);
2587 }
2588
2589
2590 void LCodeGen::DoGlobalObject(LGlobalObject* instr) {
2591 Register result = ToRegister(instr->result());
2592 __ Ldr(result, GlobalObjectMemOperand());
2593 }
2594
2595
2596 void LCodeGen::DoGlobalReceiver(LGlobalReceiver* instr) {
2597 Register global = ToRegister(instr->global_object());
2598 Register result = ToRegister(instr->result());
2599 __ Ldr(result, FieldMemOperand(global, GlobalObject::kGlobalReceiverOffset));
2600 }
2601
2602
2603 int LCodeGen::GetNextEmittedBlock() const {
2604 for (int i = current_block_ + 1; i < graph()->blocks()->length(); ++i) {
2605 if (!chunk_->GetLabel(i)->HasReplacement()) return i;
2606 }
2607 return -1;
2608 }
2609
2610
2611 void LCodeGen::EmitGoto(int block) {
2612 // Do not emit jump if we are emitting a goto to the next block.
2613 if (!IsNextEmittedBlock(block)) {
2614 __ B(chunk_->GetAssemblyLabel(chunk_->LookupDestination(block)));
2615 }
2616 }
2617
2618
2619 void LCodeGen::DoGoto(LGoto* instr) {
2620 EmitGoto(instr->block_id());
2621 }
2622
2623
2624 // The HHasInstanceTypeAndBranch instruction is built with an interval of
2625 // types to test but is only used in very restricted ways. The only possible
2626 // kinds of intervals are:
2627 //  - [ FIRST_TYPE, instr->to() ]
2628 //  - [ instr->from(), LAST_TYPE ]
2629 //  - instr->from() == instr->to()
2630 //
2631 // These kinds of intervals can be checked with only one compare instruction,
2632 // provided the correct value and test condition are used.
2633 //
2634 // TestType() will return the value to use in the compare instruction and
2635 // BranchCondition() will return the condition to use depending on the kind
2636 // of interval actually specified in the instruction.
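// For example (assumed values): for the interval
// [FIRST_SPEC_OBJECT_TYPE, LAST_TYPE], TestType() returns
// FIRST_SPEC_OBJECT_TYPE and BranchCondition() returns hs, i.e. branch if
// the instance type is greater than or equal to the lower bound.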
2637 static InstanceType TestType(HHasInstanceTypeAndBranch* instr) {
2638 InstanceType from = instr->from();
2639 InstanceType to = instr->to();
2640 if (from == FIRST_TYPE) return to;
2641 ASSERT((from == to) || (to == LAST_TYPE));
2642 return from;
2643 }
2644
2645
2646 // See comment above TestType function for what this function does.
2647 static Condition BranchCondition(HHasInstanceTypeAndBranch* instr) {
2648 InstanceType from = instr->from();
2649 InstanceType to = instr->to();
2650 if (from == to) return eq;
2651 if (to == LAST_TYPE) return hs;
2652 if (from == FIRST_TYPE) return ls;
2653 UNREACHABLE();
2654 return eq;
2655 }
2656
2657
2658 void LCodeGen::DoHasInstanceTypeAndBranch(LHasInstanceTypeAndBranch* instr) {
2659 Register input = ToRegister(instr->value());
2660 Register scratch = ToRegister(instr->temp());
2661
2662 // TODO(all): Once we have rebased, we can avoid the smi check if the
2663 // input is known to be a HeapObject.
2664 __ JumpIfSmi(input, instr->FalseLabel(chunk_));
2665 __ CompareObjectType(input, scratch, scratch, TestType(instr->hydrogen()));
2666 EmitBranch(instr, BranchCondition(instr->hydrogen()));
2667 }
2668
2669
2670 void LCodeGen::DoIn(LIn* instr) {
2671 Register obj = ToRegister(instr->object());
2672 Register key = ToRegister(instr->key());
2673 __ Push(key, obj);
2674 ASSERT(instr->HasPointerMap());
2675 LPointerMap* pointers = instr->pointer_map();
2676 RecordPosition(pointers->position());
2677 SafepointGenerator safepoint_generator(this, pointers, Safepoint::kLazyDeopt);
2678 __ InvokeBuiltin(Builtins::IN, CALL_FUNCTION, safepoint_generator);
2679 }
2680
2681
2682 void LCodeGen::DoInnerAllocatedObject(LInnerAllocatedObject* instr) {
2683 Register result = ToRegister(instr->result());
2684 Register base = ToRegister(instr->base_object());
2685 __ Add(result, base, instr->offset());
2686 }
2687
2688
2689 void LCodeGen::DoInstanceOf(LInstanceOf* instr) {
2690 // Assert that the arguments are in the registers expected by InstanceofStub.
2691 ASSERT(ToRegister(instr->left()).Is(InstanceofStub::left()));
2692 ASSERT(ToRegister(instr->right()).Is(InstanceofStub::right()));
2693
2694 InstanceofStub stub(InstanceofStub::kArgsInRegisters);
2695 CallCode(stub.GetCode(isolate()), RelocInfo::CODE_TARGET, instr);
2696
2697 // InstanceofStub returns a result in x0:
2698 // smi 0 => is an instance
2699 // smi 1 => not an instance.
2700 __ Cmp(x0, 0);
2701 __ LoadTrueFalseRoots(x0, x1);
2702 __ Csel(x0, x0, x1, eq);
2703 }
2704
2705
2706 void LCodeGen::DoInstanceOfKnownGlobal(LInstanceOfKnownGlobal* instr) {
2707 class DeferredInstanceOfKnownGlobal: public LDeferredCode {
2708 public:
2709 DeferredInstanceOfKnownGlobal(LCodeGen* codegen,
2710 LInstanceOfKnownGlobal* instr)
2711 : LDeferredCode(codegen), instr_(instr) { }
2712 virtual void Generate() {
2713 codegen()->DoDeferredInstanceOfKnownGlobal(instr_, &map_check_);
2714 }
2715 virtual LInstruction* instr() { return instr_; }
2716 Label* map_check() { return &map_check_; }
2717 private:
2718 LInstanceOfKnownGlobal* instr_;
2719 Label map_check_;
2720 };
2721
2722 DeferredInstanceOfKnownGlobal* deferred =
2723 new(zone()) DeferredInstanceOfKnownGlobal(this, instr);
2724
2725 Label return_false, cache_miss;
2726 Register object = ToRegister(instr->value());
2727 Register result = ToRegister(instr->result());
2728
2729 // This instruction is marked as call. We can clobber any register.
2730 ASSERT(instr->IsMarkedAsCall());
2731
2732 // We must take into account that object is in x11.
2733 ASSERT(object.Is(x11));
2734 Register scratch = x10;
2735
2736 // A Smi is not instance of anything.
2737 __ JumpIfSmi(object, &return_false);
2738
2739 TODO_UNIMPLEMENTED("patchable inline check");
2740
2741 // The inlined call site cache did not match.
2742 // Check null and string before calling the deferred code.
2743 __ Bind(&cache_miss);
2744 // Null is not instance of anything.
2745 __ JumpIfRoot(object, Heap::kNullValueRootIndex, &return_false);
2746
2747 // String values are not instances of anything.
2748 // Return false if the object is a string. Otherwise, jump to the deferred
2749 // code.
2750 // Note that we can't jump directly to deferred code from
2751 // IsObjectJSStringType, because it uses tbz for the jump and the deferred
2752 // code can be out of range.
2753 __ IsObjectJSStringType(object, scratch, NULL, &return_false);
2754 __ B(deferred->entry());
2755
2756 __ Bind(&return_false);
2757 __ LoadRoot(result, Heap::kFalseValueRootIndex);
2758
2759 // Here result is either true or false.
2760 __ Bind(deferred->exit());
2761 }
2762
2763
2764 void LCodeGen::DoInstanceSize(LInstanceSize* instr) {
2765 Register object = ToRegister(instr->object());
2766 Register result = ToRegister(instr->result());
2767 __ Ldr(result, FieldMemOperand(object, HeapObject::kMapOffset));
2768 __ Ldrb(result, FieldMemOperand(result, Map::kInstanceSizeOffset));
2769 }
2770
2771
2772 void LCodeGen::DoDeferredInstanceOfKnownGlobal(LInstanceOfKnownGlobal* instr,
2773 Label* map_check) {
2774 Register result = ToRegister(instr->result());
2775 ASSERT(result.Is(x0)); // InstanceofStub returns its result in x0.
2776 InstanceofStub::Flags flags = InstanceofStub::kArgsInRegisters;
2777
2778 PushSafepointRegistersScope scope(this, Safepoint::kWithRegisters);
2779
2780 // Prepare InstanceofStub arguments.
2781 ASSERT(ToRegister(instr->value()).Is(InstanceofStub::left()));
2782 __ LoadHeapObject(InstanceofStub::right(), instr->function());
2783
2784 InstanceofStub stub(flags);
2785 CallCodeGeneric(stub.GetCode(isolate()),
2786 RelocInfo::CODE_TARGET,
2787 instr,
2788 RECORD_SAFEPOINT_WITH_REGISTERS_AND_NO_ARGUMENTS);
2789 LEnvironment* env = instr->GetDeferredLazyDeoptimizationEnvironment();
2790 safepoints_.RecordLazyDeoptimizationIndex(env->deoptimization_index());
2791
2792 // TODO(all): This could be integrated into InstanceofStub.
2793 __ LoadTrueFalseRoots(x1, x2);
2794 ASSERT(Smi::FromInt(0) == 0);
2795 __ Cmp(result, 0);
2796 __ Csel(result, x1, x2, eq);
2797
2798 // Put the result value into the result register slot.
2799 __ StoreToSafepointRegisterSlot(result, result);
2800 }
2801
2802
2803 void LCodeGen::DoInstructionGap(LInstructionGap* instr) {
2804 DoGap(instr);
2805 }
2806
2807
2808 void LCodeGen::DoInteger32ToDouble(LInteger32ToDouble* instr) {
2809 Register value = ToRegister32(instr->value());
2810 DoubleRegister result = ToDoubleRegister(instr->result());
2811 __ Scvtf(result, value);
2812 }
2813
2814
2815 void LCodeGen::DoInteger32ToSmi(LInteger32ToSmi* instr) {
2816 // A64 smis can represent all Integer32 values, so this cannot deoptimize.
2817 ASSERT(!instr->hydrogen()->value()->HasRange() ||
2818 instr->hydrogen()->value()->range()->IsInSmiRange());
2819
2820 Register value = ToRegister(instr->value());
2821 Register result = ToRegister(instr->result());
2822 __ SmiTag(result, value);
2823 }
2824
2825
2826 void LCodeGen::DoInvokeFunction(LInvokeFunction* instr) {
2827 // The function is required to be in x1.
2828 ASSERT(ToRegister(instr->function()).is(x1));
2829 ASSERT(instr->HasPointerMap());
2830
2831 Handle<JSFunction> known_function = instr->hydrogen()->known_function();
2832 if (known_function.is_null()) {
2833 LPointerMap* pointers = instr->pointer_map();
2834 RecordPosition(pointers->position());
2835 SafepointGenerator generator(this, pointers, Safepoint::kLazyDeopt);
2836 ParameterCount count(instr->arity());
2837 __ InvokeFunction(x1, count, CALL_FUNCTION, generator, CALL_AS_METHOD);
2838 __ Ldr(cp, MemOperand(fp, StandardFrameConstants::kContextOffset));
2839 } else {
2840 CallKnownFunction(known_function,
2841 instr->hydrogen()->formal_parameter_count(),
2842 instr->arity(),
2843 instr,
2844 CALL_AS_METHOD,
2845 x1);
2846 }
2847 }
2848
2849
2850 void LCodeGen::DoIsConstructCallAndBranch(LIsConstructCallAndBranch* instr) {
2851 Register temp1 = ToRegister(instr->temp1());
2852 Register temp2 = ToRegister(instr->temp2());
2853
2854 // Get the frame pointer for the calling frame.
2855 __ Ldr(temp1, MemOperand(fp, StandardFrameConstants::kCallerFPOffset));
2856
2857 // Skip the arguments adaptor frame if it exists.
2858 Label check_frame_marker;
2859 __ Ldr(temp2, MemOperand(temp1, StandardFrameConstants::kContextOffset));
2860 __ Cmp(temp2, Operand(Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR)));
2861 __ B(ne, &check_frame_marker);
2862 __ Ldr(temp1, MemOperand(temp1, StandardFrameConstants::kCallerFPOffset));
2863
2864 // Check the marker in the calling frame.
2865 __ Bind(&check_frame_marker);
2866 __ Ldr(temp1, MemOperand(temp1, StandardFrameConstants::kMarkerOffset));
2867
2868 EmitCompareAndBranch(
2869 instr, eq, temp1, Operand(Smi::FromInt(StackFrame::CONSTRUCT)));
2870 }
2871
2872
2873 void LCodeGen::DoIsObjectAndBranch(LIsObjectAndBranch* instr) {
2874 Label* is_object = instr->TrueLabel(chunk_);
2875 Label* is_not_object = instr->FalseLabel(chunk_);
2876 Register value = ToRegister(instr->value());
2877 Register map = ToRegister(instr->temp1());
2878 Register scratch = ToRegister(instr->temp2());
2879
2880 __ JumpIfSmi(value, is_not_object);
2881 __ JumpIfRoot(value, Heap::kNullValueRootIndex, is_object);
2882
2883 __ Ldr(map, FieldMemOperand(value, HeapObject::kMapOffset));
2884
2885 // Check for undetectable objects.
2886 __ Ldrb(scratch, FieldMemOperand(map, Map::kBitFieldOffset));
2887 __ TestAndBranchIfAnySet(scratch, 1 << Map::kIsUndetectable, is_not_object);
2888
2889 // Check that instance type is in object type range.
2890 __ IsInstanceJSObjectType(map, scratch, NULL);
2891 // Flags have been updated by IsInstanceJSObjectType. We can now test the
2892 // flags for "le" condition to check if the object's type is a valid
2893 // JS object type.
2894 EmitBranch(instr, le);
2895 }
2896
2897
2898 Condition LCodeGen::EmitIsString(Register input,
2899 Register temp1,
2900 Label* is_not_string) {
2901 __ JumpIfSmi(input, is_not_string);
2902 __ CompareObjectType(input, temp1, temp1, FIRST_NONSTRING_TYPE);
2903
2904 return lt;
2905 }
2906
2907
2908 void LCodeGen::DoIsStringAndBranch(LIsStringAndBranch* instr) {
2909 Register val = ToRegister(instr->value());
2910 Register scratch = ToRegister(instr->temp());
2911
2912 Condition true_cond = EmitIsString(val, scratch, instr->FalseLabel(chunk_));
2913
2914 EmitBranch(instr, true_cond);
2915 }
2916
2917
2918 void LCodeGen::DoIsSmiAndBranch(LIsSmiAndBranch* instr) {
2919 Register value = ToRegister(instr->value());
2920 STATIC_ASSERT(kSmiTag == 0);
2921 EmitTestAndBranch(instr, eq, value, kSmiTagMask);
2922 }
2923
2924
2925 void LCodeGen::DoIsUndetectableAndBranch(LIsUndetectableAndBranch* instr) {
2926 Register input = ToRegister(instr->value());
2927 Register temp = ToRegister(instr->temp());
2928
2929 __ JumpIfSmi(input, instr->FalseLabel(chunk_));
2930 __ Ldr(temp, FieldMemOperand(input, HeapObject::kMapOffset));
2931 __ Ldrb(temp, FieldMemOperand(temp, Map::kBitFieldOffset));
2932
2933 // TODO(jbramley): Find a way to use Tbz here.
2934 __ Tst(temp, 1 << Map::kIsUndetectable);
2935 EmitBranch(instr, ne);
2936 }
2937
2938
2939 static const char* LabelType(LLabel* label) {
2940 if (label->is_loop_header()) return " (loop header)";
2941 if (label->is_osr_entry()) return " (OSR entry)";
2942 return "";
2943 }
2944
2945
2946 void LCodeGen::DoLabel(LLabel* label) {
2947 Comment(";;; <@%d,#%d> -------------------- B%d%s --------------------",
2948 current_instruction_,
2949 label->hydrogen_value()->id(),
2950 label->block_id(),
2951 LabelType(label));
2952
2953 __ Bind(label->label());
2954 current_block_ = label->block_id();
2955 DoGap(label);
2956 }
2957
2958
2959 void LCodeGen::DoLoadContextSlot(LLoadContextSlot* instr) {
2960 Register context = ToRegister(instr->context());
2961 Register result = ToRegister(instr->result());
2962 __ Ldr(result, ContextMemOperand(context, instr->slot_index()));
2963 if (instr->hydrogen()->RequiresHoleCheck()) {
2964 if (instr->hydrogen()->DeoptimizesOnHole()) {
2965 DeoptimizeIfRoot(result, Heap::kTheHoleValueRootIndex,
2966 instr->environment());
2967 } else {
2968 Label not_the_hole;
2969 __ JumpIfNotRoot(result, Heap::kTheHoleValueRootIndex, &not_the_hole);
2970 __ LoadRoot(result, Heap::kUndefinedValueRootIndex);
2971 __ Bind(&not_the_hole);
2972 }
2973 }
2974 }
2975
2976
2977 void LCodeGen::DoLoadExternalArrayPointer(LLoadExternalArrayPointer* instr) {
2978 Register to_reg = ToRegister(instr->result());
2979 Register from_reg = ToRegister(instr->object());
2980 __ Ldr(to_reg, FieldMemOperand(from_reg,
2981 ExternalArray::kExternalPointerOffset));
2982 }
2983
2984
2985 void LCodeGen::DoLoadFunctionPrototype(LLoadFunctionPrototype* instr) {
2986 Register function = ToRegister(instr->function());
2987 Register result = ToRegister(instr->result());
2988 Register temp = ToRegister(instr->temp());
2989 Label deopt;
2990
2991 // Check that the function really is a function. This leaves the map in the
2992 // result register.
2993 __ JumpIfNotObjectType(function, result, temp, JS_FUNCTION_TYPE, &deopt);
2994
2995 // Make sure that the function has an instance prototype.
2996 Label non_instance;
2997 __ Ldrb(temp, FieldMemOperand(result, Map::kBitFieldOffset));
2998 __ Tbnz(temp, Map::kHasNonInstancePrototype, &non_instance);
2999
3000 // Get the prototype or initial map from the function.
3001 __ Ldr(result, FieldMemOperand(function,
3002 JSFunction::kPrototypeOrInitialMapOffset));
3003
3004 // Check that the function has a prototype or an initial map.
3005 __ JumpIfRoot(result, Heap::kTheHoleValueRootIndex, &deopt);
3006
3007 // If the function does not have an initial map, we're done.
3008 Label done;
3009 __ CompareObjectType(result, temp, temp, MAP_TYPE);
3010 __ B(ne, &done);
3011
3012 // Get the prototype from the initial map.
3013 __ Ldr(result, FieldMemOperand(result, Map::kPrototypeOffset));
3014 __ B(&done);
3015
3016 // Non-instance prototype: fetch prototype from constructor field in initial
3017 // map.
3018 __ Bind(&non_instance);
3019 __ Ldr(result, FieldMemOperand(result, Map::kConstructorOffset));
3020 __ B(&done);
3021
3022 // Deoptimize case.
3023 __ Bind(&deopt);
3024 Deoptimize(instr->environment());
3025
3026 // All done.
3027 __ Bind(&done);
3028 }
3029
3030
3031 void LCodeGen::DoLoadGlobalCell(LLoadGlobalCell* instr) {
3032 Register result = ToRegister(instr->result());
3033 __ Mov(result, Operand(Handle<Object>(instr->hydrogen()->cell())));
3034 __ Ldr(result, FieldMemOperand(result, JSGlobalPropertyCell::kValueOffset));
3035 if (instr->hydrogen()->RequiresHoleCheck()) {
3036 DeoptimizeIfRoot(
3037 result, Heap::kTheHoleValueRootIndex, instr->environment());
3038 }
3039 }
3040
3041
3042 void LCodeGen::DoLoadGlobalGeneric(LLoadGlobalGeneric* instr) {
3043 ASSERT(ToRegister(instr->global_object()).Is(x0));
3044 ASSERT(ToRegister(instr->result()).Is(x0));
3045 __ Mov(x2, Operand(instr->name()));
3046 RelocInfo::Mode mode = instr->for_typeof() ? RelocInfo::CODE_TARGET
3047 : RelocInfo::CODE_TARGET_CONTEXT;
3048 Handle<Code> ic = isolate()->builtins()->LoadIC_Initialize();
3049 CallCode(ic, mode, instr);
3050 }
3051
3052
3053 MemOperand LCodeGen::PrepareKeyedExternalArrayOperand(Register key,
3054 Register base,
3055 Register scratch,
3056 bool key_is_smi,
3057 bool key_is_constant,
3058 int constant_key,
3059 int element_size_shift,
3060 int additional_index) {
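// The returned operand addresses element (key + additional_index) of the
// array at base, scaled by the element size. For example, with assumed
// values constant_key == 2, additional_index == 0 and
// element_size_shift == 3, the constant case below yields
// MemOperand(base, 16).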
3061 if (key_is_constant) {
3062 return MemOperand(base, (constant_key + additional_index) <<
3063 element_size_shift);
3064 }
3065
3066 if (additional_index == 0) {
3067 if (key_is_smi) {
3068 // Key is smi: untag, and scale by element size.
3069 __ Add(scratch, base, Operand::UntagSmiAndScale(key, element_size_shift));
3070 return MemOperand(scratch);
3071 } else {
3072 // Key is not smi, and element size is not byte: scale by element size.
3073 return MemOperand(base, key, LSL, element_size_shift);
3074 }
3075 } else {
3076 if (key_is_smi) {
3077 __ SmiUntag(scratch, key);
3078 __ Add(scratch, scratch, additional_index);
3079 } else {
3080 __ Add(scratch, key, additional_index);
3081 }
3082 return MemOperand(base, scratch, LSL, element_size_shift);
3083 }
3084 }
3085
3086
3087 void LCodeGen::DoLoadKeyedExternal(LLoadKeyedExternal* instr) {
3088 Register ext_ptr = ToRegister(instr->elements());
3089 Register scratch;
3090 ElementsKind elements_kind = instr->elements_kind();
3091
3092 bool key_is_smi = instr->hydrogen()->key()->representation().IsSmi();
3093 bool key_is_constant = instr->key()->IsConstantOperand();
3094 Register key = no_reg;
3095 int constant_key = 0;
3096 if (key_is_constant) {
3097 ASSERT(instr->temp() == NULL);
3098 constant_key = ToInteger32(LConstantOperand::cast(instr->key()));
3099 if (constant_key & 0xf0000000) {
3100 Abort("Array index constant value too big.");
3101 }
3102 } else {
3103 scratch = ToRegister(instr->temp());
3104 key = ToRegister(instr->key());
3105 }
3106
3107 int element_size_shift = ElementsKindToShiftSize(elements_kind);
3108 MemOperand mem_op =
3109 PrepareKeyedExternalArrayOperand(key, ext_ptr, scratch, key_is_smi,
3110 key_is_constant, constant_key,
3111 element_size_shift,
3112 instr->additional_index());
3113
3114 if (elements_kind == EXTERNAL_FLOAT_ELEMENTS) {
3115 DoubleRegister result = ToDoubleRegister(instr->result());
3116 __ Ldr(result.S(), mem_op);
3117 __ Fcvt(result, result.S());
3118 } else if (elements_kind == EXTERNAL_DOUBLE_ELEMENTS) {
3119 DoubleRegister result = ToDoubleRegister(instr->result());
3120 __ Ldr(result, mem_op);
3121 } else {
3122 Register result = ToRegister(instr->result());
3123
3124 switch (elements_kind) {
3125 case EXTERNAL_BYTE_ELEMENTS: __ Ldrsb(result, mem_op); break;
3126 case EXTERNAL_PIXEL_ELEMENTS: // Fall through.
3127 case EXTERNAL_UNSIGNED_BYTE_ELEMENTS: __ Ldrb(result, mem_op); break;
3128 case EXTERNAL_SHORT_ELEMENTS: __ Ldrsh(result, mem_op); break;
3129 case EXTERNAL_UNSIGNED_SHORT_ELEMENTS: __ Ldrh(result, mem_op); break;
3130 case EXTERNAL_INT_ELEMENTS: __ Ldrsw(result, mem_op); break;
3131 case EXTERNAL_UNSIGNED_INT_ELEMENTS:
3132 __ Ldr(result.W(), mem_op);
3133 if (!instr->hydrogen()->CheckFlag(HInstruction::kUint32)) {
3134 // Deopt if value >= 0x80000000, as it cannot be represented as an int32.
3135 __ Tst(result, 0xFFFFFFFF80000000);
3136 DeoptimizeIf(ne, instr->environment());
3137 }
3138 break;
3139 case EXTERNAL_FLOAT_ELEMENTS:
3140 case EXTERNAL_DOUBLE_ELEMENTS:
3141 case FAST_HOLEY_DOUBLE_ELEMENTS:
3142 case FAST_HOLEY_ELEMENTS:
3143 case FAST_HOLEY_SMI_ELEMENTS:
3144 case FAST_DOUBLE_ELEMENTS:
3145 case FAST_ELEMENTS:
3146 case FAST_SMI_ELEMENTS:
3147 case DICTIONARY_ELEMENTS:
3148 case NON_STRICT_ARGUMENTS_ELEMENTS:
3149 UNREACHABLE();
3150 break;
3151 }
3152 }
3153 }
3154
3155
3156 void LCodeGen::CalcKeyedArrayBaseRegister(Register base,
3157 Register elements,
3158 Register key,
3159 bool key_is_tagged,
3160 ElementsKind elements_kind) {
3161 int element_size_shift = ElementsKindToShiftSize(elements_kind);
3162
3163 // Even though the HLoad/StoreKeyed instructions force the input
3164 // representation for the key to be an integer, the input gets replaced during
3165 // bounds check elimination with the index argument to the bounds check, which
3166 // can be tagged, so that case must be handled here, too.
3167 if (key_is_tagged) {
3168 __ Add(base, elements, Operand::UntagSmiAndScale(key, element_size_shift));
3169 } else {
3170 // Sign extend key because it could be a 32-bit negative value and the
3171 // address computation happens in 64-bit.
3172 ASSERT((element_size_shift >= 0) && (element_size_shift <= 4));
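// Operand(key, SXTW, shift) lets a single Add both sign extend the
// 32-bit key and scale it, i.e. base = elements + (int64_t(key) << shift).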
3173 __ Add(base, elements, Operand(key, SXTW, element_size_shift));
3174 }
3175 }
3176
3177
3178 void LCodeGen::DoLoadKeyedFixedDouble(LLoadKeyedFixedDouble* instr) {
3179 Register elements = ToRegister(instr->elements());
3180 DoubleRegister result = ToDoubleRegister(instr->result());
3181 Register load_base;
3182 int offset = 0;
3183
3184 if (instr->key()->IsConstantOperand()) {
3185 ASSERT(instr->hydrogen()->RequiresHoleCheck() ||
3186 (instr->temp() == NULL));
3187
3188 int constant_key = ToInteger32(LConstantOperand::cast(instr->key()));
3189 if (constant_key & 0xf0000000) {
3190 Abort("Array index constant value too big.");
3191 }
3192 offset = FixedDoubleArray::OffsetOfElementAt(constant_key +
3193 instr->additional_index());
3194 load_base = elements;
3195 } else {
3196 load_base = ToRegister(instr->temp());
3197 Register key = ToRegister(instr->key());
3198 bool key_is_tagged = instr->hydrogen()->key()->representation().IsSmi();
3199 CalcKeyedArrayBaseRegister(load_base, elements, key, key_is_tagged,
3200 instr->hydrogen()->elements_kind());
3201 offset = FixedDoubleArray::OffsetOfElementAt(instr->additional_index());
3202 }
3203 __ Ldr(result, FieldMemOperand(load_base, offset));
3204
3205 if (instr->hydrogen()->RequiresHoleCheck()) {
3206 Register scratch = ToRegister(instr->temp());
3207
3208 // TODO(all): Is it faster to reload this value to an integer register, or
3209 // move from fp to integer?
3210 __ Fmov(scratch, result);
3211 __ Cmp(scratch, kHoleNanInt64);
3212 DeoptimizeIf(eq, instr->environment());
3213 }
3214 }
3215
3216
3217 void LCodeGen::DoLoadKeyedFixed(LLoadKeyedFixed* instr) {
3218 Register elements = ToRegister(instr->elements());
3219 Register result = ToRegister(instr->result());
3220 Register load_base;
3221 int offset = 0;
3222
3223 if (instr->key()->IsConstantOperand()) {
3224 ASSERT(instr->temp() == NULL);
3225 LConstantOperand* const_operand = LConstantOperand::cast(instr->key());
3226 offset = FixedArray::OffsetOfElementAt(ToInteger32(const_operand) +
3227 instr->additional_index());
3228 load_base = elements;
3229 } else {
3230 load_base = ToRegister(instr->temp());
3231 Register key = ToRegister(instr->key());
3232 bool key_is_tagged = instr->hydrogen()->key()->representation().IsSmi();
3233 CalcKeyedArrayBaseRegister(load_base, elements, key, key_is_tagged,
3234 instr->hydrogen()->elements_kind());
3235 offset = FixedArray::OffsetOfElementAt(instr->additional_index());
3236 }
3237 __ Ldr(result, FieldMemOperand(load_base, offset));
3238
3239 if (instr->hydrogen()->RequiresHoleCheck()) {
3240 if (IsFastSmiElementsKind(instr->hydrogen()->elements_kind())) {
3241 DeoptimizeIfNotSmi(result, instr->environment());
3242 } else {
3243 DeoptimizeIfRoot(result, Heap::kTheHoleValueRootIndex,
3244 instr->environment());
3245 }
3246 }
3247 }
3248
3249
3250 void LCodeGen::DoLoadKeyedGeneric(LLoadKeyedGeneric* instr) {
3251 ASSERT(ToRegister(instr->object()).Is(x1));
3252 ASSERT(ToRegister(instr->key()).Is(x0));
3253
3254 Handle<Code> ic = isolate()->builtins()->KeyedLoadIC_Initialize();
3255 CallCode(ic, RelocInfo::CODE_TARGET, instr);
3256
3257 ASSERT(ToRegister(instr->result()).Is(x0));
3258 }
3259
3260
3261 void LCodeGen::DoLoadNamedField(LLoadNamedField* instr) {
3262 HObjectAccess access = instr->hydrogen()->access();
3263 int offset = access.offset();
3264 Register object = ToRegister(instr->object());
3265
3266 if (instr->hydrogen()->representation().IsDouble()) {
3267 FPRegister result = ToDoubleRegister(instr->result());
3268 __ Ldr(result, FieldMemOperand(object, offset));
3269 } else {
3270 Register result = ToRegister(instr->result());
3271 if (access.IsInobject()) {
3272 __ Ldr(result, FieldMemOperand(object, offset));
3273 } else {
3274 __ Ldr(result, FieldMemOperand(object, JSObject::kPropertiesOffset));
3275 __ Ldr(result, FieldMemOperand(result, offset));
3276 }
3277 }
3278 }
3279
3280
3281 void LCodeGen::EmitLoadFieldOrConstantFunction(Register result,
3282 Register object,
3283 Handle<Map> type,
3284 Handle<String> name,
3285 LEnvironment* env) {
3286 LookupResult lookup(isolate());
3287 type->LookupDescriptor(NULL, *name, &lookup);
3288 ASSERT(lookup.IsFound() || lookup.IsCacheable());
3289
3290 if (lookup.IsField()) {
3291 int index = lookup.GetLocalFieldIndexFromMap(*type);
3292 int offset = index * kPointerSize;
3293 if (index < 0) {
3294 // Negative property indices are in-object properties, indexed from the
3295 // end of the fixed part of the object.
3296 __ Ldr(result, FieldMemOperand(object, offset + type->instance_size()));
3297 } else {
3298 // Non-negative property indices are in the properties array.
3299 __ Ldr(result, FieldMemOperand(object, JSObject::kPropertiesOffset));
3300 __ Ldr(result, FieldMemOperand(result, offset + FixedArray::kHeaderSize));
3301 }
3302 } else if (lookup.IsConstantFunction()) {
3303 Handle<JSFunction> function(lookup.GetConstantFunctionFromMap(*type));
3304 __ LoadHeapObject(result, function);
3305 } else {
3306 // Negative lookup. Check prototypes.
3307 Handle<HeapObject> current(HeapObject::cast((*type)->prototype()));
3308 Heap* heap = type->GetHeap();
3309 while (*current != heap->null_value()) {
3310 __ LoadHeapObject(result, current);
3311 __ CompareMap(result, result, Handle<Map>(current->map()));
3312 DeoptimizeIf(ne, env);
3313 current =
3314 Handle<HeapObject>(HeapObject::cast(current->map()->prototype()));
3315 }
3316 __ LoadRoot(result, Heap::kUndefinedValueRootIndex);
3317 }
3318 }
3319
3320
3321 void LCodeGen::DoLoadNamedFieldPolymorphic(LLoadNamedFieldPolymorphic* instr) {
3322 Register object = ToRegister(instr->object());
3323 Register result = ToRegister(instr->result());
3324   // The result register is only written once the object's map has been
3325   // found, so until then object_map can share its register with result.
3326   // However, when the generic code path is needed, the instruction is
3327   // MarkedAsCall and both the object and result registers are allocated to
3328   // x0. The object must not be clobbered until the call to LoadIC, so in
3329   // that case we choose a different arbitrary register (x10) for
3330   // object_map.
3331 Register object_map = instr->IsMarkedAsCall()
3332 ? x10
3333 : result;
3334
3335 int map_count = instr->hydrogen()->types()->length();
3336 bool need_generic = instr->hydrogen()->need_generic();
3337
3338 if ((map_count == 0) && !need_generic) {
3339 Deoptimize(instr->environment());
3340 return;
3341 }
3342
3343 Handle<String> name = instr->hydrogen()->name();
3344 Label done;
3345 __ Ldr(object_map, FieldMemOperand(object, HeapObject::kMapOffset));
3346 for (int i = 0; i < map_count; i++) {
3347 bool last = (i == (map_count - 1));
3348 Handle<Map> map = instr->hydrogen()->types()->at(i);
3349 Label check_passed;
3350 __ CompareMap(object_map, map, &check_passed);
3351 if (last && !need_generic) {
3352 DeoptimizeIf(ne, instr->environment());
3353 __ Bind(&check_passed);
3354 EmitLoadFieldOrConstantFunction(result, object, map, name,
3355 instr->environment());
3356 } else {
3357 Label next;
3358 __ B(ne, &next);
3359 __ Bind(&check_passed);
3360 EmitLoadFieldOrConstantFunction(result, object, map, name,
3361 instr->environment());
3362 __ B(&done);
3363 __ Bind(&next);
3364 }
3365 }
3366 if (need_generic) {
3367 ASSERT(instr->IsMarkedAsCall());
3368 // LoadIC expects x2 to hold the name, and x0 to hold the receiver.
3369 ASSERT(object.Is(x0));
3370 __ Mov(x2, Operand(name));
3371 Handle<Code> ic = isolate()->builtins()->LoadIC_Initialize();
3372 CallCode(ic, RelocInfo::CODE_TARGET, instr);
3373 }
3374 __ Bind(&done);
3375 }
3376
3377
3378 void LCodeGen::DoLoadNamedGeneric(LLoadNamedGeneric* instr) {
3379 // LoadIC expects x2 to hold the name, and x0 to hold the receiver.
3380 ASSERT(ToRegister(instr->object()).is(x0));
3381 __ Mov(x2, Operand(instr->name()));
3382
3383 Handle<Code> ic = isolate()->builtins()->LoadIC_Initialize();
3384 CallCode(ic, RelocInfo::CODE_TARGET, instr);
3385
3386 ASSERT(ToRegister(instr->result()).is(x0));
3387 }
3388
3389
3390 void LCodeGen::DoMapEnumLength(LMapEnumLength* instr) {
3391 Register result = ToRegister(instr->result());
3392 Register map = ToRegister(instr->value());
3393 __ EnumLengthSmi(result, map);
3394 }
3395
3396
3397 void LCodeGen::DoMathAbs(LMathAbs* instr) {
3398 Representation r = instr->hydrogen()->value()->representation();
3399 if (r.IsDouble()) {
3400 DoubleRegister input = ToDoubleRegister(instr->value());
3401 DoubleRegister result = ToDoubleRegister(instr->result());
3402 __ Fabs(result, input);
3403 } else {
3404 ASSERT(r.IsInteger32());
3405 Register input = ToRegister32(instr->value());
3406 Register result = ToRegister32(instr->result());
3407 Label done;
3408 __ Abs(result, input, NULL, &done);
3409 Deoptimize(instr->environment());
3410 __ Bind(&done);
3411 }
3412 }
3413
3414
3415 void LCodeGen::DoDeferredMathAbsTagged(LMathAbsTagged* instr,
3416 Label* exit,
3417 Label* allocation_entry) {
3418 // Handle the tricky cases of MathAbsTagged:
3419 // - HeapNumber inputs.
3420 // - Negative inputs produce a positive result, so a new HeapNumber is
3421 // allocated to hold it.
3422 // - Positive inputs are returned as-is, since there is no need to allocate
3423 // a new HeapNumber for the result.
3424 //  - The (smi) input -0x80000000 produces +0x80000000, which does not fit
3425 //    in a smi. In this case, the inline code sets the result and jumps
3426 //    directly to the allocation_entry label.
3427 Register input = ToRegister(instr->value());
3428 Register temp1 = ToRegister(instr->temp1());
3429 Register temp2 = ToRegister(instr->temp2());
3430 Register result_bits = ToRegister(instr->temp3());
3431 Register result = ToRegister(instr->result());
3432
3433 Label runtime_allocation;
3434
3435 // Deoptimize if the input is not a HeapNumber.
3436 __ Ldr(temp1, FieldMemOperand(input, HeapObject::kMapOffset));
3437 DeoptimizeIfNotRoot(temp1, Heap::kHeapNumberMapRootIndex,
3438 instr->environment());
3439
3440 // If the argument is positive, we can return it as-is, without any need to
3441 // allocate a new HeapNumber for the result. We have to do this in integer
3442 // registers (rather than with fabs) because we need to be able to distinguish
3443 // the two zeroes.
3444 __ Ldr(result_bits, FieldMemOperand(input, HeapNumber::kValueOffset));
3445 __ Mov(result, input);
3446 __ Tbz(result_bits, kXSignBit, exit);
3447
3448 // Calculate abs(input) by clearing the sign bit.
3449 __ Bic(result_bits, result_bits, kXSignMask);
3450
3451 // Allocate a new HeapNumber to hold the result.
3452 // result_bits The bit representation of the (double) result.
3453 __ Bind(allocation_entry);
3454 __ AllocateHeapNumber(result, &runtime_allocation, temp1, temp2);
3455 // The inline (non-deferred) code will store result_bits into result.
3456 __ B(exit);
3457
3458 __ Bind(&runtime_allocation);
3459 if (FLAG_debug_code) {
3460 // Because result is in the pointer map, we need to make sure it has a valid
3461 // tagged value before we call the runtime. We speculatively set it to the
3462 // input (for abs(+x)) or to a smi (for abs(-SMI_MIN)), so it should already
3463 // be valid.
3464 Label result_ok;
3465 Register input = ToRegister(instr->value());
3466 __ JumpIfSmi(result, &result_ok);
3467 __ Cmp(input, result);
3468 DeoptimizeIf(ne, instr->environment());
3469 __ Bind(&result_ok);
3470 }
3471
3472 { PushSafepointRegistersScope scope(this, Safepoint::kWithRegisters);
3473 CallRuntimeFromDeferred(Runtime::kAllocateHeapNumber, 0, instr);
3474 __ StoreToSafepointRegisterSlot(x0, result);
3475 }
3476 // The inline (non-deferred) code will store result_bits into result.
3477 }
3478
3479
3480 void LCodeGen::DoMathAbsTagged(LMathAbsTagged* instr) {
3481 // Class for deferred case.
3482 class DeferredMathAbsTagged: public LDeferredCode {
3483 public:
3484 DeferredMathAbsTagged(LCodeGen* codegen, LMathAbsTagged* instr)
3485 : LDeferredCode(codegen), instr_(instr) { }
3486 virtual void Generate() {
3487 codegen()->DoDeferredMathAbsTagged(instr_, exit(),
3488 allocation_entry());
3489 }
3490 virtual LInstruction* instr() { return instr_; }
3491 Label* allocation_entry() { return &allocation; }
3492 private:
3493 LMathAbsTagged* instr_;
3494 Label allocation;
3495 };
3496
3497 // TODO(jbramley): The early-exit mechanism would skip the new frame handling
3498 // in GenerateDeferredCode. Tidy this up.
3499 ASSERT(!NeedsDeferredFrame());
3500
3501 DeferredMathAbsTagged* deferred =
3502 new(zone()) DeferredMathAbsTagged(this, instr);
3503
3504 ASSERT(instr->hydrogen()->value()->representation().IsTagged());
3505 Register input = ToRegister(instr->value());
3506 Register result_bits = ToRegister(instr->temp3());
3507 Register result = ToRegister(instr->result());
3508 Label done;
3509
3510 // Handle smis inline.
3511 // We can treat smis as 64-bit integers, since the (low-order) tag bits will
3512 // never get set by the negation. This is therefore the same as the Integer32
3513 // case in DoMathAbs, except that it operates on 64-bit values.
3514 STATIC_ASSERT((kSmiValueSize == 32) && (kSmiShift == 32) && (kSmiTag == 0));
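  // Given this layout, the smi payload occupies the upper 32 bits of the
  // register and the low tag bits are all zero, so negating the whole 64-bit
  // register negates the payload without disturbing the tag.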
3515
3516 // TODO(jbramley): We can't use JumpIfNotSmi here because the tbz it uses
3517 // doesn't always have enough range. Consider making a variant of it, or a
3518 // TestIsSmi helper.
3519 STATIC_ASSERT(kSmiTag == 0);
3520 __ Tst(input, kSmiTagMask);
3521 __ B(ne, deferred->entry());
3522
3523 __ Abs(result, input, NULL, &done);
3524
3525 // The result is the magnitude (abs) of the smallest value a smi can
3526 // represent, encoded as a double.
3527 __ Mov(result_bits, double_to_rawbits(0x80000000));
3528 __ B(deferred->allocation_entry());
3529
3530 __ Bind(deferred->exit());
3531 __ Str(result_bits, FieldMemOperand(result, HeapNumber::kValueOffset));
3532
3533 __ Bind(&done);
3534 }
3535
3536
3537 void LCodeGen::DoMathCos(LMathCos* instr) {
3538 ASSERT(ToDoubleRegister(instr->result()).is(d0));
3539 TranscendentalCacheStub stub(TranscendentalCache::COS,
3540 TranscendentalCacheStub::UNTAGGED);
3541 CallCode(stub.GetCode(isolate()), RelocInfo::CODE_TARGET, instr);
3542 ASSERT(ToDoubleRegister(instr->result()).Is(d0));
3543 }
3544
3545
3546 void LCodeGen::DoMathExp(LMathExp* instr) {
3547 DoubleRegister input = ToDoubleRegister(instr->value());
3548 DoubleRegister result = ToDoubleRegister(instr->result());
3549 DoubleRegister double_temp1 = ToDoubleRegister(instr->double_temp1());
3550 DoubleRegister double_temp2 = double_scratch();
3551 Register temp1 = ToRegister(instr->temp1());
3552 Register temp2 = ToRegister(instr->temp2());
3553 Register temp3 = ToRegister(instr->temp3());
3554
3555 MathExpGenerator::EmitMathExp(masm(), input, result,
3556 double_temp1, double_temp2,
3557 temp1, temp2, temp3);
3558 }
3559
3560
3561 void LCodeGen::DoMathFloor(LMathFloor* instr) {
3562 // TODO(jbramley): If we could provide a double result, we could use frintm
3563 // and produce a valid double result in a single instruction.
3564 DoubleRegister input = ToDoubleRegister(instr->value());
3565 Register result = ToRegister(instr->result());
3566 Label deopt;
3567 Label done;
3568
3569 if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) {
3570 // Check for an input of -0.0, using the result register as a scratch.
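    // The bit pattern of -0.0 is 0x8000000000000000 (INT64_MIN), and
    // subtracting 1 overflows for exactly that value, so the vs branch below
    // is taken only for an input of -0.0.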
3571 __ Fmov(result, input);
3572 __ Cmp(result, 1);
3573 __ B(&deopt, vs);
3574 }
3575
3576 __ Fcvtms(result, input);
3577
3578 // Check that the result fits into a 32-bit integer.
3579 // - The result did not overflow.
3580 __ Cmp(result, Operand(result, SXTW));
3581 // - The input was not NaN.
3582 __ Fccmp(input, input, NoFlag, eq);
3583 __ B(&done, eq);
3584
3585 __ Bind(&deopt);
3586 Deoptimize(instr->environment());
3587
3588 __ Bind(&done);
3589 }
3590
3591
3592 void LCodeGen::DoMathLog(LMathLog* instr) {
3593 ASSERT(ToDoubleRegister(instr->result()).is(d0));
3594 TranscendentalCacheStub stub(TranscendentalCache::LOG,
3595 TranscendentalCacheStub::UNTAGGED);
3596 CallCode(stub.GetCode(isolate()), RelocInfo::CODE_TARGET, instr);
3597 ASSERT(ToDoubleRegister(instr->result()).Is(d0));
3598 }
3599
3600
3601 void LCodeGen::DoMathPowHalf(LMathPowHalf* instr) {
3602 DoubleRegister input = ToDoubleRegister(instr->value());
3603 DoubleRegister result = ToDoubleRegister(instr->result());
3604 Label done;
3605
3606 // Math.pow(x, 0.5) differs from fsqrt(x) in the following cases:
3607 // Math.pow(-Infinity, 0.5) == +Infinity
3608 // Math.pow(-0.0, 0.5) == +0.0
3609
3610 // Catch -infinity inputs first.
3611 // TODO(jbramley): A constant infinity register would be helpful here.
3612 __ Fmov(double_scratch(), kFP64NegativeInfinity);
3613 __ Fcmp(double_scratch(), input);
3614 __ Fabs(result, input);
3615 __ B(&done, eq);
3616
3617 // Add +0.0 to convert -0.0 to +0.0.
3618 // TODO(jbramley): A constant zero register would be helpful here.
3619 __ Fmov(double_scratch(), 0.0);
3620 __ Fadd(double_scratch(), input, double_scratch());
3621 __ Fsqrt(result, double_scratch());
3622
3623 __ Bind(&done);
3624 }
3625
3626
3627 void LCodeGen::DoMathRound(LMathRound* instr) {
3628 // TODO(jbramley): We could provide a double result here using frint.
3629 DoubleRegister input = ToDoubleRegister(instr->value());
3630 DoubleRegister temp1 = ToDoubleRegister(instr->temp1());
3631 Register result = ToRegister(instr->result());
3632 Label try_rounding;
3633 Label deopt;
3634 Label done;
3635
3636 // Math.round() rounds to the nearest integer, with ties going towards
3637 // +infinity. This does not match any IEEE-754 rounding mode.
3638 // - Infinities and NaNs are propagated unchanged, but cause deopts because
3639 // they can't be represented as integers.
3640 // - The sign of the result is the same as the sign of the input. This means
3641 // that -0.0 rounds to itself, and values -0.5 <= input < 0 also produce a
3642 // result of -0.0.
3643
3644 DoubleRegister dot_five = double_scratch();
3645 __ Fmov(dot_five, 0.5);
3646 __ Fabs(temp1, input);
3647 __ Fcmp(temp1, dot_five);
3648 // If input is in [-0.5, -0], the result is -0.
3649 // If input is in [+0, +0.5[, the result is +0.
3650 // If the input is +0.5, the result is 1.
3651 __ B(hi, &try_rounding); // hi so NaN will also branch.
3652
3653 if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) {
3654 __ Fmov(result, input);
3655 __ Cmp(result, 0);
3656 DeoptimizeIf(mi, instr->environment()); // [-0.5, -0.0].
3657 }
3658 __ Fcmp(input, dot_five);
3659 __ Mov(result, 1); // +0.5.
3660 // Remaining cases: [+0, +0.5[ or [-0.5, +0.5[, depending on
3661 // flag kBailoutOnMinusZero, will return 0 (xzr).
3662 __ Csel(result, result, xzr, eq);
3663 __ B(&done);
3664
3665 __ Bind(&deopt);
3666 Deoptimize(instr->environment());
3667
3668 __ Bind(&try_rounding);
3669 // Since we're providing a 32-bit result, we can implement ties-to-infinity by
3670 // adding 0.5 to the input, then taking the floor of the result. This does not
3671 // work for very large positive doubles because adding 0.5 would cause an
3672 // intermediate rounding stage, so a different approach will be necessary if a
3673 // double result is needed.
3674 __ Fadd(temp1, input, dot_five);
3675 __ Fcvtms(result, temp1);
3676
3677 // Deopt if
3678 // * the input was NaN
3679 // * the result is not representable using a 32-bit integer.
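  // Fcmp produces an unordered result, setting the V flag, iff the input is
  // NaN. The Ccmp therefore only performs the 32-bit range check for ordered
  // inputs; for NaN it forces the flags to NoFlag, so the ne branch below
  // deoptimizes.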
3680 __ Fcmp(input, 0.0);
3681 __ Ccmp(result, Operand(result.W(), SXTW), NoFlag, vc);
3682 __ B(ne, &deopt);
3683
3684 __ Bind(&done);
3685 }
3686
3687
3688 void LCodeGen::DoMathSin(LMathSin* instr) {
3689 ASSERT(ToDoubleRegister(instr->result()).is(d0));
3690 TranscendentalCacheStub stub(TranscendentalCache::SIN,
3691 TranscendentalCacheStub::UNTAGGED);
3692 CallCode(stub.GetCode(isolate()), RelocInfo::CODE_TARGET, instr);
3693 ASSERT(ToDoubleRegister(instr->result()).Is(d0));
3694 }
3695
3696
3697 void LCodeGen::DoMathSqrt(LMathSqrt* instr) {
3698 DoubleRegister input = ToDoubleRegister(instr->value());
3699 DoubleRegister result = ToDoubleRegister(instr->result());
3700 __ Fsqrt(result, input);
3701 }
3702
3703
3704 void LCodeGen::DoMathTan(LMathTan* instr) {
3705 ASSERT(ToDoubleRegister(instr->result()).is(d0));
3706 TranscendentalCacheStub stub(TranscendentalCache::TAN,
3707 TranscendentalCacheStub::UNTAGGED);
3708 CallCode(stub.GetCode(isolate()), RelocInfo::CODE_TARGET, instr);
3709 ASSERT(ToDoubleRegister(instr->result()).Is(d0));
3710 }
3711
3712
3713 void LCodeGen::DoMathMinMax(LMathMinMax* instr) {
3714 HMathMinMax::Operation op = instr->hydrogen()->operation();
3715 if (instr->hydrogen()->representation().IsInteger32()) {
3716 Register result = ToRegister32(instr->result());
3717 Register left = ToRegister32(instr->left());
3718 Operand right = ToOperand32(instr->right());
3719
3720 __ Cmp(left, right);
3721 __ Csel(result, left, right, (op == HMathMinMax::kMathMax) ? ge : le);
3722 } else {
3723 ASSERT(instr->hydrogen()->representation().IsDouble());
3724 DoubleRegister result = ToDoubleRegister(instr->result());
3725 DoubleRegister left = ToDoubleRegister(instr->left());
3726 DoubleRegister right = ToDoubleRegister(instr->right());
3727
3728 if (op == HMathMinMax::kMathMax) {
3729 __ Fmax(result, left, right);
3730 } else {
3731 ASSERT(op == HMathMinMax::kMathMin);
3732 __ Fmin(result, left, right);
3733 }
3734 }
3735 }
3736
3737
3738 void LCodeGen::DoMulConstI(LMulConstI* instr) {
3739 Register result = ToRegister32(instr->result());
3740 Register left = ToRegister32(instr->left());
3741 int32_t right = ToInteger32(instr->right());
3742
3743 bool can_overflow = instr->hydrogen()->CheckFlag(HValue::kCanOverflow);
3744 bool bailout_on_minus_zero =
3745 instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero);
3746
3747 if (bailout_on_minus_zero) {
3748 if (right < 0) {
3749 // The result is -0 if right is negative and left is zero.
3750 DeoptimizeIfZero(left, instr->environment());
3751 } else if (right == 0) {
3752 // The result is -0 if the right is zero and the left is negative.
3753 DeoptimizeIfNegative(left, instr->environment());
3754 }
3755 }
3756
3757 switch (right) {
3758 // Cases which can detect overflow.
3759 case -1:
3760 if (can_overflow) {
3761         // Only kMinInt (0x80000000) overflows when negated.
3762 __ Negs(result, left);
3763 DeoptimizeIf(vs, instr->environment());
3764 } else {
3765 __ Neg(result, left);
3766 }
3767 break;
3768 case 0:
3769 // This case can never overflow.
3770 __ Mov(result, 0);
3771 break;
3772 case 1:
3773 // This case can never overflow.
3774 __ Mov(result, left, kDiscardForSameWReg);
3775 break;
3776 case 2:
3777 if (can_overflow) {
3778 __ Adds(result, left, left);
3779 DeoptimizeIf(vs, instr->environment());
3780 } else {
3781 __ Add(result, left, left);
3782 }
3783 break;
3784
3785     // None of the other cases detect overflow, because doing so would
3786     // probably be no faster than using the smull method in LMulI.
3787 // TODO(jbramley): Investigate this, and add overflow support if it would
3788 // be useful.
3789 default:
3790 ASSERT(!can_overflow);
3791
3792 // Multiplication by constant powers of two (and some related values)
3793 // can be done efficiently with shifted operands.
3794 if (right >= 0) {
3795 if (IsPowerOf2(right)) {
3796 // result = left << log2(right)
3797 __ Lsl(result, left, WhichPowerOf2(right));
3798 } else if (IsPowerOf2(right - 1)) {
3799           // result = left + (left << log2(right - 1))
3800 __ Add(result, left, Operand(left, LSL, WhichPowerOf2(right - 1)));
3801 } else if (IsPowerOf2(right + 1)) {
3802           // result = (left << log2(right + 1)) - left
3803 __ Sub(result, left, Operand(left, LSL, WhichPowerOf2(right + 1)));
3804 __ Neg(result, result);
3805 } else {
3806 UNREACHABLE();
3807 }
3808 } else {
3809 if (IsPowerOf2(-right)) {
3810           // result = -(left << log2(-right))
3811 __ Neg(result, Operand(left, LSL, WhichPowerOf2(-right)));
3812 } else if (IsPowerOf2(-right + 1)) {
3813           // result = left - (left << log2(-right + 1))
3814 __ Sub(result, left, Operand(left, LSL, WhichPowerOf2(-right + 1)));
3815 } else if (IsPowerOf2(-right - 1)) {
3816           // result = -left - (left << log2(-right - 1))
3817 __ Add(result, left, Operand(left, LSL, WhichPowerOf2(-right - 1)));
3818 __ Neg(result, result);
3819 } else {
3820 UNREACHABLE();
3821 }
3822 }
3823 break;
3824 }
3825 }
3826
3827
3828 void LCodeGen::DoMulI(LMulI* instr) {
3829 Register result = ToRegister32(instr->result());
3830 Register left = ToRegister32(instr->left());
3831
3832 bool can_overflow = instr->hydrogen()->CheckFlag(HValue::kCanOverflow);
3833 bool bailout_on_minus_zero =
3834 instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero);
3835
3836 Register right = ToRegister32(instr->right());
3837 if (bailout_on_minus_zero) {
3838 // If one operand is zero and the other is negative, the result is -0.
3839 // - Set Z (eq) if either left or right, or both, are 0.
3840 __ Cmp(left, 0);
3841 __ Ccmp(right, 0, ZFlag, ne);
3842 // - If so (eq), set N (mi) if left + right is negative.
3843 // - Otherwise, clear N.
3844 __ Ccmn(left, right, NoFlag, eq);
3845 DeoptimizeIf(mi, instr->environment());
3846 }
3847
3848 if (can_overflow) {
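    // Smull computes the full 64-bit product of the two 32-bit operands. If
    // the product differs from the sign extension of its low 32 bits, the
    // multiplication overflowed the int32 range.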
3849 __ Smull(result.X(), left, right);
3850 __ Cmp(result.X(), Operand(result, SXTW));
3851 DeoptimizeIf(ne, instr->environment());
3852 } else {
3853 __ Mul(result, left, right);
3854 }
3855 }
3856
3857
3858 void LCodeGen::DoDeferredNumberTagD(LNumberTagD* instr) {
3859 // TODO(3095996): Get rid of this. For now, we need to make the
3860 // result register contain a valid pointer because it is already
3861 // contained in the register pointer map.
3862 Register result = ToRegister(instr->result());
3863 __ Mov(result, 0);
3864
3865 PushSafepointRegistersScope scope(this, Safepoint::kWithRegisters);
3866 CallRuntimeFromDeferred(Runtime::kAllocateHeapNumber, 0, instr);
3867 __ StoreToSafepointRegisterSlot(x0, result);
3868 }
3869
3870
3871 void LCodeGen::DoNumberTagD(LNumberTagD* instr) {
3872 class DeferredNumberTagD: public LDeferredCode {
3873 public:
3874 DeferredNumberTagD(LCodeGen* codegen, LNumberTagD* instr)
3875 : LDeferredCode(codegen), instr_(instr) { }
3876 virtual void Generate() { codegen()->DoDeferredNumberTagD(instr_); }
3877 virtual LInstruction* instr() { return instr_; }
3878 private:
3879 LNumberTagD* instr_;
3880 };
3881
3882 DoubleRegister input = ToDoubleRegister(instr->value());
3883 Register result = ToRegister(instr->result());
3884 Register temp1 = ToRegister(instr->temp1());
3885 Register temp2 = ToRegister(instr->temp2());
3886 Label done;
3887
3888 bool convert_hole = false;
3889 HValue* change_input = instr->hydrogen()->value();
3890 if (change_input->IsLoadKeyed()) {
3891 HLoadKeyed* load = HLoadKeyed::cast(change_input);
3892 convert_hole = load->UsesMustHandleHole();
3893 }
3894
3895 if (convert_hole) {
3896 Label no_special_nan_handling, canonicalize;
3897 // TODO(jbramley): This special case does not exist in bleeding_edge.
3898 // * Non-NaN inputs are handled as usual.
3899 // * If the input is the hole, the output is the hole.
3900 // * If the input is any other NaN, the output is the canonical NaN.
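    // Fcmp(input, 0.0) sets the V flag (unordered) iff the input is NaN, so
    // vc takes the fast path for every non-NaN input.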
3901 __ Fcmp(input, 0.0);
3902 __ B(vc, &no_special_nan_handling);
3903 __ Fmov(temp1, input);
3904 __ Cmp(temp1, kHoleNanInt64);
3905 __ B(ne, &canonicalize);
3906 __ Mov(result, Operand(factory()->the_hole_value()));
3907 __ B(&done);
3908 __ Bind(&canonicalize);
3909 // TODO(jbramley): Overwriting the input is probably a mistake, but this
3910 // code is removed in bleeding_edge anyway so it won't be here for long.
3911 TODO_UNIMPLEMENTED("DoNumberTagD: Fix NaN canonicalization logic.");
3912 __ Fmov(input, FixedDoubleArray::canonical_not_the_hole_nan_as_double());
3913 __ Bind(&no_special_nan_handling);
3914 }
3915
3916 DeferredNumberTagD* deferred = new(zone()) DeferredNumberTagD(this, instr);
3917 if (FLAG_inline_new) {
3918 __ AllocateHeapNumber(result, deferred->entry(), temp1, temp2);
3919 } else {
3920 __ B(deferred->entry());
3921 }
3922
3923 __ Bind(deferred->exit());
3924 __ Str(input, FieldMemOperand(result, HeapNumber::kValueOffset));
3925 __ Bind(&done);
3926 }
3927
3928
3929 void LCodeGen::DoDeferredNumberTagI(LInstruction* instr,
3930 LOperand* value,
3931 LOperand* temp1,
3932 LOperand* temp2,
3933 IntegerSignedness signedness) {
3934 Label slow;
3935 Register src = ToRegister32(value);
3936 Register dst = ToRegister(instr->result());
3937 DoubleRegister dbl_scratch = double_scratch();
3938
3939 Label done;
3940 if (signedness == SIGNED_INT32) {
3941 ASM_UNIMPLEMENTED_BREAK("DeferredNumberTagI - signed int32 case.");
3942 } else {
3943 ASSERT(signedness == UNSIGNED_INT32);
3944 __ Ucvtf(dbl_scratch, src);
3945 }
3946
3947 Register scratch1 = ToRegister(temp1);
3948 if (FLAG_inline_new) {
3949 Register scratch2 = ToRegister(temp2);
3950 __ AllocateHeapNumber(dst, &slow, scratch1, scratch2);
3951 __ B(&done);
3952 }
3953
3954 // Slow case: call the runtime system to do the number allocation.
3955 __ Bind(&slow);
3956
3957 // Check that the dst register contains new space allocation top, which is a
3958 // valid address for the GC.
3959 if (FLAG_debug_code) {
3960 ExternalReference new_space_allocation_top =
3961 ExternalReference::new_space_allocation_top_address(isolate());
3962 __ Mov(scratch1, Operand(new_space_allocation_top));
3963 __ Ldr(scratch1, MemOperand(scratch1));
3964 __ Cmp(dst, scratch1);
3965 __ Check(eq, "Register dst does not contain allocation top.");
3966 }
3967
3968 {
3969 // Preserve the value of all registers.
3970 PushSafepointRegistersScope scope(this, Safepoint::kWithRegisters);
3971
3972 CallRuntimeFromDeferred(Runtime::kAllocateHeapNumber, 0, instr);
3973 __ StoreToSafepointRegisterSlot(x0, dst);
3974 }
3975
3976   // Done. Store the converted value held in dbl_scratch into the newly
3977   // allocated heap number.
3978 __ Bind(&done);
3979 __ Str(dbl_scratch, FieldMemOperand(dst, HeapNumber::kValueOffset));
3980 }
3981
3982
3983 void LCodeGen::DoNumberTagU(LNumberTagU* instr) {
3984 class DeferredNumberTagU: public LDeferredCode {
3985 public:
3986 DeferredNumberTagU(LCodeGen* codegen, LNumberTagU* instr)
3987 : LDeferredCode(codegen), instr_(instr) { }
3988 virtual void Generate() {
3989 codegen()->DoDeferredNumberTagI(instr_,
3990 instr_->value(),
3991 instr_->temp1(),
3992 instr_->temp2(),
3993 UNSIGNED_INT32);
3994 }
3995 virtual LInstruction* instr() { return instr_; }
3996 private:
3997 LNumberTagU* instr_;
3998 };
3999
4000 Register value = ToRegister(instr->value());
4001 Register result = ToRegister(instr->result());
4002
4003 DeferredNumberTagU* deferred = new(zone()) DeferredNumberTagU(this, instr);
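  // Values up to Smi::kMaxValue can be tagged directly; anything larger, as
  // an unsigned 32-bit value, needs a HeapNumber, which is allocated in the
  // deferred code.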
4004 __ Cmp(value, Smi::kMaxValue);
4005 __ B(hi, deferred->entry());
4006 __ SmiTag(result, value);
4007 __ Bind(deferred->exit());
4008 }
4009
4010
4011 void LCodeGen::DoNumberUntagD(LNumberUntagD* instr) {
4012 Register input = ToRegister(instr->value());
4013 Register scratch = ToRegister(instr->temp());
4014 DoubleRegister result = ToDoubleRegister(instr->result());
4015 bool allow_undefined_as_nan = instr->hydrogen()->allow_undefined_as_nan();
4016
4017 Label done, load_smi;
4018
4019 // Work out what untag mode we're working with.
4020 NumberUntagDMode mode = NUMBER_CANDIDATE_IS_ANY_TAGGED;
4021 HValue* value = instr->hydrogen()->value();
4022 if (value->type().IsSmi()) {
4023 mode = NUMBER_CANDIDATE_IS_SMI;
4024 } else if (value->IsLoadKeyed()) {
4025 HLoadKeyed* load = HLoadKeyed::cast(value);
4026 if (load->UsesMustHandleHole()) {
4027 if (load->hole_mode() == ALLOW_RETURN_HOLE) {
4028 mode = NUMBER_CANDIDATE_IS_ANY_TAGGED_CONVERT_HOLE;
4029 }
4030 }
4031 }
4032
4033 STATIC_ASSERT(NUMBER_CANDIDATE_IS_ANY_TAGGED_CONVERT_HOLE >
4034 NUMBER_CANDIDATE_IS_ANY_TAGGED);
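  // The enum ordering asserted above lets a single comparison cover both of
  // the tagged modes.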
4035 if (mode >= NUMBER_CANDIDATE_IS_ANY_TAGGED) {
4036 __ JumpIfSmi(input, &load_smi);
4037
4038 Label convert_undefined, deopt;
4039
4040 // Heap number map check.
4041 Label* not_heap_number = allow_undefined_as_nan ? &convert_undefined
4042 : &deopt;
4043 __ Ldr(scratch, FieldMemOperand(input, HeapObject::kMapOffset));
4044 __ JumpIfNotRoot(scratch, Heap::kHeapNumberMapRootIndex, not_heap_number);
4045
4046 // Load heap number.
4047 __ Ldr(result, FieldMemOperand(input, HeapNumber::kValueOffset));
4048 if (instr->hydrogen()->deoptimize_on_minus_zero()) {
4049 ASM_UNIMPLEMENTED_BREAK("NumberUntagD - deopt on minus zero");
4050 }
4051 __ B(&done);
4052
4053 if (allow_undefined_as_nan) {
4054 Label load_nan;
4055
4056 __ Bind(&convert_undefined);
4057 // Convert undefined (and hole) to NaN.
4058 if (mode == NUMBER_CANDIDATE_IS_ANY_TAGGED_CONVERT_HOLE) {
4059 __ JumpIfRoot(input, Heap::kUndefinedValueRootIndex, &load_nan);
4060 __ JumpIfNotRoot(input, Heap::kTheHoleValueRootIndex, &deopt);
4061 } else {
4062 ASSERT(mode == NUMBER_CANDIDATE_IS_ANY_TAGGED);
4063 __ JumpIfNotRoot(input, Heap::kUndefinedValueRootIndex, &deopt);
4064 }
4065
4066 __ Bind(&load_nan);
4067 __ LoadRoot(scratch, Heap::kNanValueRootIndex);
4068 __ Ldr(result, FieldMemOperand(scratch, HeapNumber::kValueOffset));
4069 __ B(&done);
4070 }
4071
4072 __ Bind(&deopt);
4073 Deoptimize(instr->environment());
4074 } else {
4075 ASSERT(mode == NUMBER_CANDIDATE_IS_SMI);
4076 // Fall through to load_smi.
4077 }
4078
4079 // Smi to double register conversion.
4080 __ Bind(&load_smi);
4081 __ SmiUntagToDouble(result, input);
4082
4083 __ Bind(&done);
4084 }
4085
4086
4087 void LCodeGen::DoOsrEntry(LOsrEntry* instr) {
4088 ASM_UNIMPLEMENTED_BREAK("DoOsrEntry");
4089 }
4090
4091
4092 void LCodeGen::DoOuterContext(LOuterContext* instr) {
4093 Register context = ToRegister(instr->context());
4094 Register result = ToRegister(instr->result());
4095 __ Ldr(result, ContextMemOperand(context, Context::PREVIOUS_INDEX));
4096 }
4097
4098
4099 void LCodeGen::DoParameter(LParameter* instr) {
4100 // Nothing to do.
4101 }
4102
4103
4104 void LCodeGen::DoPushArgument(LPushArgument* instr) {
4105 LOperand* argument = instr->value();
4106 if (argument->IsDoubleRegister() || argument->IsDoubleStackSlot()) {
4107 Abort("DoPushArgument not implemented for double types.");
4108 } else {
4109 __ Push(ToRegister(argument));
4110 }
4111 }
4112
4113
4114 void LCodeGen::DoReturn(LReturn* instr) {
4115 if (FLAG_trace && info()->IsOptimizing()) {
4116 // Push the return value on the stack as the parameter.
4117 // Runtime::TraceExit returns its parameter in x0.
4118 __ Push(x0);
4119 __ CallRuntime(Runtime::kTraceExit, 1);
4120 }
4121
4122 if (info()->saves_caller_doubles()) {
4123 ASSERT(NeedsEagerFrame());
4124 BitVector* doubles = chunk()->allocated_double_registers();
4125 BitVector::Iterator iterator(doubles);
4126 int count = 0;
4127 while (!iterator.Done()) {
4128 FPRegister value = FPRegister::FromAllocationIndex(iterator.Current());
4129 // TODO(jbramley): Make Peek support FPRegisters.
4130 __ Ldr(value, MemOperand(__ StackPointer(), count * kDoubleSize));
4131 iterator.Advance();
4132 count++;
4133 }
4134 }
4135
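  // no_frame_start marks the first instruction emitted after the frame is
  // dismantled. The range recorded below (AddNoFrameRange) tells stack
  // walkers, such as the profiler, that no standard frame exists at these
  // addresses; this mirrors how the other back ends use it.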
4136 int no_frame_start = -1;
4137 if (NeedsEagerFrame()) {
4138 Register stack_pointer = masm()->StackPointer();
4139 __ Mov(stack_pointer, fp);
4140 no_frame_start = masm_->pc_offset();
4141 __ Pop(fp, lr);
4142 }
4143
4144 if (instr->has_constant_parameter_count()) {
4145 int parameter_count = ToInteger32(instr->constant_parameter_count());
4146 __ Drop(parameter_count + 1);
4147 } else {
4148 Register parameter_count = ToRegister(instr->parameter_count());
4149 __ DropBySMI(parameter_count);
4150 }
4151 __ Ret();
4152
4153 if (no_frame_start != -1) {
4154 info_->AddNoFrameRange(no_frame_start, masm_->pc_offset());
4155 }
4156 }
4157
4158
4159 void LCodeGen::DoSeqStringSetChar(LSeqStringSetChar* instr) {
4160 String::Encoding encoding = instr->encoding();
4161 Register string = ToRegister(instr->string());
4162 Register index = ToRegister(instr->index());
4163 Register value = ToRegister(instr->value());
4164 Register temp = ToRegister(instr->temp());
4165
4166 if (FLAG_debug_code) {
4167 __ Ldr(temp, FieldMemOperand(string, HeapObject::kMapOffset));
4168 __ Ldrb(temp, FieldMemOperand(temp, Map::kInstanceTypeOffset));
4169 __ And(temp, temp, kStringRepresentationMask | kStringEncodingMask);
4170
4171 if (encoding == String::ONE_BYTE_ENCODING) {
4172 __ Cmp(temp, kSeqStringTag | kOneByteStringTag);
4173 __ Check(eq, "Unexpected string type");
4174 } else {
4175 ASSERT(encoding == String::TWO_BYTE_ENCODING);
4176 __ Cmp(temp, kSeqStringTag | kTwoByteStringTag);
4177 __ Check(eq, "Unexpected string type");
4178 }
4179 }
4180
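  // Compute the address of the first character: the string contents follow
  // the SeqString header, and subtracting kHeapObjectTag compensates for the
  // tag in the string pointer, so temp + index (scaled by the character
  // size) addresses the character directly.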
4181 __ Add(temp, string, SeqString::kHeaderSize - kHeapObjectTag);
4182 if (encoding == String::ONE_BYTE_ENCODING) {
4183 __ Strb(value, MemOperand(temp, index));
4184 } else {
4185 __ Strh(value, MemOperand(temp, index, LSL, 1));
4186 }
4187 }
4188
4189
4190 void LCodeGen::DoSmiTag(LSmiTag* instr) {
4191 ASSERT(!instr->hydrogen_value()->CheckFlag(HValue::kCanOverflow));
4192 __ SmiTag(ToRegister(instr->result()), ToRegister(instr->value()));
4193 }
4194
4195
4196 void LCodeGen::DoSmiUntag(LSmiUntag* instr) {
4197 Register input = ToRegister(instr->value());
4198 Register result = ToRegister(instr->result());
4200
4201   if (instr->needs_check()) {
4202     DeoptimizeIfNotSmi(input, instr->environment());
4203   }
4204
4206   __ SmiUntag(result, input);
4208 }
4209
4210
4211 void LCodeGen::DoShiftI(LShiftI* instr) {
4212 LOperand* right_op = instr->right();
4213 Register left = ToRegister32(instr->left());
4214 Register result = ToRegister32(instr->result());
4215
4216 if (right_op->IsRegister()) {
4217 Register right = ToRegister32(instr->right());
4218 switch (instr->op()) {
4219 case Token::ROR: __ Ror(result, left, right); break;
4220 case Token::SAR: __ Asr(result, left, right); break;
4221 case Token::SHL: __ Lsl(result, left, right); break;
4222 case Token::SHR:
4223 if (instr->can_deopt()) {
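          // A zero shift distance leaves the input unchanged, so a negative
          // input would produce an unsigned result with the top bit set,
          // which cannot be represented as a signed 32-bit integer.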
4224 // TODO(all): Using conditional compare may be faster here, eg.
4225 // Deopt if (right == 0) && (left < 0).
4226 // __ Cmp(right, 0);
4227 // __ Ccmp(left, 0, NoFlag, eq);
4228 Label right_not_zero;
4229 __ Cbnz(right, &right_not_zero);
4230 DeoptimizeIfNegative(left, instr->environment());
4231 __ Bind(&right_not_zero);
4232 }
4233 __ Lsr(result, left, right);
4234 break;
4235 default: UNREACHABLE();
4236 }
4237 } else {
4238 ASSERT(right_op->IsConstantOperand());
4239 int shift_count = ToInteger32(LConstantOperand::cast(right_op)) & 0x1f;
4240 if (shift_count == 0) {
4241 if ((instr->op() == Token::SHR) && instr->can_deopt()) {
4242 DeoptimizeIfNegative(left, instr->environment());
4243 }
4244 __ Mov(result, left, kDiscardForSameWReg);
4245 } else {
4246 switch (instr->op()) {
4247 case Token::ROR: __ Ror(result, left, shift_count); break;
4248 case Token::SAR: __ Asr(result, left, shift_count); break;
4249 case Token::SHL: __ Lsl(result, left, shift_count); break;
4250 case Token::SHR: __ Lsr(result, left, shift_count); break;
4251 default: UNREACHABLE();
4252 }
4253 }
4254 }
4255 }
4256
4257
4258 void LCodeGen::DoDebugBreak(LDebugBreak* instr) {
4259 __ Debug("LDebugBreak", 0, BREAK);
4260 }
4261
4262
4263 void LCodeGen::DoDeferredStackCheck(LStackCheck* instr) {
4264 PushSafepointRegistersScope scope(this, Safepoint::kWithRegisters);
4265 __ CallRuntimeSaveDoubles(Runtime::kStackGuard);
4266 RecordSafepointWithLazyDeopt(
4267 instr, RECORD_SAFEPOINT_WITH_REGISTERS_AND_NO_ARGUMENTS);
4268 ASSERT(instr->HasEnvironment());
4269 LEnvironment* env = instr->environment();
4270 safepoints_.RecordLazyDeoptimizationIndex(env->deoptimization_index());
4271 }
4272
4273
4274 void LCodeGen::DoStackCheck(LStackCheck* instr) {
4275 class DeferredStackCheck: public LDeferredCode {
4276 public:
4277 DeferredStackCheck(LCodeGen* codegen, LStackCheck* instr)
4278 : LDeferredCode(codegen), instr_(instr) { }
4279 virtual void Generate() { codegen()->DoDeferredStackCheck(instr_); }
4280 virtual LInstruction* instr() { return instr_; }
4281 private:
4282 LStackCheck* instr_;
4283 };
4284
4285 ASSERT(instr->HasEnvironment());
4286 LEnvironment* env = instr->environment();
4287 // There is no LLazyBailout instruction for stack-checks. We have to
4288 // prepare for lazy deoptimization explicitly here.
4289 if (instr->hydrogen()->is_function_entry()) {
4290 // Perform stack overflow check.
4291 Label done;
4292 __ CompareRoot(masm()->StackPointer(), Heap::kStackLimitRootIndex);
4293 __ B(hs, &done);
4294
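    // The call emitted below must have a fixed, predictable size: the scope
    // checks that exactly Assembler::kCallSizeWithRelocation is emitted,
    // which the lazy deoptimization machinery relies on when locating the
    // call site.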
4295 PredictableCodeSizeScope predictable(masm_,
4296 Assembler::kCallSizeWithRelocation);
4297 StackCheckStub stub;
4298 CallCode(stub.GetCode(isolate()), RelocInfo::CODE_TARGET, instr);
4299 EnsureSpaceForLazyDeopt();
4300
4301 __ Bind(&done);
4302 RegisterEnvironmentForDeoptimization(env, Safepoint::kLazyDeopt);
4303 safepoints_.RecordLazyDeoptimizationIndex(env->deoptimization_index());
4304 } else {
4305 ASSERT(instr->hydrogen()->is_backwards_branch());
4306 // Perform stack overflow check if this goto needs it before jumping.
4307 DeferredStackCheck* deferred_stack_check =
4308 new(zone()) DeferredStackCheck(this, instr);
4309 __ CompareRoot(masm()->StackPointer(), Heap::kStackLimitRootIndex);
4310 __ B(lo, deferred_stack_check->entry());
4311
4312 EnsureSpaceForLazyDeopt();
4313 __ Bind(instr->done_label());
4314 deferred_stack_check->SetExit(instr->done_label());
4315 RegisterEnvironmentForDeoptimization(env, Safepoint::kLazyDeopt);
4316 // Don't record a deoptimization index for the safepoint here.
4317     // This will be done explicitly when emitting the call and the safepoint
4318     // in the deferred code.
4319 }
4320 }
4321
4322
4323 void LCodeGen::DoStoreContextSlot(LStoreContextSlot* instr) {
4324 Register context = ToRegister(instr->context());
4325 Register value = ToRegister(instr->value());
4326 Register scratch = ToRegister(instr->temp());
4327 MemOperand target = ContextMemOperand(context, instr->slot_index());
4328
4329 Label skip_assignment;
4330
4331 if (instr->hydrogen()->RequiresHoleCheck()) {
4332 __ Ldr(scratch, target);
4333 if (instr->hydrogen()->DeoptimizesOnHole()) {
4334 DeoptimizeIfRoot(scratch, Heap::kTheHoleValueRootIndex,
4335 instr->environment());
4336 } else {
4337 __ JumpIfNotRoot(scratch, Heap::kTheHoleValueRootIndex, &skip_assignment);
4338 }
4339 }
4340
4341 __ Str(value, target);
4342 if (instr->hydrogen()->NeedsWriteBarrier()) {
4343 HType type = instr->hydrogen()->value()->type();
4344 SmiCheck check_needed =
4345 type.IsHeapObject() ? OMIT_SMI_CHECK : INLINE_SMI_CHECK;
4346 __ RecordWriteContextSlot(context,
4347 target.offset(),
4348 value,
4349 scratch,
4350 GetLinkRegisterState(),
4351 kSaveFPRegs,
4352 EMIT_REMEMBERED_SET,
4353 check_needed);
4354 }
4355 __ Bind(&skip_assignment);
4356 }
4357
4358
4359 void LCodeGen::DoStoreGlobalCell(LStoreGlobalCell* instr) {
4360 Register value = ToRegister(instr->value());
4361 Register cell = ToRegister(instr->temp1());
4362
4363 // Load the cell.
4364 __ Mov(cell, Operand(instr->hydrogen()->cell()));
4365
4366   // If the cell we are storing to contains the hole, it could have been
4367   // deleted from the property dictionary. We would then need to update the
4368   // property details in the dictionary to mark the entry as no longer
4369   // deleted, so we deoptimize in that case instead.
4370 if (instr->hydrogen()->RequiresHoleCheck()) {
4371 Register payload = ToRegister(instr->temp2());
4372 __ Ldr(payload, FieldMemOperand(cell, JSGlobalPropertyCell::kValueOffset));
4373 DeoptimizeIfRoot(
4374 payload, Heap::kTheHoleValueRootIndex, instr->environment());
4375 }
4376
4377 // Store the value.
4378 __ Str(value, FieldMemOperand(cell, JSGlobalPropertyCell::kValueOffset));
4379 // Cells are always rescanned, so no write barrier here.
4380 }
4381
4382
4383 void LCodeGen::DoStoreGlobalGeneric(LStoreGlobalGeneric* instr) {
4384 ASSERT(ToRegister(instr->global_object()).Is(x1));
4385 ASSERT(ToRegister(instr->value()).Is(x0));
4386
4387 __ Mov(x2, Operand(instr->name()));
4388 Handle<Code> ic = (instr->strict_mode_flag() == kStrictMode)
4389 ? isolate()->builtins()->StoreIC_Initialize_Strict()
4390 : isolate()->builtins()->StoreIC_Initialize();
4391 CallCode(ic, RelocInfo::CODE_TARGET_CONTEXT, instr);
4392 }
4393
4394
4395 void LCodeGen::DoStoreKeyedExternal(LStoreKeyedExternal* instr) {
4396 Register ext_ptr = ToRegister(instr->elements());
4397 Register key = no_reg;
4398 Register scratch;
4399 ElementsKind elements_kind = instr->elements_kind();
4400
4401 bool key_is_smi = instr->hydrogen()->key()->representation().IsSmi();
4402 bool key_is_constant = instr->key()->IsConstantOperand();
4403 int constant_key = 0;
4404 if (key_is_constant) {
4405 ASSERT(instr->temp() == NULL);
4406 constant_key = ToInteger32(LConstantOperand::cast(instr->key()));
4407 if (constant_key & 0xf0000000) {
4408 Abort("Array index constant value too big.");
4409 }
4410 } else {
4411 key = ToRegister(instr->key());
4412 scratch = ToRegister(instr->temp());
4413 }
4414
4415 int element_size_shift = ElementsKindToShiftSize(elements_kind);
4416 MemOperand dst =
4417 PrepareKeyedExternalArrayOperand(key, ext_ptr, scratch, key_is_smi,
4418 key_is_constant, constant_key,
4419 element_size_shift,
4420 instr->additional_index());
4421
4422 if (elements_kind == EXTERNAL_FLOAT_ELEMENTS) {
4423 DoubleRegister value = ToDoubleRegister(instr->value());
4424 DoubleRegister dbl_scratch = double_scratch();
4425 __ Fcvt(dbl_scratch.S(), value);
4426 __ Str(dbl_scratch.S(), dst);
4427 } else if (elements_kind == EXTERNAL_DOUBLE_ELEMENTS) {
4428 DoubleRegister value = ToDoubleRegister(instr->value());
4429 __ Str(value, dst);
4430 } else {
4431 Register value = ToRegister(instr->value());
4432
4433 switch (elements_kind) {
4434 case EXTERNAL_PIXEL_ELEMENTS:
4435 case EXTERNAL_BYTE_ELEMENTS:
4436 case EXTERNAL_UNSIGNED_BYTE_ELEMENTS: __ Strb(value, dst); break;
4437 case EXTERNAL_SHORT_ELEMENTS:
4438 case EXTERNAL_UNSIGNED_SHORT_ELEMENTS: __ Strh(value, dst); break;
4439 case EXTERNAL_INT_ELEMENTS:
4440 case EXTERNAL_UNSIGNED_INT_ELEMENTS: __ Str(value.W(), dst); break;
4441 case EXTERNAL_FLOAT_ELEMENTS:
4442 case EXTERNAL_DOUBLE_ELEMENTS:
4443 case FAST_DOUBLE_ELEMENTS:
4444 case FAST_ELEMENTS:
4445 case FAST_SMI_ELEMENTS:
4446 case FAST_HOLEY_DOUBLE_ELEMENTS:
4447 case FAST_HOLEY_ELEMENTS:
4448 case FAST_HOLEY_SMI_ELEMENTS:
4449 case DICTIONARY_ELEMENTS:
4450 case NON_STRICT_ARGUMENTS_ELEMENTS:
4451 UNREACHABLE();
4452 break;
4453 }
4454 }
4455 }
4456
4457
4458 void LCodeGen::DoStoreKeyedFixedDouble(LStoreKeyedFixedDouble* instr) {
4459 Register elements = ToRegister(instr->elements());
4460 DoubleRegister value = ToDoubleRegister(instr->value());
4461 Register store_base = ToRegister(instr->temp());
4462 int offset = 0;
4463
4464 if (instr->key()->IsConstantOperand()) {
4465 int constant_key = ToInteger32(LConstantOperand::cast(instr->key()));
4466 if (constant_key & 0xf0000000) {
4467 Abort("Array index constant value too big.");
4468 }
4469 offset = FixedDoubleArray::OffsetOfElementAt(constant_key +
4470 instr->additional_index());
4471 store_base = elements;
4472 } else {
4473 Register key = ToRegister(instr->key());
4474 bool key_is_tagged = instr->hydrogen()->key()->representation().IsSmi();
4475 CalcKeyedArrayBaseRegister(store_base, elements, key, key_is_tagged,
4476 instr->hydrogen()->elements_kind());
4477 offset = FixedDoubleArray::OffsetOfElementAt(instr->additional_index());
4478 }
4479
4480 if (instr->NeedsCanonicalization()) {
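    // Fmaxnm returns its numeric operand when the other operand is a quiet
    // NaN, so the value stored below is the input itself when it is a number
    // and the canonical NaN when the input is any NaN.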
4481 DoubleRegister dbl_scratch = double_scratch();
4482 __ Fmov(dbl_scratch,
4483 FixedDoubleArray::canonical_not_the_hole_nan_as_double());
4484 __ Fmaxnm(dbl_scratch, dbl_scratch, value);
4485 __ Str(dbl_scratch, FieldMemOperand(store_base, offset));
4486 } else {
4487 __ Str(value, FieldMemOperand(store_base, offset));
4488 }
4489 }
4490
4491
4492 void LCodeGen::DoStoreKeyedFixed(LStoreKeyedFixed* instr) {
4493 Register value = ToRegister(instr->value());
4494 Register elements = ToRegister(instr->elements());
4495 Register store_base = ToRegister(instr->temp());
4496 Register key = no_reg;
4497 int offset = 0;
4498
4499 if (instr->key()->IsConstantOperand()) {
4500 ASSERT(!instr->hydrogen()->NeedsWriteBarrier());
4501 LConstantOperand* const_operand = LConstantOperand::cast(instr->key());
4502 offset = FixedArray::OffsetOfElementAt(ToInteger32(const_operand) +
4503 instr->additional_index());
4504 store_base = elements;
4505 } else {
4506 key = ToRegister(instr->key());
4507 bool key_is_tagged = instr->hydrogen()->key()->representation().IsSmi();
4508 CalcKeyedArrayBaseRegister(store_base, elements, key, key_is_tagged,
4509 instr->hydrogen()->elements_kind());
4510 offset = FixedArray::OffsetOfElementAt(instr->additional_index());
4511 }
4512 __ Str(value, FieldMemOperand(store_base, offset));
4513
4514 if (instr->hydrogen()->NeedsWriteBarrier()) {
4515 HType type = instr->hydrogen()->value()->type();
4516 SmiCheck check_needed = type.IsHeapObject()
4517 ? OMIT_SMI_CHECK
4518 : INLINE_SMI_CHECK;
4519 // Compute address of modified element and store it into key register.
4520 __ Add(key, store_base, offset - kHeapObjectTag);
4521 __ RecordWrite(elements, key, value, GetLinkRegisterState(), kSaveFPRegs,
4522 EMIT_REMEMBERED_SET, check_needed);
4523 }
4524 }
4525
4526
4527 void LCodeGen::DoStoreKeyedGeneric(LStoreKeyedGeneric* instr) {
4528 ASSERT(ToRegister(instr->object()).Is(x2));
4529 ASSERT(ToRegister(instr->key()).Is(x1));
4530 ASSERT(ToRegister(instr->value()).Is(x0));
4531
4532 Handle<Code> ic = (instr->strict_mode_flag() == kStrictMode)
4533 ? isolate()->builtins()->KeyedStoreIC_Initialize_Strict()
4534 : isolate()->builtins()->KeyedStoreIC_Initialize();
4535 CallCode(ic, RelocInfo::CODE_TARGET, instr);
4536 }
4537
4538
4539 // TODO(jbramley): Once the merge is done and we're tracking bleeding_edge, try
4540 // to tidy up this function.
4541 void LCodeGen::DoStoreNamedField(LStoreNamedField* instr) {
4542 Representation representation = instr->representation();
4543
4544 Register object = ToRegister(instr->object());
4545 Register temp0 = ToRegister(instr->temp0());
4546 Register temp1 = ToRegister(instr->temp1());
4547 HObjectAccess access = instr->hydrogen()->access();
4548 int offset = access.offset();
4549
4550 Handle<Map> transition = instr->transition();
4551
4552 if (FLAG_track_heap_object_fields && representation.IsHeapObject()) {
4553 Register value = ToRegister(instr->value());
4554 if (!instr->hydrogen()->value()->type().IsHeapObject()) {
4555 DeoptimizeIfSmi(value, instr->environment());
4556 }
4557 } else if (FLAG_track_double_fields && representation.IsDouble()) {
4558 ASSERT(transition.is_null());
4559 ASSERT(access.IsInobject());
4560 ASSERT(!instr->hydrogen()->NeedsWriteBarrier());
4561 FPRegister value = ToDoubleRegister(instr->value());
4562 __ Str(value, FieldMemOperand(object, offset));
4563 return;
4564 }
4565
4566 if (!transition.is_null()) {
4567 // Store the new map value.
4568 Register new_map_value = temp0;
4569 __ Mov(new_map_value, Operand(transition));
4570 __ Str(new_map_value, FieldMemOperand(object, HeapObject::kMapOffset));
4571 if (instr->hydrogen()->NeedsWriteBarrierForMap()) {
4572 // Update the write barrier for the map field.
4573 __ RecordWriteField(object,
4574 HeapObject::kMapOffset,
4575 new_map_value,
4576 temp1,
4577 GetLinkRegisterState(),
4578 kSaveFPRegs,
4579 OMIT_REMEMBERED_SET,
4580 OMIT_SMI_CHECK);
4581 }
4582 }
4583
4584 // Do the store.
4585 Register value = ToRegister(instr->value());
4586 HType type = instr->hydrogen()->value()->type();
4587 SmiCheck check_needed =
4588 type.IsHeapObject() ? OMIT_SMI_CHECK : INLINE_SMI_CHECK;
4589 if (access.IsInobject()) {
4590 __ Str(value, FieldMemOperand(object, offset));
4591 if (instr->hydrogen()->NeedsWriteBarrier()) {
4592 // Update the write barrier for the object for in-object properties.
4593 __ RecordWriteField(object,
4594 offset,
4595 value, // Clobbered.
4596 temp0, // Clobbered.
4597 GetLinkRegisterState(),
4598 kSaveFPRegs,
4599 EMIT_REMEMBERED_SET,
4600 check_needed);
4601 }
4602 } else {
4603 __ Ldr(temp0, FieldMemOperand(object, JSObject::kPropertiesOffset));
4604 __ Str(value, FieldMemOperand(temp0, offset));
4605 if (instr->hydrogen()->NeedsWriteBarrier()) {
4606 // Update the write barrier for the properties array.
4607 __ RecordWriteField(temp0,
4608 offset,
4609 value, // Clobbered.
4610 temp1, // Clobbered.
4611 GetLinkRegisterState(),
4612 kSaveFPRegs,
4613 EMIT_REMEMBERED_SET,
4614 check_needed);
4615 }
4616 }
4617 }
4618
4619
4620 void LCodeGen::DoStoreNamedGeneric(LStoreNamedGeneric* instr) {
4621 ASSERT(ToRegister(instr->value()).is(x0));
4622 ASSERT(ToRegister(instr->object()).is(x1));
4623
4624 // Name must be in x2.
4625 __ Mov(x2, Operand(instr->name()));
4626 Handle<Code> ic = (instr->strict_mode_flag() == kStrictMode)
4627 ? isolate()->builtins()->StoreIC_Initialize_Strict()
4628 : isolate()->builtins()->StoreIC_Initialize();
4629 CallCode(ic, RelocInfo::CODE_TARGET, instr);
4630 }
4631
4632
4633 void LCodeGen::DoStringAdd(LStringAdd* instr) {
4634 Register left = ToRegister(instr->left());
4635 Register right = ToRegister(instr->right());
4636 __ Push(left, right);
4637   // TODO(jbramley): Once we have rebased, use instr->hydrogen()->flags() to
4638   // get the flags for the stub.
4639 StringAddStub stub(NO_STRING_CHECK_IN_STUB);
4640 CallCode(stub.GetCode(isolate()), RelocInfo::CODE_TARGET, instr);
4641 }
4642
4643
4644 void LCodeGen::DoStringCharCodeAt(LStringCharCodeAt* instr) {
4645 class DeferredStringCharCodeAt: public LDeferredCode {
4646 public:
4647 DeferredStringCharCodeAt(LCodeGen* codegen, LStringCharCodeAt* instr)
4648 : LDeferredCode(codegen), instr_(instr) { }
4649 virtual void Generate() { codegen()->DoDeferredStringCharCodeAt(instr_); }
4650 virtual LInstruction* instr() { return instr_; }
4651 private:
4652 LStringCharCodeAt* instr_;
4653 };
4654
4655 DeferredStringCharCodeAt* deferred =
4656 new(zone()) DeferredStringCharCodeAt(this, instr);
4657
4658 StringCharLoadGenerator::Generate(masm(),
4659 ToRegister(instr->string()),
4660 ToRegister(instr->index()),
4661 ToRegister(instr->result()),
4662 deferred->entry());
4663 __ Bind(deferred->exit());
4664 }
4665
4666
4667 void LCodeGen::DoDeferredStringCharCodeAt(LStringCharCodeAt* instr) {
4668 Register string = ToRegister(instr->string());
4669 Register result = ToRegister(instr->result());
4670
4671 // TODO(3095996): Get rid of this. For now, we need to make the
4672 // result register contain a valid pointer because it is already
4673 // contained in the register pointer map.
4674 __ Mov(result, 0);
4675
4676 PushSafepointRegistersScope scope(this, Safepoint::kWithRegisters);
4677 __ Push(string);
4678 // Push the index as a smi. This is safe because of the checks in
4679 // DoStringCharCodeAt above.
4680 Register index = ToRegister(instr->index());
4681 __ SmiTag(index);
4682 __ Push(index);
4683
4684 CallRuntimeFromDeferred(Runtime::kStringCharCodeAt, 2, instr);
4685 __ AssertSmi(x0);
4686 __ SmiUntag(x0);
4687 __ StoreToSafepointRegisterSlot(x0, result);
4688 }
4689
4690
4691 void LCodeGen::DoStringCharFromCode(LStringCharFromCode* instr) {
4692 class DeferredStringCharFromCode: public LDeferredCode {
4693 public:
4694 DeferredStringCharFromCode(LCodeGen* codegen, LStringCharFromCode* instr)
4695 : LDeferredCode(codegen), instr_(instr) { }
4696 virtual void Generate() { codegen()->DoDeferredStringCharFromCode(instr_); }
4697 virtual LInstruction* instr() { return instr_; }
4698 private:
4699 LStringCharFromCode* instr_;
4700 };
4701
4702 DeferredStringCharFromCode* deferred =
4703 new(zone()) DeferredStringCharFromCode(this, instr);
4704
4705 ASSERT(instr->hydrogen()->value()->representation().IsInteger32());
4706 Register char_code = ToRegister(instr->char_code());
4707 Register result = ToRegister(instr->result());
4708
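  // Fast path: look the code up in the single character string cache. A miss
  // is marked by the undefined value and falls back to the deferred runtime
  // call.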
4709 __ Cmp(char_code, Operand(String::kMaxOneByteCharCode));
4710 __ B(hi, deferred->entry());
4711 __ LoadRoot(result, Heap::kSingleCharacterStringCacheRootIndex);
4712 __ Add(result, result, Operand(char_code, LSL, kPointerSizeLog2));
4713 __ Ldr(result, FieldMemOperand(result, FixedArray::kHeaderSize));
4714 __ CompareRoot(result, Heap::kUndefinedValueRootIndex);
4715 __ B(eq, deferred->entry());
4716 __ Bind(deferred->exit());
4717 }
4718
4719
4720 void LCodeGen::DoDeferredStringCharFromCode(LStringCharFromCode* instr) {
4721 Register char_code = ToRegister(instr->char_code());
4722 Register result = ToRegister(instr->result());
4723
4724 // TODO(3095996): Get rid of this. For now, we need to make the
4725 // result register contain a valid pointer because it is already
4726 // contained in the register pointer map.
4727 __ Mov(result, 0);
4728
4729 PushSafepointRegistersScope scope(this, Safepoint::kWithRegisters);
4730 __ SmiTag(char_code);
4731 __ Push(char_code);
4732 CallRuntimeFromDeferred(Runtime::kCharFromCode, 1, instr);
4733 __ StoreToSafepointRegisterSlot(x0, result);
4734 }
4735
4736
4737 void LCodeGen::DoStringLength(LStringLength* instr) {
4738 Register string = ToRegister(instr->string());
4739 Register result = ToRegister(instr->result());
4740 __ Ldr(result, FieldMemOperand(string, String::kLengthOffset));
4741 }
4742
4743
4744 void LCodeGen::DoSubI(LSubI* instr) {
4745 bool can_overflow = instr->hydrogen()->CheckFlag(HValue::kCanOverflow);
4746 Register result = ToRegister32(instr->result());
4747 Register left = ToRegister32(instr->left());
4748 Operand right = ToOperand32(instr->right());
4749 if (can_overflow) {
4750 __ Subs(result, left, right);
4751 DeoptimizeIf(vs, instr->environment());
4752 } else {
4753 __ Sub(result, left, right);
4754 }
4755 }
4756
4757
4758 void LCodeGen::DoDeferredTaggedToI(LTaggedToI* instr,
4759 LOperand* value,
4760 LOperand* temp1,
4761 LOperand* temp2) {
4762 Register input = ToRegister(value);
4763 Register scratch1 = ToRegister(temp1);
4764 DoubleRegister dbl_scratch1 = double_scratch();
4765
4766 Label done;
4767
4768 // Load heap object map.
4769 __ Ldr(scratch1, FieldMemOperand(input, HeapObject::kMapOffset));
4770
4771 if (instr->truncating()) {
4772 Register output = ToRegister(instr->result());
4773 Register scratch2 = ToRegister(temp2);
4774 Label undefined;
4775
4776 // If it's not a heap number, jump to undefined check.
4777 __ JumpIfNotRoot(scratch1, Heap::kHeapNumberMapRootIndex, &undefined);
4778
4779 // A heap number: load value and convert to int32 using truncating function.
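    // ECMA262ToInt32 implements the spec's ToInt32: NaNs and infinities
    // become zero, and other values are reduced modulo 2^32, so the
    // truncating conversion never needs to deoptimize for a heap number.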
4780 __ Ldr(dbl_scratch1, FieldMemOperand(input, HeapNumber::kValueOffset));
4781 __ ECMA262ToInt32(output, dbl_scratch1, scratch1, scratch2);
4782 __ B(&done);
4783
4784 // Check for undefined. Undefined is converted to zero for truncating
4785 // conversions.
4786 __ Bind(&undefined);
4787
4788 DeoptimizeIfNotRoot(input, Heap::kUndefinedValueRootIndex,
4789 instr->environment());
4790 __ Mov(output, 0);
4791 } else {
4792 Register output = ToRegister32(instr->result());
4793
4794 DoubleRegister dbl_scratch2 = ToDoubleRegister(temp2);
4795 Label converted;
4796
4797     // Deoptimize if it's not a heap number.
4798 DeoptimizeIfNotRoot(scratch1, Heap::kHeapNumberMapRootIndex,
4799 instr->environment());
4800
4801     // A heap number: load the value and convert it to int32 with the
4802     // non-truncating conversion; if it is not exact, fall through and deopt.
4803 __ Ldr(dbl_scratch1, FieldMemOperand(input, HeapNumber::kValueOffset));
4804 __ TryConvertDoubleToInt32(output, dbl_scratch1, dbl_scratch2, &converted);
4805 Deoptimize(instr->environment());
4806
4807 __ Bind(&converted);
4808
4809 if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) {
4810 __ Cmp(output, 0);
4811 __ B(ne, &done);
4812 __ Fmov(scratch1, dbl_scratch1);
4813 DeoptimizeIfNegative(scratch1, instr->environment());
4814 }
4815 }
4816 __ Bind(&done);
4817 }
4818
4819
4820 void LCodeGen::DoTaggedToI(LTaggedToI* instr) {
4821 class DeferredTaggedToI: public LDeferredCode {
4822 public:
4823 DeferredTaggedToI(LCodeGen* codegen, LTaggedToI* instr)
4824 : LDeferredCode(codegen), instr_(instr) { }
4825 virtual void Generate() {
4826 codegen()->DoDeferredTaggedToI(instr_, instr_->value(), instr_->temp1(),
4827 instr_->temp2());
4828 }
4829
4830 virtual LInstruction* instr() { return instr_; }
4831 private:
4832 LTaggedToI* instr_;
4833 };
4834
4835 Register input = ToRegister(instr->value());
4836 Register output = ToRegister(instr->result());
4837
4838 DeferredTaggedToI* deferred = new(zone()) DeferredTaggedToI(this, instr);
4839
4840 // TODO(jbramley): We can't use JumpIfNotSmi here because the tbz it uses
4841 // doesn't always have enough range. Consider making a variant of it, or a
4842 // TestIsSmi helper.
4843 STATIC_ASSERT(kSmiTag == 0);
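  // With kSmiTag == 0, (input & kSmiTagMask) is zero exactly for smis, so a
  // non-zero result (ne) means a heap object and we take the deferred path.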
4844 __ Tst(input, kSmiTagMask);
4845 __ B(ne, deferred->entry());
4846
4847 __ SmiUntag(output, input);
4848 __ Bind(deferred->exit());
4849 }
4850
4851
4852 void LCodeGen::DoThisFunction(LThisFunction* instr) {
4853 Register result = ToRegister(instr->result());
4854 __ Ldr(result, MemOperand(fp, JavaScriptFrameConstants::kFunctionOffset));
4855 }
4856
4857
4858 void LCodeGen::DoToFastProperties(LToFastProperties* instr) {
4859 ASSERT(ToRegister(instr->value()).Is(x0));
4860 ASSERT(ToRegister(instr->result()).Is(x0));
4861 ASM_UNIMPLEMENTED_BREAK("DoToFastProperties");
4862 __ Push(x0);
4863 CallRuntime(Runtime::kToFastProperties, 1, instr);
4864 }
4865
4866
4867 void LCodeGen::DoThrow(LThrow* instr) {
4868 Register value = ToRegister(instr->value());
4869 __ Push(value);
4870 CallRuntime(Runtime::kThrow, 1, instr);
4871
4872 if (FLAG_debug_code) {
4873 __ Abort("Unreachable code in Throw.");
4874 }
4875 }
4876
4877
4878 void LCodeGen::DoTransitionElementsKind(LTransitionElementsKind* instr) {
4879 Register object = ToRegister(instr->object());
4880
4881 Handle<Map> from_map = instr->original_map();
4882 Handle<Map> to_map = instr->transitioned_map();
4883 ElementsKind from_kind = instr->from_kind();
4884 ElementsKind to_kind = instr->to_kind();
4885
4886 Register scratch;
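  // A simple map change transition only swaps the map: the representation of
  // the elements backing store is unchanged (e.g. FAST_SMI_ELEMENTS to
  // FAST_ELEMENTS).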
4887 if (IsSimpleMapChangeTransition(from_kind, to_kind)) {
4888 scratch = ToRegister(instr->temp1());
4889 } else {
4890 ASSERT(FLAG_compiled_transitions || instr->IsMarkedAsCall());
4891 scratch = x10;
4892 }
4893
4894 Label not_applicable;
4895 __ CompareMap(object, scratch, from_map);
4896 __ B(ne, &not_applicable);
4897
4898 if (IsSimpleMapChangeTransition(from_kind, to_kind)) {
4899 Register new_map = ToRegister(instr->temp2());
4900 __ Mov(new_map, Operand(to_map));
4901 __ Str(new_map, FieldMemOperand(object, HeapObject::kMapOffset));
4902 // Write barrier.
4903 __ RecordWriteField(object, HeapObject::kMapOffset, new_map, scratch,
4904 GetLinkRegisterState(), kDontSaveFPRegs);
4905 } else if (FLAG_compiled_transitions) {
4906 PushSafepointRegistersScope scope(this, Safepoint::kWithRegisters);
4907 __ Mov(x0, object);
4908 __ Mov(x1, Operand(to_map));
4909 TransitionElementsKindStub stub(from_kind, to_kind);
4910 __ CallStub(&stub);
4911 RecordSafepointWithRegisters(
4912 instr->pointer_map(), 0, Safepoint::kNoLazyDeopt);
4913 } else if ((IsFastSmiElementsKind(from_kind) &&
4914 IsFastDoubleElementsKind(to_kind)) ||
4915 (IsFastDoubleElementsKind(from_kind) &&
4916 IsFastObjectElementsKind(to_kind))) {
4917 ASSERT((instr->temp1() == NULL) && (instr->temp2() == NULL));
4918 __ Mov(x2, object);
4919 __ Mov(x3, Operand(to_map));
4920 if (IsFastSmiElementsKind(from_kind)) {
4921 CallCode(isolate()->builtins()->TransitionElementsSmiToDouble(),
4922 RelocInfo::CODE_TARGET, instr);
4923 } else if (IsFastDoubleElementsKind(from_kind)) {
4924 CallCode(isolate()->builtins()->TransitionElementsDoubleToObject(),
4925 RelocInfo::CODE_TARGET, instr);
4926 }
4927 } else {
4928 UNREACHABLE();
4929 }
4930 __ Bind(&not_applicable);
4931 }
4932
4933
4934 void LCodeGen::DoTrapAllocationMemento(LTrapAllocationMemento* instr) {
4935 Register object = ToRegister(instr->object());
4936 Register temp1 = ToRegister(instr->temp1());
4937 Register temp2 = ToRegister(instr->temp2());
4938 __ TestJSArrayForAllocationSiteInfo(object, temp1, temp2);
4939 DeoptimizeIf(eq, instr->environment());
4940 }
4941
4942
4943 void LCodeGen::DoTypeof(LTypeof* instr) {
4944 Register input = ToRegister(instr->value());
4945 __ Push(input);
4946 CallRuntime(Runtime::kTypeof, 1, instr);
4947 }
4948
4949
4950 void LCodeGen::DoTypeofIsAndBranch(LTypeofIsAndBranch* instr) {
4951 Handle<String> type_name = instr->type_literal();
4952 Label* true_label = instr->TrueLabel(chunk_);
4953 Label* false_label = instr->FalseLabel(chunk_);
4954 Register value = ToRegister(instr->value());
4955
4956 if (type_name->Equals(heap()->number_string())) {
4957 ASSERT(instr->temp1() != NULL);
4958 Register map = ToRegister(instr->temp1());
4959
4960 __ JumpIfSmi(value, true_label);
4961 __ Ldr(map, FieldMemOperand(value, HeapObject::kMapOffset));
4962 __ CompareRoot(map, Heap::kHeapNumberMapRootIndex);
4963 EmitBranch(instr, eq);
4964
4965 } else if (type_name->Equals(heap()->string_string())) {
4966 ASSERT((instr->temp1() != NULL) && (instr->temp2() != NULL));
4967 Register map = ToRegister(instr->temp1());
4968 Register scratch = ToRegister(instr->temp2());
4969
4970 __ JumpIfSmi(value, false_label);
4971 __ JumpIfObjectType(
4972 value, map, scratch, FIRST_NONSTRING_TYPE, false_label, ge);
4973 __ Ldrb(scratch, FieldMemOperand(map, Map::kBitFieldOffset));
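    // The test yields eq when the undetectable bit is clear, so only
    // detectable string objects report typeof "string".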
4974 EmitTestAndBranch(instr, eq, scratch, 1 << Map::kIsUndetectable);
4975
4976 } else if (type_name->Equals(heap()->symbol_string())) {
4977 ASSERT((instr->temp1() != NULL) && (instr->temp2() != NULL));
4978 Register map = ToRegister(instr->temp1());
4979 Register scratch = ToRegister(instr->temp2());
4980
4981 __ JumpIfSmi(value, false_label);
4982 __ CompareObjectType(value, map, scratch, SYMBOL_TYPE);
4983 EmitBranch(instr, eq);
4984
4985 } else if (type_name->Equals(heap()->boolean_string())) {
4986 __ JumpIfRoot(value, Heap::kTrueValueRootIndex, true_label);
4987 __ CompareRoot(value, Heap::kFalseValueRootIndex);
4988 EmitBranch(instr, eq);
4989
4990 } else if (FLAG_harmony_typeof && type_name->Equals(heap()->null_string())) {
4991 __ CompareRoot(value, Heap::kNullValueRootIndex);
4992 EmitBranch(instr, eq);
4993
4994 } else if (type_name->Equals(heap()->undefined_string())) {
4995 ASSERT(instr->temp1() != NULL);
4996 Register scratch = ToRegister(instr->temp1());
4997
4998 __ JumpIfRoot(value, Heap::kUndefinedValueRootIndex, true_label);
4999 __ JumpIfSmi(value, false_label);
5000 // Undetectable objects report typeof "undefined", so jump to the true branch.
5001 __ Ldr(scratch, FieldMemOperand(value, HeapObject::kMapOffset));
5002 __ Ldrb(scratch, FieldMemOperand(scratch, Map::kBitFieldOffset));
5003 EmitTestAndBranch(instr, ne, scratch, 1 << Map::kIsUndetectable);
5004
5005 } else if (type_name->Equals(heap()->function_string())) {
5006 STATIC_ASSERT(NUM_OF_CALLABLE_SPEC_OBJECT_TYPES == 2);
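    // The two callable types are JS_FUNCTION_TYPE and JS_FUNCTION_PROXY_TYPE,
    // which is why checking exactly those two cases below is sufficient.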
5007 ASSERT(instr->temp1() != NULL);
5008 Register type = ToRegister(instr->temp1());
5009
5010 __ JumpIfSmi(value, false_label);
5011 __ JumpIfObjectType(value, type, type, JS_FUNCTION_TYPE, true_label);
5012 // The HeapObject's type was loaded into the type register by JumpIfObjectType.
5013 EmitCompareAndBranch(instr, eq, type, JS_FUNCTION_PROXY_TYPE);
5014
5015 } else if (type_name->Equals(heap()->object_string())) {
5016 ASSERT((instr->temp1() != NULL) && (instr->temp2() != NULL));
5017 Register map = ToRegister(instr->temp1());
5018 Register scratch = ToRegister(instr->temp2());
5019
5020 __ JumpIfSmi(value, false_label);
5021 if (!FLAG_harmony_typeof) {
5022 __ JumpIfRoot(value, Heap::kNullValueRootIndex, true_label);
5023 }
5024 __ JumpIfObjectType(value, map, scratch,
5025 FIRST_NONCALLABLE_SPEC_OBJECT_TYPE, false_label, lt);
5026 __ CompareInstanceType(map, scratch, LAST_NONCALLABLE_SPEC_OBJECT_TYPE);
5027 __ B(gt, false_label);
5028 // Check for undetectable objects => false.
5029 __ Ldrb(scratch, FieldMemOperand(map, Map::kBitFieldOffset));
5030 EmitTestAndBranch(instr, eq, scratch, 1 << Map::kIsUndetectable);
5031
5032 } else {
5033 __ B(false_label);
5034 }
5035 }
5036
5037
5038 void LCodeGen::DoUint32ToDouble(LUint32ToDouble* instr) {
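  // Ucvtf performs an unsigned conversion; a signed Scvtf would misinterpret
  // inputs with the top bit set as negative values.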
5039 __ Ucvtf(ToDoubleRegister(instr->result()), ToRegister32(instr->value()));
5040 }
5041
5042
5043 void LCodeGen::DoValueOf(LValueOf* instr) {
5044 Register input = ToRegister(instr->value());
5045 Register result = ToRegister(instr->result());
5046 Register scratch = ToRegister(instr->temp());
5047 Label done;
5048
5049 ASSERT(input.Is(result));
5050
5051 // If the object is a smi, return it.
5052 __ JumpIfSmi(input, &done);
5053
5054 // If the object is not a value type, return the object; otherwise
5055 // return the wrapped value.
5056 __ JumpIfNotObjectType(input, scratch, scratch, JS_VALUE_TYPE, &done);
5057 __ Ldr(result, FieldMemOperand(input, JSValue::kValueOffset));
5058
5059 __ Bind(&done);
5060 }
5061
5062
5063 void LCodeGen::DoCheckMapValue(LCheckMapValue* instr) {
5064 Register object = ToRegister(instr->value());
5065 Register map = ToRegister(instr->map());
5066 Register temp = ToRegister(instr->temp());
5067 __ Ldr(temp, FieldMemOperand(object, HeapObject::kMapOffset));
5068 __ Cmp(map, temp);
5069 DeoptimizeIf(ne, instr->environment());
5070 }
5071
5072
5073 void LCodeGen::DoWrapReceiver(LWrapReceiver* instr) {
5074 Register receiver = ToRegister(instr->receiver());
5075 Register function = ToRegister(instr->function());
5076 Register result = ToRegister(instr->result());
5077 Register temp = ToRegister(instr->temp());
5078
5079 // If the receiver is null or undefined, we have to pass the global object as
5080 // a receiver to normal functions. Values have to be passed unchanged to
5081 // builtins and strict-mode functions.
5082 Label global_object, done, deopt;
5083
5084 // Do not transform the receiver to object for strict mode functions.
5085 __ Ldr(temp, FieldMemOperand(function,
5086 JSFunction::kSharedFunctionInfoOffset));
5087 __ Ldr(temp,
5088 UntagSmiFieldMemOperand(temp,
5089 SharedFunctionInfo::kCompilerHintsOffset));
5090 __ Tbnz(temp, SharedFunctionInfo::kStrictModeFunction, &done);
5091
5092 // Do not transform the receiver to object for builtins.
5093 __ Tbnz(temp, SharedFunctionInfo::kNative, &done);
5094
5095 // Normal function. Replace undefined or null with global receiver.
5096 __ JumpIfRoot(receiver, Heap::kNullValueRootIndex, &global_object);
5097 __ JumpIfRoot(receiver, Heap::kUndefinedValueRootIndex, &global_object);
5098
5099 // Deoptimize if the receiver is not a JS object.
5100 __ JumpIfSmi(receiver, &deopt);
5101 __ CompareObjectType(receiver, temp, temp, FIRST_SPEC_OBJECT_TYPE);
5102 __ B(ge, &done);
5103 // Otherwise, fall through to deopt.
5104
5105 __ Bind(&deopt);
5106 Deoptimize(instr->environment());
5107
5108 __ Bind(&global_object);
5109 // We could load directly into the result register here, but the additional
5110 // branches required are likely to be more time-consuming than one additional
5111 // move.
5112 __ Ldr(receiver, GlobalObjectMemOperand());
5113 __ Ldr(receiver, FieldMemOperand(receiver,
5114 JSGlobalObject::kGlobalReceiverOffset));
5115 __ Bind(&done);
5116
5117 __ Mov(result, receiver);
5118 }
5119
5120
5121 void LCodeGen::DoLoadFieldByIndex(LLoadFieldByIndex* instr) {
5122 Register object = ToRegister(instr->object());
5123 Register index = ToRegister(instr->index());
5124 Register result = ToRegister(instr->result());
5125
5126 __ AssertSmi(index);
5127
5128 Label out_of_object, done;
5129 __ Cmp(index, Operand(Smi::FromInt(0)));
5130 __ B(lt, &out_of_object);
5131
5132 STATIC_ASSERT(kPointerSizeLog2 > kSmiTagSize);
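  // UntagSmiAndScale yields (untagged index) * kPointerSize as a single
  // operand, so the in-object field is loaded from
  // object + JSObject::kHeaderSize + index * kPointerSize (minus the tag).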
5133 __ Add(result, object, Operand::UntagSmiAndScale(index, kPointerSizeLog2));
5134 __ Ldr(result, FieldMemOperand(result, JSObject::kHeaderSize));
5135
5136 __ B(&done);
5137
5138 __ Bind(&out_of_object);
5139 __ Ldr(result, FieldMemOperand(object, JSObject::kPropertiesOffset));
5140 // Out-of-object property N is encoded as index -(N + 1).
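  // For example, index == -1 (as a smi) selects element 0 of the properties
  // array, and index == -2 selects element 1.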
5141 __ Sub(result, result, Operand::UntagSmiAndScale(index, kPointerSizeLog2));
5142 __ Ldr(result, FieldMemOperand(result,
5143 FixedArray::kHeaderSize - kPointerSize));
5144 __ Bind(&done);
5145 }
5146
5147 } } // namespace v8::internal