Chromium Code Reviews

Side by Side Diff: src/a64/lithium-codegen-a64.cc

Issue 181453002: Reset trunk to 3.24.35.4 (Closed)
Base URL: https://v8.googlecode.com/svn/trunk
Patch Set: Created 6 years, 10 months ago
1 // Copyright 2013 the V8 project authors. All rights reserved.
2 // Redistribution and use in source and binary forms, with or without
3 // modification, are permitted provided that the following conditions are
4 // met:
5 //
6 // * Redistributions of source code must retain the above copyright
7 // notice, this list of conditions and the following disclaimer.
8 // * Redistributions in binary form must reproduce the above
9 // copyright notice, this list of conditions and the following
10 // disclaimer in the documentation and/or other materials provided
11 // with the distribution.
12 // * Neither the name of Google Inc. nor the names of its
13 // contributors may be used to endorse or promote products derived
14 // from this software without specific prior written permission.
15 //
16 // THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
17 // "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
18 // LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
19 // A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
20 // OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
21 // SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
22 // LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
23 // DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
24 // THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
25 // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
26 // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
27
28 #include "v8.h"
29
30 #include "a64/lithium-codegen-a64.h"
31 #include "a64/lithium-gap-resolver-a64.h"
32 #include "code-stubs.h"
33 #include "stub-cache.h"
34 #include "hydrogen-osr.h"
35
36 namespace v8 {
37 namespace internal {
38
39
40 class SafepointGenerator V8_FINAL : public CallWrapper {
41 public:
42 SafepointGenerator(LCodeGen* codegen,
43 LPointerMap* pointers,
44 Safepoint::DeoptMode mode)
45 : codegen_(codegen),
46 pointers_(pointers),
47 deopt_mode_(mode) { }
48 virtual ~SafepointGenerator() { }
49
50 virtual void BeforeCall(int call_size) const { }
51
52 virtual void AfterCall() const {
53 codegen_->RecordSafepoint(pointers_, deopt_mode_);
54 }
55
56 private:
57 LCodeGen* codegen_;
58 LPointerMap* pointers_;
59 Safepoint::DeoptMode deopt_mode_;
60 };
61
62
63 #define __ masm()->
64
65 // Emit code to branch if the given condition holds.
66 // The code generated here doesn't modify the flags and they must have
67 // been set by some prior instructions.
68 //
69 // The EmitInverted function simply inverts the condition.
70 class BranchOnCondition : public BranchGenerator {
71 public:
72 BranchOnCondition(LCodeGen* codegen, Condition cond)
73 : BranchGenerator(codegen),
74 cond_(cond) { }
75
76 virtual void Emit(Label* label) const {
77 __ B(cond_, label);
78 }
79
80 virtual void EmitInverted(Label* label) const {
81 if (cond_ != al) {
82 __ B(InvertCondition(cond_), label);
83 }
84 }
85
86 private:
87 Condition cond_;
88 };
89
90
91 // Emit code to compare lhs and rhs and branch if the condition holds.
92 // This uses MacroAssembler's CompareAndBranch function so it will handle
93 // converting the comparison to Cbz/Cbnz if the right-hand side is 0.
94 //
95 // EmitInverted still compares the two operands but inverts the condition.
96 class CompareAndBranch : public BranchGenerator {
97 public:
98 CompareAndBranch(LCodeGen* codegen,
99 Condition cond,
100 const Register& lhs,
101 const Operand& rhs)
102 : BranchGenerator(codegen),
103 cond_(cond),
104 lhs_(lhs),
105 rhs_(rhs) { }
106
107 virtual void Emit(Label* label) const {
108 __ CompareAndBranch(lhs_, rhs_, cond_, label);
109 }
110
111 virtual void EmitInverted(Label* label) const {
112 __ CompareAndBranch(lhs_, rhs_, InvertCondition(cond_), label);
113 }
114
115 private:
116 Condition cond_;
117 const Register& lhs_;
118 const Operand& rhs_;
119 };
120
121
122 // Test the input with the given mask and branch if the condition holds.
123 // If the condition is 'eq' or 'ne' this will use MacroAssembler's
124 // TestAndBranchIfAllClear and TestAndBranchIfAnySet so it will handle the
125 // conversion to Tbz/Tbnz when possible.
126 class TestAndBranch : public BranchGenerator {
127 public:
128 TestAndBranch(LCodeGen* codegen,
129 Condition cond,
130 const Register& value,
131 uint64_t mask)
132 : BranchGenerator(codegen),
133 cond_(cond),
134 value_(value),
135 mask_(mask) { }
136
137 virtual void Emit(Label* label) const {
138 switch (cond_) {
139 case eq:
140 __ TestAndBranchIfAllClear(value_, mask_, label);
141 break;
142 case ne:
143 __ TestAndBranchIfAnySet(value_, mask_, label);
144 break;
145 default:
146 __ Tst(value_, mask_);
147 __ B(cond_, label);
148 }
149 }
150
151 virtual void EmitInverted(Label* label) const {
152 // The inverse of "all clear" is "any set" and vice versa.
153 switch (cond_) {
154 case eq:
155 __ TestAndBranchIfAnySet(value_, mask_, label);
156 break;
157 case ne:
158 __ TestAndBranchIfAllClear(value_, mask_, label);
159 break;
160 default:
161 __ Tst(value_, mask_);
162 __ B(InvertCondition(cond_), label);
163 }
164 }
165
166 private:
167 Condition cond_;
168 const Register& value_;
169 uint64_t mask_;
170 };
171
172
173 // Test the input and branch if it is non-zero and not a NaN.
174 class BranchIfNonZeroNumber : public BranchGenerator {
175 public:
176 BranchIfNonZeroNumber(LCodeGen* codegen, const FPRegister& value,
177 const FPRegister& scratch)
178 : BranchGenerator(codegen), value_(value), scratch_(scratch) { }
179
180 virtual void Emit(Label* label) const {
181 __ Fabs(scratch_, value_);
182 // Compare with 0.0. Because scratch_ is positive, the result can be one of
183 // nZCv (equal), nzCv (greater) or nzCV (unordered).
184 __ Fcmp(scratch_, 0.0);
185 __ B(gt, label);
186 }
187
188 virtual void EmitInverted(Label* label) const {
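    // Branch if the value is zero or NaN: 'le' is taken when Z is set (equal
    // to 0.0) or when the comparison is unordered (V set, N clear), i.e.
    // exactly the cases in which Emit() would not branch.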
189 __ Fabs(scratch_, value_);
190 __ Fcmp(scratch_, 0.0);
191 __ B(le, label);
192 }
193
194 private:
195 const FPRegister& value_;
196 const FPRegister& scratch_;
197 };
198
199
200 // Test the input and branch if it is a heap number.
201 class BranchIfHeapNumber : public BranchGenerator {
202 public:
203 BranchIfHeapNumber(LCodeGen* codegen, const Register& value)
204 : BranchGenerator(codegen), value_(value) { }
205
206 virtual void Emit(Label* label) const {
207 __ JumpIfHeapNumber(value_, label);
208 }
209
210 virtual void EmitInverted(Label* label) const {
211 __ JumpIfNotHeapNumber(value_, label);
212 }
213
214 private:
215 const Register& value_;
216 };
217
218
219 // Test the input and branch if it is the specified root value.
220 class BranchIfRoot : public BranchGenerator {
221 public:
222 BranchIfRoot(LCodeGen* codegen, const Register& value,
223 Heap::RootListIndex index)
224 : BranchGenerator(codegen), value_(value), index_(index) { }
225
226 virtual void Emit(Label* label) const {
227 __ JumpIfRoot(value_, index_, label);
228 }
229
230 virtual void EmitInverted(Label* label) const {
231 __ JumpIfNotRoot(value_, index_, label);
232 }
233
234 private:
235 const Register& value_;
236 const Heap::RootListIndex index_;
237 };
238
239
240 void LCodeGen::WriteTranslation(LEnvironment* environment,
241 Translation* translation) {
242 if (environment == NULL) return;
243
244 // The translation includes one command per value in the environment.
245 int translation_size = environment->translation_size();
246 // The output frame height does not include the parameters.
247 int height = translation_size - environment->parameter_count();
248
249 WriteTranslation(environment->outer(), translation);
250 bool has_closure_id = !info()->closure().is_null() &&
251 !info()->closure().is_identical_to(environment->closure());
252 int closure_id = has_closure_id
253 ? DefineDeoptimizationLiteral(environment->closure())
254 : Translation::kSelfLiteralId;
255
256 switch (environment->frame_type()) {
257 case JS_FUNCTION:
258 translation->BeginJSFrame(environment->ast_id(), closure_id, height);
259 break;
260 case JS_CONSTRUCT:
261 translation->BeginConstructStubFrame(closure_id, translation_size);
262 break;
263 case JS_GETTER:
264 ASSERT(translation_size == 1);
265 ASSERT(height == 0);
266 translation->BeginGetterStubFrame(closure_id);
267 break;
268 case JS_SETTER:
269 ASSERT(translation_size == 2);
270 ASSERT(height == 0);
271 translation->BeginSetterStubFrame(closure_id);
272 break;
273 case STUB:
274 translation->BeginCompiledStubFrame();
275 break;
276 case ARGUMENTS_ADAPTOR:
277 translation->BeginArgumentsAdaptorFrame(closure_id, translation_size);
278 break;
279 default:
280 UNREACHABLE();
281 }
282
283 int object_index = 0;
284 int dematerialized_index = 0;
285 for (int i = 0; i < translation_size; ++i) {
286 LOperand* value = environment->values()->at(i);
287
288 AddToTranslation(environment,
289 translation,
290 value,
291 environment->HasTaggedValueAt(i),
292 environment->HasUint32ValueAt(i),
293 &object_index,
294 &dematerialized_index);
295 }
296 }
297
298
299 void LCodeGen::AddToTranslation(LEnvironment* environment,
300 Translation* translation,
301 LOperand* op,
302 bool is_tagged,
303 bool is_uint32,
304 int* object_index_pointer,
305 int* dematerialized_index_pointer) {
306 if (op == LEnvironment::materialization_marker()) {
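    // The marker means this slot describes a dematerialized (captured or
    // arguments) object. Its field values are stored after the regular
    // environment values, so recurse over them and emit an object
    // description instead of a plain value.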
307 int object_index = (*object_index_pointer)++;
308 if (environment->ObjectIsDuplicateAt(object_index)) {
309 int dupe_of = environment->ObjectDuplicateOfAt(object_index);
310 translation->DuplicateObject(dupe_of);
311 return;
312 }
313 int object_length = environment->ObjectLengthAt(object_index);
314 if (environment->ObjectIsArgumentsAt(object_index)) {
315 translation->BeginArgumentsObject(object_length);
316 } else {
317 translation->BeginCapturedObject(object_length);
318 }
319 int dematerialized_index = *dematerialized_index_pointer;
320 int env_offset = environment->translation_size() + dematerialized_index;
321 *dematerialized_index_pointer += object_length;
322 for (int i = 0; i < object_length; ++i) {
323 LOperand* value = environment->values()->at(env_offset + i);
324 AddToTranslation(environment,
325 translation,
326 value,
327 environment->HasTaggedValueAt(env_offset + i),
328 environment->HasUint32ValueAt(env_offset + i),
329 object_index_pointer,
330 dematerialized_index_pointer);
331 }
332 return;
333 }
334
335 if (op->IsStackSlot()) {
336 if (is_tagged) {
337 translation->StoreStackSlot(op->index());
338 } else if (is_uint32) {
339 translation->StoreUint32StackSlot(op->index());
340 } else {
341 translation->StoreInt32StackSlot(op->index());
342 }
343 } else if (op->IsDoubleStackSlot()) {
344 translation->StoreDoubleStackSlot(op->index());
345 } else if (op->IsArgument()) {
346 ASSERT(is_tagged);
347 int src_index = GetStackSlotCount() + op->index();
348 translation->StoreStackSlot(src_index);
349 } else if (op->IsRegister()) {
350 Register reg = ToRegister(op);
351 if (is_tagged) {
352 translation->StoreRegister(reg);
353 } else if (is_uint32) {
354 translation->StoreUint32Register(reg);
355 } else {
356 translation->StoreInt32Register(reg);
357 }
358 } else if (op->IsDoubleRegister()) {
359 DoubleRegister reg = ToDoubleRegister(op);
360 translation->StoreDoubleRegister(reg);
361 } else if (op->IsConstantOperand()) {
362 HConstant* constant = chunk()->LookupConstant(LConstantOperand::cast(op));
363 int src_index = DefineDeoptimizationLiteral(constant->handle(isolate()));
364 translation->StoreLiteral(src_index);
365 } else {
366 UNREACHABLE();
367 }
368 }
369
370
371 int LCodeGen::DefineDeoptimizationLiteral(Handle<Object> literal) {
372 int result = deoptimization_literals_.length();
373 for (int i = 0; i < deoptimization_literals_.length(); ++i) {
374 if (deoptimization_literals_[i].is_identical_to(literal)) return i;
375 }
376 deoptimization_literals_.Add(literal, zone());
377 return result;
378 }
379
380
381 void LCodeGen::RegisterEnvironmentForDeoptimization(LEnvironment* environment,
382 Safepoint::DeoptMode mode) {
383 if (!environment->HasBeenRegistered()) {
384 int frame_count = 0;
385 int jsframe_count = 0;
386 for (LEnvironment* e = environment; e != NULL; e = e->outer()) {
387 ++frame_count;
388 if (e->frame_type() == JS_FUNCTION) {
389 ++jsframe_count;
390 }
391 }
392 Translation translation(&translations_, frame_count, jsframe_count, zone());
393 WriteTranslation(environment, &translation);
394 int deoptimization_index = deoptimizations_.length();
395 int pc_offset = masm()->pc_offset();
396 environment->Register(deoptimization_index,
397 translation.index(),
398 (mode == Safepoint::kLazyDeopt) ? pc_offset : -1);
399 deoptimizations_.Add(environment, zone());
400 }
401 }
402
403
404 void LCodeGen::CallCode(Handle<Code> code,
405 RelocInfo::Mode mode,
406 LInstruction* instr) {
407 CallCodeGeneric(code, mode, instr, RECORD_SIMPLE_SAFEPOINT);
408 }
409
410
411 void LCodeGen::CallCodeGeneric(Handle<Code> code,
412 RelocInfo::Mode mode,
413 LInstruction* instr,
414 SafepointMode safepoint_mode) {
415 ASSERT(instr != NULL);
416
417 Assembler::BlockConstPoolScope scope(masm_);
418 __ Call(code, mode);
419 RecordSafepointWithLazyDeopt(instr, safepoint_mode);
420
421 if ((code->kind() == Code::BINARY_OP_IC) ||
422 (code->kind() == Code::COMPARE_IC)) {
423 // Signal that we don't inline smi code before these stubs in the
424 // optimizing code generator.
425 InlineSmiCheckInfo::EmitNotInlined(masm());
426 }
427 }
428
429
430 void LCodeGen::DoCallFunction(LCallFunction* instr) {
431 ASSERT(ToRegister(instr->context()).is(cp));
432 ASSERT(ToRegister(instr->function()).Is(x1));
433 ASSERT(ToRegister(instr->result()).Is(x0));
434
435 int arity = instr->arity();
436 CallFunctionStub stub(arity, instr->hydrogen()->function_flags());
437 CallCode(stub.GetCode(isolate()), RelocInfo::CODE_TARGET, instr);
438 }
439
440
441 void LCodeGen::DoCallNew(LCallNew* instr) {
442 ASSERT(ToRegister(instr->context()).is(cp));
443 ASSERT(instr->IsMarkedAsCall());
444 ASSERT(ToRegister(instr->constructor()).is(x1));
445
446 __ Mov(x0, instr->arity());
447 // No cell in x2 for construct type feedback in optimized code.
448 Handle<Object> undefined_value(isolate()->factory()->undefined_value());
449 __ Mov(x2, Operand(undefined_value));
450
451 CallConstructStub stub(NO_CALL_FUNCTION_FLAGS);
452 CallCode(stub.GetCode(isolate()), RelocInfo::CONSTRUCT_CALL, instr);
453
454 ASSERT(ToRegister(instr->result()).is(x0));
455 }
456
457
458 void LCodeGen::DoCallNewArray(LCallNewArray* instr) {
459 ASSERT(instr->IsMarkedAsCall());
460 ASSERT(ToRegister(instr->context()).is(cp));
461 ASSERT(ToRegister(instr->constructor()).is(x1));
462
463 __ Mov(x0, Operand(instr->arity()));
464 __ Mov(x2, Operand(factory()->undefined_value()));
465
466 ElementsKind kind = instr->hydrogen()->elements_kind();
467 AllocationSiteOverrideMode override_mode =
468 (AllocationSite::GetMode(kind) == TRACK_ALLOCATION_SITE)
469 ? DISABLE_ALLOCATION_SITES
470 : DONT_OVERRIDE;
471
472 if (instr->arity() == 0) {
473 ArrayNoArgumentConstructorStub stub(kind, override_mode);
474 CallCode(stub.GetCode(isolate()), RelocInfo::CONSTRUCT_CALL, instr);
475 } else if (instr->arity() == 1) {
476 Label done;
477 if (IsFastPackedElementsKind(kind)) {
478 Label packed_case;
479
480 // We might need to create a holey array; look at the first argument.
481 __ Peek(x10, 0);
482 __ Cbz(x10, &packed_case);
483
484 ElementsKind holey_kind = GetHoleyElementsKind(kind);
485 ArraySingleArgumentConstructorStub stub(holey_kind, override_mode);
486 CallCode(stub.GetCode(isolate()), RelocInfo::CONSTRUCT_CALL, instr);
487 __ B(&done);
488 __ Bind(&packed_case);
489 }
490
491 ArraySingleArgumentConstructorStub stub(kind, override_mode);
492 CallCode(stub.GetCode(isolate()), RelocInfo::CONSTRUCT_CALL, instr);
493 __ Bind(&done);
494 } else {
495 ArrayNArgumentsConstructorStub stub(kind, override_mode);
496 CallCode(stub.GetCode(isolate()), RelocInfo::CONSTRUCT_CALL, instr);
497 }
498
499 ASSERT(ToRegister(instr->result()).is(x0));
500 }
501
502
503 void LCodeGen::CallRuntime(const Runtime::Function* function,
504 int num_arguments,
505 LInstruction* instr,
506 SaveFPRegsMode save_doubles) {
507 ASSERT(instr != NULL);
508
509 __ CallRuntime(function, num_arguments, save_doubles);
510
511 RecordSafepointWithLazyDeopt(instr, RECORD_SIMPLE_SAFEPOINT);
512 }
513
514
515 void LCodeGen::LoadContextFromDeferred(LOperand* context) {
516 if (context->IsRegister()) {
517 __ Mov(cp, ToRegister(context));
518 } else if (context->IsStackSlot()) {
519 __ Ldr(cp, ToMemOperand(context));
520 } else if (context->IsConstantOperand()) {
521 HConstant* constant =
522 chunk_->LookupConstant(LConstantOperand::cast(context));
523 __ LoadHeapObject(cp,
524 Handle<HeapObject>::cast(constant->handle(isolate())));
525 } else {
526 UNREACHABLE();
527 }
528 }
529
530
531 void LCodeGen::CallRuntimeFromDeferred(Runtime::FunctionId id,
532 int argc,
533 LInstruction* instr,
534 LOperand* context) {
535 LoadContextFromDeferred(context);
536 __ CallRuntimeSaveDoubles(id);
537 RecordSafepointWithRegisters(
538 instr->pointer_map(), argc, Safepoint::kNoLazyDeopt);
539 }
540
541
542 void LCodeGen::RecordAndWritePosition(int position) {
543 if (position == RelocInfo::kNoPosition) return;
544 masm()->positions_recorder()->RecordPosition(position);
545 masm()->positions_recorder()->WriteRecordedPositions();
546 }
547
548
549 void LCodeGen::RecordSafepointWithLazyDeopt(LInstruction* instr,
550 SafepointMode safepoint_mode) {
551 if (safepoint_mode == RECORD_SIMPLE_SAFEPOINT) {
552 RecordSafepoint(instr->pointer_map(), Safepoint::kLazyDeopt);
553 } else {
554 ASSERT(safepoint_mode == RECORD_SAFEPOINT_WITH_REGISTERS_AND_NO_ARGUMENTS);
555 RecordSafepointWithRegisters(
556 instr->pointer_map(), 0, Safepoint::kLazyDeopt);
557 }
558 }
559
560
561 void LCodeGen::RecordSafepoint(LPointerMap* pointers,
562 Safepoint::Kind kind,
563 int arguments,
564 Safepoint::DeoptMode deopt_mode) {
565 ASSERT(expected_safepoint_kind_ == kind);
566
567 const ZoneList<LOperand*>* operands = pointers->GetNormalizedOperands();
568 Safepoint safepoint = safepoints_.DefineSafepoint(
569 masm(), kind, arguments, deopt_mode);
570
571 for (int i = 0; i < operands->length(); i++) {
572 LOperand* pointer = operands->at(i);
573 if (pointer->IsStackSlot()) {
574 safepoint.DefinePointerSlot(pointer->index(), zone());
575 } else if (pointer->IsRegister() && (kind & Safepoint::kWithRegisters)) {
576 safepoint.DefinePointerRegister(ToRegister(pointer), zone());
577 }
578 }
579
580 if (kind & Safepoint::kWithRegisters) {
581 // Register cp always contains a pointer to the context.
582 safepoint.DefinePointerRegister(cp, zone());
583 }
584 }
585
586 void LCodeGen::RecordSafepoint(LPointerMap* pointers,
587 Safepoint::DeoptMode deopt_mode) {
588 RecordSafepoint(pointers, Safepoint::kSimple, 0, deopt_mode);
589 }
590
591
592 void LCodeGen::RecordSafepoint(Safepoint::DeoptMode deopt_mode) {
593 LPointerMap empty_pointers(zone());
594 RecordSafepoint(&empty_pointers, deopt_mode);
595 }
596
597
598 void LCodeGen::RecordSafepointWithRegisters(LPointerMap* pointers,
599 int arguments,
600 Safepoint::DeoptMode deopt_mode) {
601 RecordSafepoint(pointers, Safepoint::kWithRegisters, arguments, deopt_mode);
602 }
603
604
605 void LCodeGen::RecordSafepointWithRegistersAndDoubles(
606 LPointerMap* pointers, int arguments, Safepoint::DeoptMode deopt_mode) {
607 RecordSafepoint(
608 pointers, Safepoint::kWithRegistersAndDoubles, arguments, deopt_mode);
609 }
610
611
612 bool LCodeGen::GenerateCode() {
613 LPhase phase("Z_Code generation", chunk());
614 ASSERT(is_unused());
615 status_ = GENERATING;
616
617 // Open a frame scope to indicate that there is a frame on the stack. The
618 // NONE indicates that the scope shouldn't actually generate code to set up
619 // the frame (that is done in GeneratePrologue).
620 FrameScope frame_scope(masm_, StackFrame::NONE);
621
622 return GeneratePrologue() &&
623 GenerateBody() &&
624 GenerateDeferredCode() &&
625 GenerateDeoptJumpTable() &&
626 GenerateSafepointTable();
627 }
628
629
630 void LCodeGen::SaveCallerDoubles() {
631 ASSERT(info()->saves_caller_doubles());
632 ASSERT(NeedsEagerFrame());
633 Comment(";;; Save clobbered callee double registers");
634 BitVector* doubles = chunk()->allocated_double_registers();
635 BitVector::Iterator iterator(doubles);
636 int count = 0;
637 while (!iterator.Done()) {
638 // TODO(all): Is this supposed to save just the callee-saved doubles? It
639 // looks like it's saving all of them.
640 FPRegister value = FPRegister::FromAllocationIndex(iterator.Current());
641 __ Poke(value, count * kDoubleSize);
642 iterator.Advance();
643 count++;
644 }
645 }
646
647
648 void LCodeGen::RestoreCallerDoubles() {
649 ASSERT(info()->saves_caller_doubles());
650 ASSERT(NeedsEagerFrame());
651 Comment(";;; Restore clobbered callee double registers");
652 BitVector* doubles = chunk()->allocated_double_registers();
653 BitVector::Iterator iterator(doubles);
654 int count = 0;
655 while (!iterator.Done()) {
656 // TODO(all): Is this supposed to restore just the callee-saved doubles? It
657 // looks like it's restoring all of them.
658 FPRegister value = FPRegister::FromAllocationIndex(iterator.Current());
659 __ Peek(value, count * kDoubleSize);
660 iterator.Advance();
661 count++;
662 }
663 }
664
665
666 bool LCodeGen::GeneratePrologue() {
667 ASSERT(is_generating());
668
669 if (info()->IsOptimizing()) {
670 ProfileEntryHookStub::MaybeCallEntryHook(masm_);
671
672 // TODO(all): Add support for stop_t FLAG in DEBUG mode.
673
674 // Classic mode functions and builtins need to replace the receiver with the
675 // global proxy when called as functions (without an explicit receiver
676 // object).
677 if (info_->this_has_uses() &&
678 info_->is_classic_mode() &&
679 !info_->is_native()) {
680 Label ok;
681 int receiver_offset = info_->scope()->num_parameters() * kXRegSizeInBytes;
682 __ Peek(x10, receiver_offset);
683 __ JumpIfNotRoot(x10, Heap::kUndefinedValueRootIndex, &ok);
684
685 __ Ldr(x10, GlobalObjectMemOperand());
686 __ Ldr(x10, FieldMemOperand(x10, GlobalObject::kGlobalReceiverOffset));
687 __ Poke(x10, receiver_offset);
688
689 __ Bind(&ok);
690 }
691 }
692
693 ASSERT(__ StackPointer().Is(jssp));
694 info()->set_prologue_offset(masm_->pc_offset());
695 if (NeedsEagerFrame()) {
696 __ Prologue(info()->IsStub() ? BUILD_STUB_FRAME : BUILD_FUNCTION_FRAME);
697 frame_is_built_ = true;
698 info_->AddNoFrameRange(0, masm_->pc_offset());
699 }
700
701 // Reserve space for the stack slots needed by the code.
702 int slots = GetStackSlotCount();
703 if (slots > 0) {
704 __ Claim(slots, kPointerSize);
705 }
706
707 if (info()->saves_caller_doubles()) {
708 SaveCallerDoubles();
709 }
710
711 // Allocate a local context if needed.
712 int heap_slots = info()->num_heap_slots() - Context::MIN_CONTEXT_SLOTS;
713 if (heap_slots > 0) {
714 Comment(";;; Allocate local context");
715 // Argument to NewContext is the function, which is in x1.
716 if (heap_slots <= FastNewContextStub::kMaximumSlots) {
717 FastNewContextStub stub(heap_slots);
718 __ CallStub(&stub);
719 } else {
720 __ Push(x1);
721 __ CallRuntime(Runtime::kNewFunctionContext, 1);
722 }
723 RecordSafepoint(Safepoint::kNoLazyDeopt);
724 // Context is returned in x0. It replaces the context passed to us. It's
 725 // saved on the stack and kept live in cp.
726 __ Mov(cp, x0);
727 __ Str(x0, MemOperand(fp, StandardFrameConstants::kContextOffset));
728 // Copy any necessary parameters into the context.
729 int num_parameters = scope()->num_parameters();
730 for (int i = 0; i < num_parameters; i++) {
731 Variable* var = scope()->parameter(i);
732 if (var->IsContextSlot()) {
733 Register value = x0;
734 Register scratch = x3;
735
736 int parameter_offset = StandardFrameConstants::kCallerSPOffset +
737 (num_parameters - 1 - i) * kPointerSize;
738 // Load parameter from stack.
739 __ Ldr(value, MemOperand(fp, parameter_offset));
740 // Store it in the context.
741 MemOperand target = ContextMemOperand(cp, var->index());
742 __ Str(value, target);
743 // Update the write barrier. This clobbers value and scratch.
744 __ RecordWriteContextSlot(cp, target.offset(), value, scratch,
745 GetLinkRegisterState(), kSaveFPRegs);
746 }
747 }
748 Comment(";;; End allocate local context");
749 }
750
751 // Trace the call.
752 if (FLAG_trace && info()->IsOptimizing()) {
753 // We have not executed any compiled code yet, so cp still holds the
754 // incoming context.
755 __ CallRuntime(Runtime::kTraceEnter, 0);
756 }
757
758 return !is_aborted();
759 }
760
761
762 void LCodeGen::GenerateOsrPrologue() {
763 // Generate the OSR entry prologue at the first unknown OSR value, or if there
764 // are none, at the OSR entrypoint instruction.
765 if (osr_pc_offset_ >= 0) return;
766
767 osr_pc_offset_ = masm()->pc_offset();
768
769 // Adjust the frame size, subsuming the unoptimized frame into the
770 // optimized frame.
771 int slots = GetStackSlotCount() - graph()->osr()->UnoptimizedFrameSlots();
772 ASSERT(slots >= 0);
773 __ Claim(slots);
774 }
775
776
777 bool LCodeGen::GenerateDeferredCode() {
778 ASSERT(is_generating());
779 if (deferred_.length() > 0) {
780 for (int i = 0; !is_aborted() && (i < deferred_.length()); i++) {
781 LDeferredCode* code = deferred_[i];
782
783 HValue* value =
784 instructions_->at(code->instruction_index())->hydrogen_value();
785 RecordAndWritePosition(
786 chunk()->graph()->SourcePositionToScriptPosition(value->position()));
787
788 Comment(";;; <@%d,#%d> "
789 "-------------------- Deferred %s --------------------",
790 code->instruction_index(),
791 code->instr()->hydrogen_value()->id(),
792 code->instr()->Mnemonic());
793
794 __ Bind(code->entry());
795
796 if (NeedsDeferredFrame()) {
797 Comment(";;; Build frame");
798 ASSERT(!frame_is_built_);
799 ASSERT(info()->IsStub());
800 frame_is_built_ = true;
801 __ Push(lr, fp, cp);
802 __ Mov(fp, Operand(Smi::FromInt(StackFrame::STUB)));
803 __ Push(fp);
804 __ Add(fp, __ StackPointer(),
805 StandardFrameConstants::kFixedFrameSizeFromFp);
806 Comment(";;; Deferred code");
807 }
808
809 code->Generate();
810
811 if (NeedsDeferredFrame()) {
812 Comment(";;; Destroy frame");
813 ASSERT(frame_is_built_);
814 __ Pop(xzr, cp, fp, lr);
815 frame_is_built_ = false;
816 }
817
818 __ B(code->exit());
819 }
820 }
821
 822 // Force constant pool emission at the end of the deferred code to make
 823 // sure that no constant pools are emitted after deferred code, because
 824 // deferred code generation is the last step that generates code. The two
 825 // following steps will only output data used by Crankshaft.
826 masm()->CheckConstPool(true, false);
827
828 return !is_aborted();
829 }
830
831
832 bool LCodeGen::GenerateDeoptJumpTable() {
833 if (deopt_jump_table_.length() > 0) {
834 Comment(";;; -------------------- Jump table --------------------");
835 }
836 Label table_start;
837 __ bind(&table_start);
838 Label needs_frame;
839 for (int i = 0; i < deopt_jump_table_.length(); i++) {
840 __ Bind(&deopt_jump_table_[i].label);
841 Address entry = deopt_jump_table_[i].address;
842 Deoptimizer::BailoutType type = deopt_jump_table_[i].bailout_type;
843 int id = Deoptimizer::GetDeoptimizationId(isolate(), entry, type);
844 if (id == Deoptimizer::kNotDeoptimizationEntry) {
845 Comment(";;; jump table entry %d.", i);
846 } else {
847 Comment(";;; jump table entry %d: deoptimization bailout %d.", i, id);
848 }
849 if (deopt_jump_table_[i].needs_frame) {
850 ASSERT(!info()->saves_caller_doubles());
851 __ Mov(__ Tmp0(), Operand(ExternalReference::ForDeoptEntry(entry)));
852 if (needs_frame.is_bound()) {
853 __ B(&needs_frame);
854 } else {
855 __ Bind(&needs_frame);
856 // This variant of deopt can only be used with stubs. Since we don't
857 // have a function pointer to install in the stack frame that we're
858 // building, install a special marker there instead.
859 // TODO(jochen): Revisit the use of TmpX().
860 ASSERT(info()->IsStub());
861 __ Mov(__ Tmp1(), Operand(Smi::FromInt(StackFrame::STUB)));
862 __ Push(lr, fp, cp, __ Tmp1());
863 __ Add(fp, __ StackPointer(), 2 * kPointerSize);
864 __ Call(__ Tmp0());
865 }
866 } else {
867 if (info()->saves_caller_doubles()) {
868 ASSERT(info()->IsStub());
869 RestoreCallerDoubles();
870 }
871 __ Call(entry, RelocInfo::RUNTIME_ENTRY);
872 }
873 masm()->CheckConstPool(false, false);
874 }
875
876 // Force constant pool emission at the end of the deopt jump table to make
 877 // sure that no constant pools are emitted after it.
878 masm()->CheckConstPool(true, false);
879
880 // The deoptimization jump table is the last part of the instruction
881 // sequence. Mark the generated code as done unless we bailed out.
882 if (!is_aborted()) status_ = DONE;
883 return !is_aborted();
884 }
885
886
887 bool LCodeGen::GenerateSafepointTable() {
888 ASSERT(is_done());
889 safepoints_.Emit(masm(), GetStackSlotCount());
890 return !is_aborted();
891 }
892
893
894 void LCodeGen::FinishCode(Handle<Code> code) {
895 ASSERT(is_done());
896 code->set_stack_slots(GetStackSlotCount());
897 code->set_safepoint_table_offset(safepoints_.GetCodeOffset());
898 if (code->is_optimized_code()) RegisterWeakObjectsInOptimizedCode(code);
899 PopulateDeoptimizationData(code);
900 info()->CommitDependencies(code);
901 }
902
903
904 void LCodeGen::Abort(BailoutReason reason) {
905 info()->set_bailout_reason(reason);
906 status_ = ABORTED;
907 }
908
909
910 void LCodeGen::PopulateDeoptimizationData(Handle<Code> code) {
911 int length = deoptimizations_.length();
912 if (length == 0) return;
913
914 Handle<DeoptimizationInputData> data =
915 factory()->NewDeoptimizationInputData(length, TENURED);
916
917 Handle<ByteArray> translations =
918 translations_.CreateByteArray(isolate()->factory());
919 data->SetTranslationByteArray(*translations);
920 data->SetInlinedFunctionCount(Smi::FromInt(inlined_function_count_));
921 data->SetOptimizationId(Smi::FromInt(info_->optimization_id()));
922
923 Handle<FixedArray> literals =
924 factory()->NewFixedArray(deoptimization_literals_.length(), TENURED);
925 { AllowDeferredHandleDereference copy_handles;
926 for (int i = 0; i < deoptimization_literals_.length(); i++) {
927 literals->set(i, *deoptimization_literals_[i]);
928 }
929 data->SetLiteralArray(*literals);
930 }
931
932 data->SetOsrAstId(Smi::FromInt(info_->osr_ast_id().ToInt()));
933 data->SetOsrPcOffset(Smi::FromInt(osr_pc_offset_));
934
935 // Populate the deoptimization entries.
936 for (int i = 0; i < length; i++) {
937 LEnvironment* env = deoptimizations_[i];
938 data->SetAstId(i, env->ast_id());
939 data->SetTranslationIndex(i, Smi::FromInt(env->translation_index()));
940 data->SetArgumentsStackHeight(i,
941 Smi::FromInt(env->arguments_stack_height()));
942 data->SetPc(i, Smi::FromInt(env->pc_offset()));
943 }
944
945 code->set_deoptimization_data(*data);
946 }
947
948
949 void LCodeGen::PopulateDeoptimizationLiteralsWithInlinedFunctions() {
950 ASSERT(deoptimization_literals_.length() == 0);
951
952 const ZoneList<Handle<JSFunction> >* inlined_closures =
953 chunk()->inlined_closures();
954
955 for (int i = 0, length = inlined_closures->length(); i < length; i++) {
956 DefineDeoptimizationLiteral(inlined_closures->at(i));
957 }
958
959 inlined_function_count_ = deoptimization_literals_.length();
960 }
961
962
963 void LCodeGen::DeoptimizeBranch(
964 LEnvironment* environment,
965 BranchType branch_type, Register reg, int bit,
966 Deoptimizer::BailoutType* override_bailout_type) {
967 RegisterEnvironmentForDeoptimization(environment, Safepoint::kNoLazyDeopt);
968 Deoptimizer::BailoutType bailout_type =
969 info()->IsStub() ? Deoptimizer::LAZY : Deoptimizer::EAGER;
970
971 if (override_bailout_type != NULL) {
972 bailout_type = *override_bailout_type;
973 }
974
975 ASSERT(environment->HasBeenRegistered());
976 ASSERT(info()->IsOptimizing() || info()->IsStub());
977 int id = environment->deoptimization_index();
978 Address entry =
979 Deoptimizer::GetDeoptimizationEntry(isolate(), id, bailout_type);
980
981 if (entry == NULL) {
982 Abort(kBailoutWasNotPrepared);
983 }
984
985 if (FLAG_deopt_every_n_times != 0 && !info()->IsStub()) {
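    // Stress mode: force a deoptimization once every n visits to this point.
    // The counter lives behind an external reference; decrement it and, when
    // it reaches zero, reset it and call the deoptimization entry directly.
    // Registers x0-x2 and the condition flags are preserved on the normal path.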
986 Label not_zero;
987 ExternalReference count = ExternalReference::stress_deopt_count(isolate());
988
989 __ Push(x0, x1, x2);
990 __ Mrs(x2, NZCV);
991 __ Mov(x0, Operand(count));
992 __ Ldr(w1, MemOperand(x0));
993 __ Subs(x1, x1, 1);
994 __ B(gt, &not_zero);
995 __ Mov(w1, FLAG_deopt_every_n_times);
996 __ Str(w1, MemOperand(x0));
997 __ Pop(x0, x1, x2);
998 ASSERT(frame_is_built_);
999 __ Call(entry, RelocInfo::RUNTIME_ENTRY);
1000 __ Unreachable();
1001
1002 __ Bind(&not_zero);
1003 __ Str(w1, MemOperand(x0));
1004 __ Msr(NZCV, x2);
1005 __ Pop(x0, x1, x2);
1006 }
1007
1008 if (info()->ShouldTrapOnDeopt()) {
1009 Label dont_trap;
1010 __ B(&dont_trap, InvertBranchType(branch_type), reg, bit);
1011 __ Debug("trap_on_deopt", __LINE__, BREAK);
1012 __ Bind(&dont_trap);
1013 }
1014
1015 ASSERT(info()->IsStub() || frame_is_built_);
 1016 // Go through the jump table if we need to build a frame or restore caller doubles.
1017 if (frame_is_built_ && !info()->saves_caller_doubles()) {
1018 Label dont_deopt;
1019 __ B(&dont_deopt, InvertBranchType(branch_type), reg, bit);
1020 __ Call(entry, RelocInfo::RUNTIME_ENTRY);
1021 __ Bind(&dont_deopt);
1022 } else {
 1023 // We often have several deopts to the same entry; reuse the last
 1024 // jump table entry if this is the case.
1025 if (deopt_jump_table_.is_empty() ||
1026 (deopt_jump_table_.last().address != entry) ||
1027 (deopt_jump_table_.last().bailout_type != bailout_type) ||
1028 (deopt_jump_table_.last().needs_frame != !frame_is_built_)) {
1029 Deoptimizer::JumpTableEntry table_entry(entry,
1030 bailout_type,
1031 !frame_is_built_);
1032 deopt_jump_table_.Add(table_entry, zone());
1033 }
1034 __ B(&deopt_jump_table_.last().label,
1035 branch_type, reg, bit);
1036 }
1037 }
1038
1039
1040 void LCodeGen::Deoptimize(LEnvironment* environment,
1041 Deoptimizer::BailoutType* override_bailout_type) {
1042 DeoptimizeBranch(environment, always, NoReg, -1, override_bailout_type);
1043 }
1044
1045
1046 void LCodeGen::DeoptimizeIf(Condition cond, LEnvironment* environment) {
1047 DeoptimizeBranch(environment, static_cast<BranchType>(cond));
1048 }
1049
1050
1051 void LCodeGen::DeoptimizeIfZero(Register rt, LEnvironment* environment) {
1052 DeoptimizeBranch(environment, reg_zero, rt);
1053 }
1054
1055
1056 void LCodeGen::DeoptimizeIfNegative(Register rt, LEnvironment* environment) {
1057 int sign_bit = rt.Is64Bits() ? kXSignBit : kWSignBit;
1058 DeoptimizeBranch(environment, reg_bit_set, rt, sign_bit);
1059 }
1060
1061
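 // Smis have a zero tag bit (kSmiTag == 0), so testing the bit selected by
 // kSmiTagMask distinguishes smis (bit clear) from heap objects (bit set).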
1062 void LCodeGen::DeoptimizeIfSmi(Register rt,
1063 LEnvironment* environment) {
1064 DeoptimizeBranch(environment, reg_bit_clear, rt, MaskToBit(kSmiTagMask));
1065 }
1066
1067
1068 void LCodeGen::DeoptimizeIfNotSmi(Register rt, LEnvironment* environment) {
1069 DeoptimizeBranch(environment, reg_bit_set, rt, MaskToBit(kSmiTagMask));
1070 }
1071
1072
1073 void LCodeGen::DeoptimizeIfRoot(Register rt,
1074 Heap::RootListIndex index,
1075 LEnvironment* environment) {
1076 __ CompareRoot(rt, index);
1077 DeoptimizeIf(eq, environment);
1078 }
1079
1080
1081 void LCodeGen::DeoptimizeIfNotRoot(Register rt,
1082 Heap::RootListIndex index,
1083 LEnvironment* environment) {
1084 __ CompareRoot(rt, index);
1085 DeoptimizeIf(ne, environment);
1086 }
1087
1088
1089 void LCodeGen::EnsureSpaceForLazyDeopt(int space_needed) {
1090 if (!info()->IsStub()) {
1091 // Ensure that we have enough space after the previous lazy-bailout
1092 // instruction for patching the code here.
1093 intptr_t current_pc = masm()->pc_offset();
1094
1095 if (current_pc < (last_lazy_deopt_pc_ + space_needed)) {
1096 ptrdiff_t padding_size = last_lazy_deopt_pc_ + space_needed - current_pc;
1097 ASSERT((padding_size % kInstructionSize) == 0);
1098 InstructionAccurateScope instruction_accurate(
1099 masm(), padding_size / kInstructionSize);
1100
1101 while (padding_size > 0) {
1102 __ nop();
1103 padding_size -= kInstructionSize;
1104 }
1105 }
1106 }
1107 last_lazy_deopt_pc_ = masm()->pc_offset();
1108 }
1109
1110
1111 Register LCodeGen::ToRegister(LOperand* op) const {
1112 // TODO(all): support zero register results, as ToRegister32.
1113 ASSERT((op != NULL) && op->IsRegister());
1114 return Register::FromAllocationIndex(op->index());
1115 }
1116
1117
1118 Register LCodeGen::ToRegister32(LOperand* op) const {
1119 ASSERT(op != NULL);
1120 if (op->IsConstantOperand()) {
1121 // If this is a constant operand, the result must be the zero register.
1122 ASSERT(ToInteger32(LConstantOperand::cast(op)) == 0);
1123 return wzr;
1124 } else {
1125 return ToRegister(op).W();
1126 }
1127 }
1128
1129
1130 Smi* LCodeGen::ToSmi(LConstantOperand* op) const {
1131 HConstant* constant = chunk_->LookupConstant(op);
1132 return Smi::FromInt(constant->Integer32Value());
1133 }
1134
1135
1136 DoubleRegister LCodeGen::ToDoubleRegister(LOperand* op) const {
1137 ASSERT((op != NULL) && op->IsDoubleRegister());
1138 return DoubleRegister::FromAllocationIndex(op->index());
1139 }
1140
1141
1142 Operand LCodeGen::ToOperand(LOperand* op) {
1143 ASSERT(op != NULL);
1144 if (op->IsConstantOperand()) {
1145 LConstantOperand* const_op = LConstantOperand::cast(op);
1146 HConstant* constant = chunk()->LookupConstant(const_op);
1147 Representation r = chunk_->LookupLiteralRepresentation(const_op);
1148 if (r.IsSmi()) {
1149 ASSERT(constant->HasSmiValue());
1150 return Operand(Smi::FromInt(constant->Integer32Value()));
1151 } else if (r.IsInteger32()) {
1152 ASSERT(constant->HasInteger32Value());
1153 return Operand(constant->Integer32Value());
1154 } else if (r.IsDouble()) {
1155 Abort(kToOperandUnsupportedDoubleImmediate);
1156 }
1157 ASSERT(r.IsTagged());
1158 return Operand(constant->handle(isolate()));
1159 } else if (op->IsRegister()) {
1160 return Operand(ToRegister(op));
1161 } else if (op->IsDoubleRegister()) {
1162 Abort(kToOperandIsDoubleRegisterUnimplemented);
1163 return Operand(0);
1164 }
 1165 // Stack slots are not implemented; use ToMemOperand instead.
1166 UNREACHABLE();
1167 return Operand(0);
1168 }
1169
1170
1171 Operand LCodeGen::ToOperand32I(LOperand* op) {
1172 return ToOperand32(op, SIGNED_INT32);
1173 }
1174
1175
1176 Operand LCodeGen::ToOperand32U(LOperand* op) {
1177 return ToOperand32(op, UNSIGNED_INT32);
1178 }
1179
1180
1181 Operand LCodeGen::ToOperand32(LOperand* op, IntegerSignedness signedness) {
1182 ASSERT(op != NULL);
1183 if (op->IsRegister()) {
1184 return Operand(ToRegister32(op));
1185 } else if (op->IsConstantOperand()) {
1186 LConstantOperand* const_op = LConstantOperand::cast(op);
1187 HConstant* constant = chunk()->LookupConstant(const_op);
1188 Representation r = chunk_->LookupLiteralRepresentation(const_op);
1189 if (r.IsInteger32()) {
1190 ASSERT(constant->HasInteger32Value());
1191 return Operand(signedness == SIGNED_INT32
1192 ? constant->Integer32Value()
1193 : static_cast<uint32_t>(constant->Integer32Value()));
1194 } else {
1195 // Other constants not implemented.
1196 Abort(kToOperand32UnsupportedImmediate);
1197 }
1198 }
1199 // Other cases are not implemented.
1200 UNREACHABLE();
1201 return Operand(0);
1202 }
1203
1204
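 // When no frame has been built, arguments are addressed relative to the stack
 // pointer: an index of -1 maps to offset 0, -2 to kPointerSize, and so on.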
1205 static ptrdiff_t ArgumentsOffsetWithoutFrame(ptrdiff_t index) {
1206 ASSERT(index < 0);
1207 return -(index + 1) * kPointerSize;
1208 }
1209
1210
1211 MemOperand LCodeGen::ToMemOperand(LOperand* op) const {
1212 ASSERT(op != NULL);
1213 ASSERT(!op->IsRegister());
1214 ASSERT(!op->IsDoubleRegister());
1215 ASSERT(op->IsStackSlot() || op->IsDoubleStackSlot());
1216 if (NeedsEagerFrame()) {
1217 return MemOperand(fp, StackSlotOffset(op->index()));
1218 } else {
 1219 // There is no eager frame, so retrieve the parameter relative to the
 1220 // stack pointer.
1221 return MemOperand(masm()->StackPointer(),
1222 ArgumentsOffsetWithoutFrame(op->index()));
1223 }
1224 }
1225
1226
1227 Handle<Object> LCodeGen::ToHandle(LConstantOperand* op) const {
1228 HConstant* constant = chunk_->LookupConstant(op);
1229 ASSERT(chunk_->LookupLiteralRepresentation(op).IsSmiOrTagged());
1230 return constant->handle(isolate());
1231 }
1232
1233
1234 bool LCodeGen::IsSmi(LConstantOperand* op) const {
1235 return chunk_->LookupLiteralRepresentation(op).IsSmi();
1236 }
1237
1238
1239 bool LCodeGen::IsInteger32Constant(LConstantOperand* op) const {
1240 return op->IsConstantOperand() &&
1241 chunk_->LookupLiteralRepresentation(op).IsSmiOrInteger32();
1242 }
1243
1244
1245 int32_t LCodeGen::ToInteger32(LConstantOperand* op) const {
1246 HConstant* constant = chunk_->LookupConstant(op);
1247 return constant->Integer32Value();
1248 }
1249
1250
1251 double LCodeGen::ToDouble(LConstantOperand* op) const {
1252 HConstant* constant = chunk_->LookupConstant(op);
1253 ASSERT(constant->HasDoubleValue());
1254 return constant->DoubleValue();
1255 }
1256
1257
1258 Condition LCodeGen::TokenToCondition(Token::Value op, bool is_unsigned) {
1259 Condition cond = nv;
1260 switch (op) {
1261 case Token::EQ:
1262 case Token::EQ_STRICT:
1263 cond = eq;
1264 break;
1265 case Token::NE:
1266 case Token::NE_STRICT:
1267 cond = ne;
1268 break;
1269 case Token::LT:
1270 cond = is_unsigned ? lo : lt;
1271 break;
1272 case Token::GT:
1273 cond = is_unsigned ? hi : gt;
1274 break;
1275 case Token::LTE:
1276 cond = is_unsigned ? ls : le;
1277 break;
1278 case Token::GTE:
1279 cond = is_unsigned ? hs : ge;
1280 break;
1281 case Token::IN:
1282 case Token::INSTANCEOF:
1283 default:
1284 UNREACHABLE();
1285 }
1286 return cond;
1287 }
1288
1289
1290 template<class InstrType>
1291 void LCodeGen::EmitBranchGeneric(InstrType instr,
1292 const BranchGenerator& branch) {
1293 int left_block = instr->TrueDestination(chunk_);
1294 int right_block = instr->FalseDestination(chunk_);
1295
1296 int next_block = GetNextEmittedBlock();
1297
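   // Fall through to the next emitted block where possible: if the true block
   // is next, emit only the inverted branch to the false block; if the false
   // block is next, emit only the normal branch; otherwise emit a conditional
   // branch followed by an unconditional jump.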
1298 if (right_block == left_block) {
1299 EmitGoto(left_block);
1300 } else if (left_block == next_block) {
1301 branch.EmitInverted(chunk_->GetAssemblyLabel(right_block));
1302 } else if (right_block == next_block) {
1303 branch.Emit(chunk_->GetAssemblyLabel(left_block));
1304 } else {
1305 branch.Emit(chunk_->GetAssemblyLabel(left_block));
1306 __ B(chunk_->GetAssemblyLabel(right_block));
1307 }
1308 }
1309
1310
1311 template<class InstrType>
1312 void LCodeGen::EmitBranch(InstrType instr, Condition condition) {
1313 ASSERT((condition != al) && (condition != nv));
1314 BranchOnCondition branch(this, condition);
1315 EmitBranchGeneric(instr, branch);
1316 }
1317
1318
1319 template<class InstrType>
1320 void LCodeGen::EmitCompareAndBranch(InstrType instr,
1321 Condition condition,
1322 const Register& lhs,
1323 const Operand& rhs) {
1324 ASSERT((condition != al) && (condition != nv));
1325 CompareAndBranch branch(this, condition, lhs, rhs);
1326 EmitBranchGeneric(instr, branch);
1327 }
1328
1329
1330 template<class InstrType>
1331 void LCodeGen::EmitTestAndBranch(InstrType instr,
1332 Condition condition,
1333 const Register& value,
1334 uint64_t mask) {
1335 ASSERT((condition != al) && (condition != nv));
1336 TestAndBranch branch(this, condition, value, mask);
1337 EmitBranchGeneric(instr, branch);
1338 }
1339
1340
1341 template<class InstrType>
1342 void LCodeGen::EmitBranchIfNonZeroNumber(InstrType instr,
1343 const FPRegister& value,
1344 const FPRegister& scratch) {
1345 BranchIfNonZeroNumber branch(this, value, scratch);
1346 EmitBranchGeneric(instr, branch);
1347 }
1348
1349
1350 template<class InstrType>
1351 void LCodeGen::EmitBranchIfHeapNumber(InstrType instr,
1352 const Register& value) {
1353 BranchIfHeapNumber branch(this, value);
1354 EmitBranchGeneric(instr, branch);
1355 }
1356
1357
1358 template<class InstrType>
1359 void LCodeGen::EmitBranchIfRoot(InstrType instr,
1360 const Register& value,
1361 Heap::RootListIndex index) {
1362 BranchIfRoot branch(this, value, index);
1363 EmitBranchGeneric(instr, branch);
1364 }
1365
1366
1367 void LCodeGen::DoGap(LGap* gap) {
1368 for (int i = LGap::FIRST_INNER_POSITION;
1369 i <= LGap::LAST_INNER_POSITION;
1370 i++) {
1371 LGap::InnerPosition inner_pos = static_cast<LGap::InnerPosition>(i);
1372 LParallelMove* move = gap->GetParallelMove(inner_pos);
1373 if (move != NULL) {
1374 resolver_.Resolve(move);
1375 }
1376 }
1377 }
1378
1379
1380 void LCodeGen::DoAccessArgumentsAt(LAccessArgumentsAt* instr) {
1381 Register arguments = ToRegister(instr->arguments());
1382 Register result = ToRegister(instr->result());
1383
 1384 // The pointer to the arguments array comes from DoArgumentsElements.
 1385 // It does not point directly to the arguments; there is an offset of
 1386 // two words that we must take into account when accessing an argument.
 1387 // Subtracting the index from length accounts for one, so we add one more.
1388
1389 if (instr->length()->IsConstantOperand() &&
1390 instr->index()->IsConstantOperand()) {
1391 int index = ToInteger32(LConstantOperand::cast(instr->index()));
1392 int length = ToInteger32(LConstantOperand::cast(instr->length()));
1393 int offset = ((length - index) + 1) * kPointerSize;
1394 __ Ldr(result, MemOperand(arguments, offset));
1395 } else if (instr->index()->IsConstantOperand()) {
1396 Register length = ToRegister32(instr->length());
1397 int index = ToInteger32(LConstantOperand::cast(instr->index()));
1398 int loc = index - 1;
1399 if (loc != 0) {
1400 __ Sub(result.W(), length, loc);
1401 __ Ldr(result, MemOperand(arguments, result, UXTW, kPointerSizeLog2));
1402 } else {
1403 __ Ldr(result, MemOperand(arguments, length, UXTW, kPointerSizeLog2));
1404 }
1405 } else {
1406 Register length = ToRegister32(instr->length());
1407 Operand index = ToOperand32I(instr->index());
1408 __ Sub(result.W(), length, index);
1409 __ Add(result.W(), result.W(), 1);
1410 __ Ldr(result, MemOperand(arguments, result, UXTW, kPointerSizeLog2));
1411 }
1412 }
1413
1414
1415 void LCodeGen::DoAddE(LAddE* instr) {
1416 Register result = ToRegister(instr->result());
1417 Register left = ToRegister(instr->left());
1418 Operand right = (instr->right()->IsConstantOperand())
1419 ? ToInteger32(LConstantOperand::cast(instr->right()))
1420 : Operand(ToRegister32(instr->right()), SXTW);
1421
1422 ASSERT(!instr->hydrogen()->CheckFlag(HValue::kCanOverflow));
1423 __ Add(result, left, right);
1424 }
1425
1426
1427 void LCodeGen::DoAddI(LAddI* instr) {
1428 bool can_overflow = instr->hydrogen()->CheckFlag(HValue::kCanOverflow);
1429 Register result = ToRegister32(instr->result());
1430 Register left = ToRegister32(instr->left());
1431 Operand right = ToOperand32I(instr->right());
1432 if (can_overflow) {
1433 __ Adds(result, left, right);
1434 DeoptimizeIf(vs, instr->environment());
1435 } else {
1436 __ Add(result, left, right);
1437 }
1438 }
1439
1440
1441 void LCodeGen::DoAddS(LAddS* instr) {
1442 bool can_overflow = instr->hydrogen()->CheckFlag(HValue::kCanOverflow);
1443 Register result = ToRegister(instr->result());
1444 Register left = ToRegister(instr->left());
1445 Operand right = ToOperand(instr->right());
1446 if (can_overflow) {
1447 __ Adds(result, left, right);
1448 DeoptimizeIf(vs, instr->environment());
1449 } else {
1450 __ Add(result, left, right);
1451 }
1452 }
1453
1454
1455 void LCodeGen::DoAllocate(LAllocate* instr) {
1456 class DeferredAllocate: public LDeferredCode {
1457 public:
1458 DeferredAllocate(LCodeGen* codegen, LAllocate* instr)
1459 : LDeferredCode(codegen), instr_(instr) { }
1460 virtual void Generate() { codegen()->DoDeferredAllocate(instr_); }
1461 virtual LInstruction* instr() { return instr_; }
1462 private:
1463 LAllocate* instr_;
1464 };
1465
1466 DeferredAllocate* deferred = new(zone()) DeferredAllocate(this, instr);
1467
1468 Register result = ToRegister(instr->result());
1469 Register temp1 = ToRegister(instr->temp1());
1470 Register temp2 = ToRegister(instr->temp2());
1471
1472 // Allocate memory for the object.
1473 AllocationFlags flags = TAG_OBJECT;
1474 if (instr->hydrogen()->MustAllocateDoubleAligned()) {
1475 flags = static_cast<AllocationFlags>(flags | DOUBLE_ALIGNMENT);
1476 }
1477
1478 if (instr->hydrogen()->IsOldPointerSpaceAllocation()) {
1479 ASSERT(!instr->hydrogen()->IsOldDataSpaceAllocation());
1480 ASSERT(!instr->hydrogen()->IsNewSpaceAllocation());
1481 flags = static_cast<AllocationFlags>(flags | PRETENURE_OLD_POINTER_SPACE);
1482 } else if (instr->hydrogen()->IsOldDataSpaceAllocation()) {
1483 ASSERT(!instr->hydrogen()->IsNewSpaceAllocation());
1484 flags = static_cast<AllocationFlags>(flags | PRETENURE_OLD_DATA_SPACE);
1485 }
1486
1487 if (instr->size()->IsConstantOperand()) {
1488 int32_t size = ToInteger32(LConstantOperand::cast(instr->size()));
1489 __ Allocate(size, result, temp1, temp2, deferred->entry(), flags);
1490 } else {
1491 Register size = ToRegister32(instr->size());
1492 __ Sxtw(size.X(), size);
1493 __ Allocate(size.X(), result, temp1, temp2, deferred->entry(), flags);
1494 }
1495
1496 __ Bind(deferred->exit());
1497
1498 if (instr->hydrogen()->MustPrefillWithFiller()) {
1499 if (instr->size()->IsConstantOperand()) {
1500 int32_t size = ToInteger32(LConstantOperand::cast(instr->size()));
1501 __ Mov(temp1, size - kPointerSize);
1502 } else {
1503 __ Sub(temp1.W(), ToRegister32(instr->size()), kPointerSize);
1504 }
1505 __ Sub(result, result, kHeapObjectTag);
1506
1507 // TODO(jbramley): Optimize this loop using stp.
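    // Fill the object with the one-pointer filler map, writing from the last
    // word of the allocation down to the first (temp1 counts down from
    // size - kPointerSize to 0).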
1508 Label loop;
1509 __ Bind(&loop);
1510 __ Mov(temp2, Operand(isolate()->factory()->one_pointer_filler_map()));
1511 __ Str(temp2, MemOperand(result, temp1));
1512 __ Subs(temp1, temp1, kPointerSize);
1513 __ B(ge, &loop);
1514
1515 __ Add(result, result, kHeapObjectTag);
1516 }
1517 }
1518
1519
1520 void LCodeGen::DoDeferredAllocate(LAllocate* instr) {
1521 // TODO(3095996): Get rid of this. For now, we need to make the
1522 // result register contain a valid pointer because it is already
1523 // contained in the register pointer map.
1524 __ Mov(ToRegister(instr->result()), Operand(Smi::FromInt(0)));
1525
1526 PushSafepointRegistersScope scope(this, Safepoint::kWithRegisters);
1527 // We're in a SafepointRegistersScope so we can use any scratch registers.
1528 Register size = x0;
1529 if (instr->size()->IsConstantOperand()) {
1530 __ Mov(size, Operand(ToSmi(LConstantOperand::cast(instr->size()))));
1531 } else {
1532 __ SmiTag(size, ToRegister32(instr->size()).X());
1533 }
1534 int flags = AllocateDoubleAlignFlag::encode(
1535 instr->hydrogen()->MustAllocateDoubleAligned());
1536 if (instr->hydrogen()->IsOldPointerSpaceAllocation()) {
1537 ASSERT(!instr->hydrogen()->IsOldDataSpaceAllocation());
1538 ASSERT(!instr->hydrogen()->IsNewSpaceAllocation());
1539 flags = AllocateTargetSpace::update(flags, OLD_POINTER_SPACE);
1540 } else if (instr->hydrogen()->IsOldDataSpaceAllocation()) {
1541 ASSERT(!instr->hydrogen()->IsNewSpaceAllocation());
1542 flags = AllocateTargetSpace::update(flags, OLD_DATA_SPACE);
1543 } else {
1544 flags = AllocateTargetSpace::update(flags, NEW_SPACE);
1545 }
1546 __ Mov(x10, Operand(Smi::FromInt(flags)));
1547 __ Push(size, x10);
1548
1549 CallRuntimeFromDeferred(
1550 Runtime::kAllocateInTargetSpace, 2, instr, instr->context());
1551 __ StoreToSafepointRegisterSlot(x0, ToRegister(instr->result()));
1552 }
1553
1554
1555 void LCodeGen::DoApplyArguments(LApplyArguments* instr) {
1556 Register receiver = ToRegister(instr->receiver());
1557 Register function = ToRegister(instr->function());
1558 Register length = ToRegister32(instr->length());
1559
1560 Register elements = ToRegister(instr->elements());
1561 Register scratch = x5;
1562 ASSERT(receiver.Is(x0)); // Used for parameter count.
1563 ASSERT(function.Is(x1)); // Required by InvokeFunction.
1564 ASSERT(ToRegister(instr->result()).Is(x0));
1565 ASSERT(instr->IsMarkedAsCall());
1566
1567 // Copy the arguments to this function possibly from the
1568 // adaptor frame below it.
1569 const uint32_t kArgumentsLimit = 1 * KB;
1570 __ Cmp(length, kArgumentsLimit);
1571 DeoptimizeIf(hi, instr->environment());
1572
1573 // Push the receiver and use the register to keep the original
1574 // number of arguments.
1575 __ Push(receiver);
1576 Register argc = receiver;
1577 receiver = NoReg;
1578 __ Sxtw(argc, length);
 1579 // The arguments start at an offset of one pointer size from elements.
1580 __ Add(elements, elements, 1 * kPointerSize);
1581
1582 // Loop through the arguments pushing them onto the execution
1583 // stack.
1584 Label invoke, loop;
1585 // length is a small non-negative integer, due to the test above.
1586 __ Cbz(length, &invoke);
1587 __ Bind(&loop);
1588 __ Ldr(scratch, MemOperand(elements, length, SXTW, kPointerSizeLog2));
1589 __ Push(scratch);
1590 __ Subs(length, length, 1);
1591 __ B(ne, &loop);
1592
1593 __ Bind(&invoke);
1594 ASSERT(instr->HasPointerMap());
1595 LPointerMap* pointers = instr->pointer_map();
1596 SafepointGenerator safepoint_generator(this, pointers, Safepoint::kLazyDeopt);
1597 // The number of arguments is stored in argc (receiver) which is x0, as
1598 // expected by InvokeFunction.
1599 ParameterCount actual(argc);
1600 __ InvokeFunction(function, actual, CALL_FUNCTION, safepoint_generator);
1601 }
1602
1603
1604 void LCodeGen::DoArgumentsElements(LArgumentsElements* instr) {
1605 Register result = ToRegister(instr->result());
1606
1607 if (instr->hydrogen()->from_inlined()) {
1608 // When we are inside an inlined function, the arguments are the last things
1609 // that have been pushed on the stack. Therefore the arguments array can be
1610 // accessed directly from jssp.
 1611 // However, in the normal case it is accessed via fp, and there are two words
 1612 // on the stack between fp and the arguments (the saved lr and fp); the
 1613 // LAccessArgumentsAt implementation takes that into account.
 1614 // In the inlined case we need to subtract the size of two words from jssp
 1615 // to get a pointer that works with LAccessArgumentsAt.
1616 ASSERT(masm()->StackPointer().Is(jssp));
1617 __ Sub(result, jssp, 2 * kPointerSize);
1618 } else {
1619 ASSERT(instr->temp() != NULL);
1620 Register previous_fp = ToRegister(instr->temp());
1621
1622 __ Ldr(previous_fp,
1623 MemOperand(fp, StandardFrameConstants::kCallerFPOffset));
1624 __ Ldr(result,
1625 MemOperand(previous_fp, StandardFrameConstants::kContextOffset));
1626 __ Cmp(result, Operand(Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR)));
1627 __ Csel(result, fp, previous_fp, ne);
1628 }
1629 }
1630
1631
1632 void LCodeGen::DoArgumentsLength(LArgumentsLength* instr) {
1633 Register elements = ToRegister(instr->elements());
1634 Register result = ToRegister32(instr->result());
1635 Label done;
1636
 1637 // If there is no arguments adaptor frame, the number of arguments is fixed.
1638 __ Cmp(fp, elements);
1639 __ Mov(result, scope()->num_parameters());
1640 __ B(eq, &done);
1641
1642 // Arguments adaptor frame present. Get argument length from there.
1643 __ Ldr(result.X(), MemOperand(fp, StandardFrameConstants::kCallerFPOffset));
1644 __ Ldr(result,
1645 UntagSmiMemOperand(result.X(),
1646 ArgumentsAdaptorFrameConstants::kLengthOffset));
1647
1648 // Argument length is in result register.
1649 __ Bind(&done);
1650 }
1651
1652
1653 void LCodeGen::DoArithmeticD(LArithmeticD* instr) {
1654 DoubleRegister left = ToDoubleRegister(instr->left());
1655 DoubleRegister right = ToDoubleRegister(instr->right());
1656 DoubleRegister result = ToDoubleRegister(instr->result());
1657
1658 switch (instr->op()) {
1659 case Token::ADD: __ Fadd(result, left, right); break;
1660 case Token::SUB: __ Fsub(result, left, right); break;
1661 case Token::MUL: __ Fmul(result, left, right); break;
1662 case Token::DIV: __ Fdiv(result, left, right); break;
1663 case Token::MOD: {
1664 // The ECMA-262 remainder operator is the remainder from a truncating
1665 // (round-towards-zero) division. Note that this differs from IEEE-754.
1666 //
1667 // TODO(jbramley): See if it's possible to do this inline, rather than by
1668 // calling a helper function. With frintz (to produce the intermediate
1669 // quotient) and fmsub (to calculate the remainder without loss of
1670 // precision), it should be possible. However, we would need support for
1671 // fdiv in round-towards-zero mode, and the A64 simulator doesn't support
1672 // that yet.
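// Illustrative example (not part of the original source): 5 % 3 is 2 under
// ECMA-262 (truncating quotient 1), whereas the IEEE-754 remainder of 5 and
// 3 is -1 (the quotient is rounded to the nearest integer, i.e. 2).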
1673 ASSERT(left.Is(d0));
1674 ASSERT(right.Is(d1));
1675 __ CallCFunction(
1676 ExternalReference::mod_two_doubles_operation(isolate()),
1677 0, 2);
1678 ASSERT(result.Is(d0));
1679 break;
1680 }
1681 default:
1682 UNREACHABLE();
1683 break;
1684 }
1685 }
1686
1687
1688 void LCodeGen::DoArithmeticT(LArithmeticT* instr) {
1689 ASSERT(ToRegister(instr->context()).is(cp));
1690 ASSERT(ToRegister(instr->left()).is(x1));
1691 ASSERT(ToRegister(instr->right()).is(x0));
1692 ASSERT(ToRegister(instr->result()).is(x0));
1693
1694 BinaryOpICStub stub(instr->op(), NO_OVERWRITE);
1695 CallCode(stub.GetCode(isolate()), RelocInfo::CODE_TARGET, instr);
1696 }
1697
1698
1699 void LCodeGen::DoBitI(LBitI* instr) {
1700 Register result = ToRegister32(instr->result());
1701 Register left = ToRegister32(instr->left());
1702 Operand right = ToOperand32U(instr->right());
1703
1704 switch (instr->op()) {
1705 case Token::BIT_AND: __ And(result, left, right); break;
1706 case Token::BIT_OR: __ Orr(result, left, right); break;
1707 case Token::BIT_XOR: __ Eor(result, left, right); break;
1708 default:
1709 UNREACHABLE();
1710 break;
1711 }
1712 }
1713
1714
1715 void LCodeGen::DoBitS(LBitS* instr) {
1716 Register result = ToRegister(instr->result());
1717 Register left = ToRegister(instr->left());
1718 Operand right = ToOperand(instr->right());
1719
1720 switch (instr->op()) {
1721 case Token::BIT_AND: __ And(result, left, right); break;
1722 case Token::BIT_OR: __ Orr(result, left, right); break;
1723 case Token::BIT_XOR: __ Eor(result, left, right); break;
1724 default:
1725 UNREACHABLE();
1726 break;
1727 }
1728 }
1729
1730
1731 void LCodeGen::ApplyCheckIf(Condition cc, LBoundsCheck* check) {
1732 if (FLAG_debug_code && check->hydrogen()->skip_check()) {
1733 __ Assert(InvertCondition(cc), kEliminatedBoundsCheckFailed);
1734 } else {
1735 DeoptimizeIf(cc, check->environment());
1736 }
1737 }
1738
1739
1740 void LCodeGen::DoBoundsCheck(LBoundsCheck *instr) {
1741 if (instr->hydrogen()->skip_check()) return;
1742
1743 ASSERT(instr->hydrogen()->length()->representation().IsInteger32());
1744 Register length = ToRegister32(instr->length());
1745
1746 if (instr->index()->IsConstantOperand()) {
1747 int constant_index =
1748 ToInteger32(LConstantOperand::cast(instr->index()));
1749
1750 if (instr->hydrogen()->length()->representation().IsSmi()) {
1751 __ Cmp(length, Operand(Smi::FromInt(constant_index)));
1752 } else {
1753 __ Cmp(length, Operand(constant_index));
1754 }
1755 } else {
1756 ASSERT(instr->hydrogen()->index()->representation().IsInteger32());
1757 __ Cmp(length, ToRegister32(instr->index()));
1758 }
1759 Condition condition = instr->hydrogen()->allow_equality() ? lo : ls;
1760 ApplyCheckIf(condition, instr);
1761 }
1762
1763
1764 void LCodeGen::DoBranch(LBranch* instr) {
1765 Representation r = instr->hydrogen()->value()->representation();
1766 Label* true_label = instr->TrueLabel(chunk_);
1767 Label* false_label = instr->FalseLabel(chunk_);
1768
1769 if (r.IsInteger32()) {
1770 ASSERT(!info()->IsStub());
1771 EmitCompareAndBranch(instr, ne, ToRegister32(instr->value()), 0);
1772 } else if (r.IsSmi()) {
1773 ASSERT(!info()->IsStub());
1774 STATIC_ASSERT(kSmiTag == 0);
1775 EmitCompareAndBranch(instr, ne, ToRegister(instr->value()), 0);
1776 } else if (r.IsDouble()) {
1777 DoubleRegister value = ToDoubleRegister(instr->value());
1778 // Test the double value. Zero and NaN are false.
1779 EmitBranchIfNonZeroNumber(instr, value, double_scratch());
1780 } else {
1781 ASSERT(r.IsTagged());
1782 Register value = ToRegister(instr->value());
1783 HType type = instr->hydrogen()->value()->type();
1784
1785 if (type.IsBoolean()) {
1786 ASSERT(!info()->IsStub());
1787 __ CompareRoot(value, Heap::kTrueValueRootIndex);
1788 EmitBranch(instr, eq);
1789 } else if (type.IsSmi()) {
1790 ASSERT(!info()->IsStub());
1791 EmitCompareAndBranch(instr, ne, value, Operand(Smi::FromInt(0)));
1792 } else if (type.IsJSArray()) {
1793 ASSERT(!info()->IsStub());
1794 EmitGoto(instr->TrueDestination(chunk()));
1795 } else if (type.IsHeapNumber()) {
1796 ASSERT(!info()->IsStub());
1797 __ Ldr(double_scratch(), FieldMemOperand(value,
1798 HeapNumber::kValueOffset));
1799 // Test the double value. Zero and NaN are false.
1800 EmitBranchIfNonZeroNumber(instr, double_scratch(), double_scratch());
1801 } else if (type.IsString()) {
1802 ASSERT(!info()->IsStub());
1803 Register temp = ToRegister(instr->temp1());
1804 __ Ldr(temp, FieldMemOperand(value, String::kLengthOffset));
1805 EmitCompareAndBranch(instr, ne, temp, 0);
1806 } else {
1807 ToBooleanStub::Types expected = instr->hydrogen()->expected_input_types();
1808 // Avoid deopts in the case where we've never executed this path before.
1809 if (expected.IsEmpty()) expected = ToBooleanStub::Types::Generic();
1810
1811 if (expected.Contains(ToBooleanStub::UNDEFINED)) {
1812 // undefined -> false.
1813 __ JumpIfRoot(
1814 value, Heap::kUndefinedValueRootIndex, false_label);
1815 }
1816
1817 if (expected.Contains(ToBooleanStub::BOOLEAN)) {
1818 // Boolean -> its value.
1819 __ JumpIfRoot(
1820 value, Heap::kTrueValueRootIndex, true_label);
1821 __ JumpIfRoot(
1822 value, Heap::kFalseValueRootIndex, false_label);
1823 }
1824
1825 if (expected.Contains(ToBooleanStub::NULL_TYPE)) {
1826 // 'null' -> false.
1827 __ JumpIfRoot(
1828 value, Heap::kNullValueRootIndex, false_label);
1829 }
1830
1831 if (expected.Contains(ToBooleanStub::SMI)) {
1832 // Smis: 0 -> false, all others -> true.
1833 ASSERT(Smi::FromInt(0) == 0);
1834 __ Cbz(value, false_label);
1835 __ JumpIfSmi(value, true_label);
1836 } else if (expected.NeedsMap()) {
1837 // If we need a map later and have a smi, deopt.
1838 DeoptimizeIfSmi(value, instr->environment());
1839 }
1840
1841 Register map = NoReg;
1842 Register scratch = NoReg;
1843
1844 if (expected.NeedsMap()) {
1845 ASSERT((instr->temp1() != NULL) && (instr->temp2() != NULL));
1846 map = ToRegister(instr->temp1());
1847 scratch = ToRegister(instr->temp2());
1848
1849 __ Ldr(map, FieldMemOperand(value, HeapObject::kMapOffset));
1850
1851 if (expected.CanBeUndetectable()) {
1852 // Undetectable -> false.
1853 __ Ldrb(scratch, FieldMemOperand(map, Map::kBitFieldOffset));
1854 __ TestAndBranchIfAnySet(
1855 scratch, 1 << Map::kIsUndetectable, false_label);
1856 }
1857 }
1858
1859 if (expected.Contains(ToBooleanStub::SPEC_OBJECT)) {
1860 // spec object -> true.
1861 __ CompareInstanceType(map, scratch, FIRST_SPEC_OBJECT_TYPE);
1862 __ B(ge, true_label);
1863 }
1864
1865 if (expected.Contains(ToBooleanStub::STRING)) {
1866 // String value -> false iff empty.
1867 Label not_string;
1868 __ CompareInstanceType(map, scratch, FIRST_NONSTRING_TYPE);
1869 __ B(ge, &not_string);
1870 __ Ldr(scratch, FieldMemOperand(value, String::kLengthOffset));
1871 __ Cbz(scratch, false_label);
1872 __ B(true_label);
1873 __ Bind(&not_string);
1874 }
1875
1876 if (expected.Contains(ToBooleanStub::SYMBOL)) {
1877 // Symbol value -> true.
1878 __ CompareInstanceType(map, scratch, SYMBOL_TYPE);
1879 __ B(eq, true_label);
1880 }
1881
1882 if (expected.Contains(ToBooleanStub::HEAP_NUMBER)) {
1883 Label not_heap_number;
1884 __ JumpIfNotRoot(map, Heap::kHeapNumberMapRootIndex, &not_heap_number);
1885
1886 __ Ldr(double_scratch(),
1887 FieldMemOperand(value, HeapNumber::kValueOffset));
1888 __ Fcmp(double_scratch(), 0.0);
1889 // If we got a NaN (overflow bit is set), jump to the false branch.
1890 __ B(vs, false_label);
1891 __ B(eq, false_label);
1892 __ B(true_label);
1893 __ Bind(&not_heap_number);
1894 }
1895
1896 if (!expected.IsGeneric()) {
1897 // We've seen something for the first time -> deopt.
1898 // This can only happen if we are not generic already.
1899 Deoptimize(instr->environment());
1900 }
1901 }
1902 }
1903 }
1904
1905
1906 void LCodeGen::CallKnownFunction(Handle<JSFunction> function,
1907 int formal_parameter_count,
1908 int arity,
1909 LInstruction* instr,
1910 Register function_reg) {
1911 bool dont_adapt_arguments =
1912 formal_parameter_count == SharedFunctionInfo::kDontAdaptArgumentsSentinel;
1913 bool can_invoke_directly =
1914 dont_adapt_arguments || formal_parameter_count == arity;
1915
1916 // The function interface relies on the following register assignments.
1917 ASSERT(function_reg.Is(x1) || function_reg.IsNone());
1918 Register arity_reg = x0;
1919
1920 LPointerMap* pointers = instr->pointer_map();
1921
1922 // If necessary, load the function object.
1923 if (function_reg.IsNone()) {
1924 function_reg = x1;
1925 __ LoadObject(function_reg, function);
1926 }
1927
1928 if (FLAG_debug_code) {
1929 Label is_not_smi;
1930 // Try to confirm that function_reg (x1) is a tagged pointer.
1931 __ JumpIfNotSmi(function_reg, &is_not_smi);
1932 __ Abort(kExpectedFunctionObject);
1933 __ Bind(&is_not_smi);
1934 }
1935
1936 if (can_invoke_directly) {
1937 // Change context.
1938 __ Ldr(cp, FieldMemOperand(function_reg, JSFunction::kContextOffset));
1939
1940 // Set the arguments count if adaptation is not needed. Assumes that x0 is
1941 // available to write to at this point.
1942 if (dont_adapt_arguments) {
1943 __ Mov(arity_reg, arity);
1944 }
1945
1946 // Invoke function.
1947 __ Ldr(x10, FieldMemOperand(function_reg, JSFunction::kCodeEntryOffset));
1948 __ Call(x10);
1949
1950 // Set up deoptimization.
1951 RecordSafepointWithLazyDeopt(instr, RECORD_SIMPLE_SAFEPOINT);
1952 } else {
1953 SafepointGenerator generator(this, pointers, Safepoint::kLazyDeopt);
1954 ParameterCount count(arity);
1955 ParameterCount expected(formal_parameter_count);
1956 __ InvokeFunction(function_reg, expected, count, CALL_FUNCTION, generator);
1957 }
1958 }
1959
1960
1961 void LCodeGen::DoCallWithDescriptor(LCallWithDescriptor* instr) {
1962 ASSERT(instr->IsMarkedAsCall());
1963 ASSERT(ToRegister(instr->result()).Is(x0));
1964
1965 LPointerMap* pointers = instr->pointer_map();
1966 SafepointGenerator generator(this, pointers, Safepoint::kLazyDeopt);
1967
1968 if (instr->target()->IsConstantOperand()) {
1969 LConstantOperand* target = LConstantOperand::cast(instr->target());
1970 Handle<Code> code = Handle<Code>::cast(ToHandle(target));
1971 generator.BeforeCall(__ CallSize(code, RelocInfo::CODE_TARGET));
1972 // TODO(all): on ARM we use a call descriptor to specify a storage mode
1973 // but on A64 we only have one storage mode so it isn't necessary. Check
1974 // this understanding is correct.
1975 __ Call(code, RelocInfo::CODE_TARGET, TypeFeedbackId::None());
1976 } else {
1977 ASSERT(instr->target()->IsRegister());
1978 Register target = ToRegister(instr->target());
1979 generator.BeforeCall(__ CallSize(target));
1980 __ Add(target, target, Code::kHeaderSize - kHeapObjectTag);
1981 __ Call(target);
1982 }
1983 generator.AfterCall();
1984 }
1985
1986
1987 void LCodeGen::DoCallJSFunction(LCallJSFunction* instr) {
1988 ASSERT(instr->IsMarkedAsCall());
1989 ASSERT(ToRegister(instr->function()).is(x1));
1990
1991 if (instr->hydrogen()->pass_argument_count()) {
1992 __ Mov(x0, Operand(instr->arity()));
1993 }
1994
1995 // Change context.
1996 __ Ldr(cp, FieldMemOperand(x1, JSFunction::kContextOffset));
1997
1998 // Load the code entry address.
1999 __ Ldr(x10, FieldMemOperand(x1, JSFunction::kCodeEntryOffset));
2000 __ Call(x10);
2001
2002 RecordSafepointWithLazyDeopt(instr, RECORD_SIMPLE_SAFEPOINT);
2003 }
2004
2005
2006 void LCodeGen::DoCallRuntime(LCallRuntime* instr) {
2007 CallRuntime(instr->function(), instr->arity(), instr);
2008 }
2009
2010
2011 void LCodeGen::DoCallStub(LCallStub* instr) {
2012 ASSERT(ToRegister(instr->context()).is(cp));
2013 ASSERT(ToRegister(instr->result()).is(x0));
2014 switch (instr->hydrogen()->major_key()) {
2015 case CodeStub::RegExpExec: {
2016 RegExpExecStub stub;
2017 CallCode(stub.GetCode(isolate()), RelocInfo::CODE_TARGET, instr);
2018 break;
2019 }
2020 case CodeStub::SubString: {
2021 SubStringStub stub;
2022 CallCode(stub.GetCode(isolate()), RelocInfo::CODE_TARGET, instr);
2023 break;
2024 }
2025 case CodeStub::StringCompare: {
2026 StringCompareStub stub;
2027 CallCode(stub.GetCode(isolate()), RelocInfo::CODE_TARGET, instr);
2028 break;
2029 }
2030 default:
2031 UNREACHABLE();
2032 }
2033 }
2034
2035
2036 void LCodeGen::DoUnknownOSRValue(LUnknownOSRValue* instr) {
2037 GenerateOsrPrologue();
2038 }
2039
2040
2041 void LCodeGen::DoDeferredInstanceMigration(LCheckMaps* instr, Register object) {
2042 Register temp = ToRegister(instr->temp());
2043 {
2044 PushSafepointRegistersScope scope(this, Safepoint::kWithRegisters);
2045 __ Push(object);
2046 __ Mov(cp, 0);
2047 __ CallRuntimeSaveDoubles(Runtime::kTryMigrateInstance);
2048 RecordSafepointWithRegisters(
2049 instr->pointer_map(), 1, Safepoint::kNoLazyDeopt);
2050 __ StoreToSafepointRegisterSlot(x0, temp);
2051 }
2052 DeoptimizeIfSmi(temp, instr->environment());
2053 }
2054
2055
2056 void LCodeGen::DoCheckMaps(LCheckMaps* instr) {
2057 class DeferredCheckMaps: public LDeferredCode {
2058 public:
2059 DeferredCheckMaps(LCodeGen* codegen, LCheckMaps* instr, Register object)
2060 : LDeferredCode(codegen), instr_(instr), object_(object) {
2061 SetExit(check_maps());
2062 }
2063 virtual void Generate() {
2064 codegen()->DoDeferredInstanceMigration(instr_, object_);
2065 }
2066 Label* check_maps() { return &check_maps_; }
2067 virtual LInstruction* instr() { return instr_; }
2068 private:
2069 LCheckMaps* instr_;
2070 Label check_maps_;
2071 Register object_;
2072 };
2073
2074 if (instr->hydrogen()->CanOmitMapChecks()) {
2075 ASSERT(instr->value() == NULL);
2076 ASSERT(instr->temp() == NULL);
2077 return;
2078 }
2079
2080 Register object = ToRegister(instr->value());
2081 Register map_reg = ToRegister(instr->temp());
2082
2083 __ Ldr(map_reg, FieldMemOperand(object, HeapObject::kMapOffset));
2084
2085 DeferredCheckMaps* deferred = NULL;
2086 if (instr->hydrogen()->has_migration_target()) {
2087 deferred = new(zone()) DeferredCheckMaps(this, instr, object);
2088 __ Bind(deferred->check_maps());
2089 }
2090
2091 UniqueSet<Map> map_set = instr->hydrogen()->map_set();
2092 Label success;
2093 for (int i = 0; i < map_set.size(); i++) {
2094 Handle<Map> map = map_set.at(i).handle();
2095 __ CompareMap(map_reg, map);
2096 __ B(eq, &success);
2097 }
2098
2099 // We didn't match a map.
2100 if (instr->hydrogen()->has_migration_target()) {
2101 __ B(deferred->entry());
2102 } else {
2103 Deoptimize(instr->environment());
2104 }
2105
2106 __ Bind(&success);
2107 }
2108
2109
2110 void LCodeGen::DoCheckNonSmi(LCheckNonSmi* instr) {
2111 if (!instr->hydrogen()->value()->IsHeapObject()) {
2112 DeoptimizeIfSmi(ToRegister(instr->value()), instr->environment());
2113 }
2114 }
2115
2116
2117 void LCodeGen::DoCheckSmi(LCheckSmi* instr) {
2118 Register value = ToRegister(instr->value());
2119 ASSERT(!instr->result() || ToRegister(instr->result()).Is(value));
2120 DeoptimizeIfNotSmi(value, instr->environment());
2121 }
2122
2123
2124 void LCodeGen::DoCheckInstanceType(LCheckInstanceType* instr) {
2125 Register input = ToRegister(instr->value());
2126 Register scratch = ToRegister(instr->temp());
2127
2128 __ Ldr(scratch, FieldMemOperand(input, HeapObject::kMapOffset));
2129 __ Ldrb(scratch, FieldMemOperand(scratch, Map::kInstanceTypeOffset));
2130
2131 if (instr->hydrogen()->is_interval_check()) {
2132 InstanceType first, last;
2133 instr->hydrogen()->GetCheckInterval(&first, &last);
2134
2135 __ Cmp(scratch, first);
2136 if (first == last) {
2137 // If there is only one type in the interval, check for equality.
2138 DeoptimizeIf(ne, instr->environment());
2139 } else if (last == LAST_TYPE) {
2140 // We don't need to compare with the higher bound of the interval.
2141 DeoptimizeIf(lo, instr->environment());
2142 } else {
2143 // If we are below the lower bound, set the C flag and clear the Z flag
2144 // to force a deopt.
2145 __ Ccmp(scratch, last, CFlag, hs);
2146 DeoptimizeIf(hi, instr->environment());
2147 }
2148 } else {
2149 uint8_t mask;
2150 uint8_t tag;
2151 instr->hydrogen()->GetCheckMaskAndTag(&mask, &tag);
2152
2153 if (IsPowerOf2(mask)) {
2154 ASSERT((tag == 0) || (tag == mask));
2155 // TODO(all): We might be able to use tbz/tbnz if we can guarantee that
2156 // the deopt handler is reachable by a tbz instruction.
2157 __ Tst(scratch, mask);
2158 DeoptimizeIf(tag == 0 ? ne : eq, instr->environment());
2159 } else {
2160 if (tag == 0) {
2161 __ Tst(scratch, mask);
2162 } else {
2163 __ And(scratch, scratch, mask);
2164 __ Cmp(scratch, tag);
2165 }
2166 DeoptimizeIf(ne, instr->environment());
2167 }
2168 }
2169 }
2170
2171
2172 void LCodeGen::DoClampDToUint8(LClampDToUint8* instr) {
2173 DoubleRegister input = ToDoubleRegister(instr->unclamped());
2174 Register result = ToRegister32(instr->result());
2175 __ ClampDoubleToUint8(result, input, double_scratch());
2176 }
2177
2178
2179 void LCodeGen::DoClampIToUint8(LClampIToUint8* instr) {
2180 Register input = ToRegister32(instr->unclamped());
2181 Register result = ToRegister32(instr->result());
2182 __ ClampInt32ToUint8(result, input);
2183 }
2184
2185
2186 void LCodeGen::DoClampTToUint8(LClampTToUint8* instr) {
2187 Register input = ToRegister(instr->unclamped());
2188 Register result = ToRegister32(instr->result());
2189 Register scratch = ToRegister(instr->temp1());
2190 Label done;
2191
2192 // Both smi and heap number cases are handled.
2193 Label is_not_smi;
2194 __ JumpIfNotSmi(input, &is_not_smi);
2195 __ SmiUntag(result.X(), input);
2196 __ ClampInt32ToUint8(result);
2197 __ B(&done);
2198
2199 __ Bind(&is_not_smi);
2200
2201 // Check for heap number.
2202 Label is_heap_number;
2203 __ Ldr(scratch, FieldMemOperand(input, HeapObject::kMapOffset));
2204 __ JumpIfRoot(scratch, Heap::kHeapNumberMapRootIndex, &is_heap_number);
2205
2206 // Check for undefined. Undefined is converted to zero by the clamping conversion.
2207 DeoptimizeIfNotRoot(input, Heap::kUndefinedValueRootIndex,
2208 instr->environment());
2209 __ Mov(result, 0);
2210 __ B(&done);
2211
2212 // Heap number case.
2213 __ Bind(&is_heap_number);
2214 DoubleRegister dbl_scratch = double_scratch();
2215 DoubleRegister dbl_scratch2 = ToDoubleRegister(instr->temp2());
2216 __ Ldr(dbl_scratch, FieldMemOperand(input, HeapNumber::kValueOffset));
2217 __ ClampDoubleToUint8(result, dbl_scratch, dbl_scratch2);
2218
2219 __ Bind(&done);
2220 }
2221
2222
2223 void LCodeGen::DoClassOfTestAndBranch(LClassOfTestAndBranch* instr) {
2224 Handle<String> class_name = instr->hydrogen()->class_name();
2225 Label* true_label = instr->TrueLabel(chunk_);
2226 Label* false_label = instr->FalseLabel(chunk_);
2227 Register input = ToRegister(instr->value());
2228 Register scratch1 = ToRegister(instr->temp1());
2229 Register scratch2 = ToRegister(instr->temp2());
2230
2231 __ JumpIfSmi(input, false_label);
2232
2233 Register map = scratch2;
2234 if (class_name->IsUtf8EqualTo(CStrVector("Function"))) {
2235 // Assuming the following assertions, we can use the same compares to test
2236 // for both being a function type and being in the object type range.
2237 STATIC_ASSERT(NUM_OF_CALLABLE_SPEC_OBJECT_TYPES == 2);
2238 STATIC_ASSERT(FIRST_NONCALLABLE_SPEC_OBJECT_TYPE ==
2239 FIRST_SPEC_OBJECT_TYPE + 1);
2240 STATIC_ASSERT(LAST_NONCALLABLE_SPEC_OBJECT_TYPE ==
2241 LAST_SPEC_OBJECT_TYPE - 1);
2242 STATIC_ASSERT(LAST_SPEC_OBJECT_TYPE == LAST_TYPE);
2243
2244 // We expect CompareObjectType to load the object instance type in scratch1.
2245 __ CompareObjectType(input, map, scratch1, FIRST_SPEC_OBJECT_TYPE);
2246 __ B(lt, false_label);
2247 __ B(eq, true_label);
2248 __ Cmp(scratch1, LAST_SPEC_OBJECT_TYPE);
2249 __ B(eq, true_label);
2250 } else {
2251 __ IsObjectJSObjectType(input, map, scratch1, false_label);
2252 }
2253
2254 // Now we are in the FIRST-LAST_NONCALLABLE_SPEC_OBJECT_TYPE range.
2255 // Check if the constructor in the map is a function.
2256 __ Ldr(scratch1, FieldMemOperand(map, Map::kConstructorOffset));
2257
2258 // Objects with a non-function constructor have class 'Object'.
2259 if (class_name->IsUtf8EqualTo(CStrVector("Object"))) {
2260 __ JumpIfNotObjectType(
2261 scratch1, scratch2, scratch2, JS_FUNCTION_TYPE, true_label);
2262 } else {
2263 __ JumpIfNotObjectType(
2264 scratch1, scratch2, scratch2, JS_FUNCTION_TYPE, false_label);
2265 }
2266
2267 // The constructor function is in scratch1. Get its instance class name.
2268 __ Ldr(scratch1,
2269 FieldMemOperand(scratch1, JSFunction::kSharedFunctionInfoOffset));
2270 __ Ldr(scratch1,
2271 FieldMemOperand(scratch1,
2272 SharedFunctionInfo::kInstanceClassNameOffset));
2273
2274 // The class name we are testing against is internalized since it's a literal.
2275 // The name in the constructor is internalized because of the way the context
2276 // is booted. This routine isn't expected to work for random API-created
2277 // classes and it doesn't have to because you can't access it with natives
2278 // syntax. Since both sides are internalized it is sufficient to use an
2279 // identity comparison.
2280 EmitCompareAndBranch(instr, eq, scratch1, Operand(class_name));
2281 }
2282
2283
2284 void LCodeGen::DoCmpHoleAndBranchD(LCmpHoleAndBranchD* instr) {
2285 ASSERT(instr->hydrogen()->representation().IsDouble());
2286 FPRegister object = ToDoubleRegister(instr->object());
2287 Register temp = ToRegister(instr->temp());
2288
2289 // If we don't have a NaN, we don't have the hole, so branch now to avoid the
2290 // (relatively expensive) hole-NaN check.
2291 __ Fcmp(object, object);
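// Note (added for clarity): comparing a register with itself is unordered
// only for NaN, so 'vc' (ordered) below means the value is not a NaN.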
2292 __ B(vc, instr->FalseLabel(chunk_));
2293
2294 // We have a NaN, but is it the hole?
2295 __ Fmov(temp, object);
2296 EmitCompareAndBranch(instr, eq, temp, kHoleNanInt64);
2297 }
2298
2299
2300 void LCodeGen::DoCmpHoleAndBranchT(LCmpHoleAndBranchT* instr) {
2301 ASSERT(instr->hydrogen()->representation().IsTagged());
2302 Register object = ToRegister(instr->object());
2303
2304 EmitBranchIfRoot(instr, object, Heap::kTheHoleValueRootIndex);
2305 }
2306
2307
2308 void LCodeGen::DoCmpMapAndBranch(LCmpMapAndBranch* instr) {
2309 Register value = ToRegister(instr->value());
2310 Register map = ToRegister(instr->temp());
2311
2312 __ Ldr(map, FieldMemOperand(value, HeapObject::kMapOffset));
2313 EmitCompareAndBranch(instr, eq, map, Operand(instr->map()));
2314 }
2315
2316
2317 void LCodeGen::DoCompareMinusZeroAndBranch(LCompareMinusZeroAndBranch* instr) {
2318 Representation rep = instr->hydrogen()->value()->representation();
2319 ASSERT(!rep.IsInteger32());
2320 Register scratch = ToRegister(instr->temp());
2321
2322 if (rep.IsDouble()) {
2323 __ JumpIfMinusZero(ToDoubleRegister(instr->value()),
2324 instr->TrueLabel(chunk()));
2325 } else {
2326 Register value = ToRegister(instr->value());
2327 __ CheckMap(value, scratch, Heap::kHeapNumberMapRootIndex,
2328 instr->FalseLabel(chunk()), DO_SMI_CHECK);
2329 __ Ldr(double_scratch(), FieldMemOperand(value, HeapNumber::kValueOffset));
2330 __ JumpIfMinusZero(double_scratch(), instr->TrueLabel(chunk()));
2331 }
2332 EmitGoto(instr->FalseDestination(chunk()));
2333 }
2334
2335
2336 void LCodeGen::DoCompareNumericAndBranch(LCompareNumericAndBranch* instr) {
2337 LOperand* left = instr->left();
2338 LOperand* right = instr->right();
2339 Condition cond = TokenToCondition(instr->op(), false);
2340
2341 if (left->IsConstantOperand() && right->IsConstantOperand()) {
2342 // We can statically evaluate the comparison.
2343 double left_val = ToDouble(LConstantOperand::cast(left));
2344 double right_val = ToDouble(LConstantOperand::cast(right));
2345 int next_block = EvalComparison(instr->op(), left_val, right_val) ?
2346 instr->TrueDestination(chunk_) : instr->FalseDestination(chunk_);
2347 EmitGoto(next_block);
2348 } else {
2349 if (instr->is_double()) {
2350 if (right->IsConstantOperand()) {
2351 __ Fcmp(ToDoubleRegister(left),
2352 ToDouble(LConstantOperand::cast(right)));
2353 } else if (left->IsConstantOperand()) {
2354 // Transpose the operands and reverse the condition.
2355 __ Fcmp(ToDoubleRegister(right),
2356 ToDouble(LConstantOperand::cast(left)));
2357 cond = ReverseConditionForCmp(cond);
2358 } else {
2359 __ Fcmp(ToDoubleRegister(left), ToDoubleRegister(right));
2360 }
2361
2362 // If a NaN is involved, i.e. the result is unordered (V set),
2363 // jump to false block label.
2364 __ B(vs, instr->FalseLabel(chunk_));
2365 EmitBranch(instr, cond);
2366 } else {
2367 if (instr->hydrogen_value()->representation().IsInteger32()) {
2368 if (right->IsConstantOperand()) {
2369 EmitCompareAndBranch(instr,
2370 cond,
2371 ToRegister32(left),
2372 ToOperand32I(right));
2373 } else {
2374 // Transpose the operands and reverse the condition.
2375 EmitCompareAndBranch(instr,
2376 ReverseConditionForCmp(cond),
2377 ToRegister32(right),
2378 ToOperand32I(left));
2379 }
2380 } else {
2381 ASSERT(instr->hydrogen_value()->representation().IsSmi());
2382 if (right->IsConstantOperand()) {
2383 int32_t value = ToInteger32(LConstantOperand::cast(right));
2384 EmitCompareAndBranch(instr,
2385 cond,
2386 ToRegister(left),
2387 Operand(Smi::FromInt(value)));
2388 } else if (left->IsConstantOperand()) {
2389 // Transpose the operands and reverse the condition.
2390 int32_t value = ToInteger32(LConstantOperand::cast(left));
2391 EmitCompareAndBranch(instr,
2392 ReverseConditionForCmp(cond),
2393 ToRegister(right),
2394 Operand(Smi::FromInt(value)));
2395 } else {
2396 EmitCompareAndBranch(instr,
2397 cond,
2398 ToRegister(left),
2399 ToRegister(right));
2400 }
2401 }
2402 }
2403 }
2404 }
2405
2406
2407 void LCodeGen::DoCmpObjectEqAndBranch(LCmpObjectEqAndBranch* instr) {
2408 Register left = ToRegister(instr->left());
2409 Register right = ToRegister(instr->right());
2410 EmitCompareAndBranch(instr, eq, left, right);
2411 }
2412
2413
2414 void LCodeGen::DoCmpT(LCmpT* instr) {
2415 ASSERT(ToRegister(instr->context()).is(cp));
2416 Token::Value op = instr->op();
2417 Condition cond = TokenToCondition(op, false);
2418
2419 ASSERT(ToRegister(instr->left()).Is(x1));
2420 ASSERT(ToRegister(instr->right()).Is(x0));
2421 Handle<Code> ic = CompareIC::GetUninitialized(isolate(), op);
2422 CallCode(ic, RelocInfo::CODE_TARGET, instr);
2423 // Signal that we don't inline smi code before this stub.
2424 InlineSmiCheckInfo::EmitNotInlined(masm());
2425
2426 // Return true or false depending on CompareIC result.
2427 // This instruction is marked as call. We can clobber any register.
2428 ASSERT(instr->IsMarkedAsCall());
2429 __ LoadTrueFalseRoots(x1, x2);
2430 __ Cmp(x0, 0);
2431 __ Csel(ToRegister(instr->result()), x1, x2, cond);
2432 }
2433
2434
2435 void LCodeGen::DoConstantD(LConstantD* instr) {
2436 ASSERT(instr->result()->IsDoubleRegister());
2437 DoubleRegister result = ToDoubleRegister(instr->result());
2438 __ Fmov(result, instr->value());
2439 }
2440
2441
2442 void LCodeGen::DoConstantE(LConstantE* instr) {
2443 __ Mov(ToRegister(instr->result()), Operand(instr->value()));
2444 }
2445
2446
2447 void LCodeGen::DoConstantI(LConstantI* instr) {
2448 ASSERT(is_int32(instr->value()));
2449 // Cast the value here to ensure that the value isn't sign extended by the
2450 // implicit Operand constructor.
2451 __ Mov(ToRegister32(instr->result()), static_cast<uint32_t>(instr->value()));
2452 }
2453
2454
2455 void LCodeGen::DoConstantS(LConstantS* instr) {
2456 __ Mov(ToRegister(instr->result()), Operand(instr->value()));
2457 }
2458
2459
2460 void LCodeGen::DoConstantT(LConstantT* instr) {
2461 Handle<Object> value = instr->value(isolate());
2462 AllowDeferredHandleDereference smi_check;
2463 __ LoadObject(ToRegister(instr->result()), value);
2464 }
2465
2466
2467 void LCodeGen::DoContext(LContext* instr) {
2468 // If there is a non-return use, the context must be moved to a register.
2469 Register result = ToRegister(instr->result());
2470 if (info()->IsOptimizing()) {
2471 __ Ldr(result, MemOperand(fp, StandardFrameConstants::kContextOffset));
2472 } else {
2473 // If there is no frame, the context must be in cp.
2474 ASSERT(result.is(cp));
2475 }
2476 }
2477
2478
2479 void LCodeGen::DoCheckValue(LCheckValue* instr) {
2480 Register reg = ToRegister(instr->value());
2481 Handle<HeapObject> object = instr->hydrogen()->object().handle();
2482 AllowDeferredHandleDereference smi_check;
2483 if (isolate()->heap()->InNewSpace(*object)) {
2484 Register temp = ToRegister(instr->temp());
2485 Handle<Cell> cell = isolate()->factory()->NewCell(object);
2486 __ Mov(temp, Operand(Handle<Object>(cell)));
2487 __ Ldr(temp, FieldMemOperand(temp, Cell::kValueOffset));
2488 __ Cmp(reg, temp);
2489 } else {
2490 __ Cmp(reg, Operand(object));
2491 }
2492 DeoptimizeIf(ne, instr->environment());
2493 }
2494
2495
2496 void LCodeGen::DoLazyBailout(LLazyBailout* instr) {
2497 EnsureSpaceForLazyDeopt(Deoptimizer::patch_size());
2498 ASSERT(instr->HasEnvironment());
2499 LEnvironment* env = instr->environment();
2500 RegisterEnvironmentForDeoptimization(env, Safepoint::kLazyDeopt);
2501 safepoints_.RecordLazyDeoptimizationIndex(env->deoptimization_index());
2502 }
2503
2504
2505 void LCodeGen::DoDateField(LDateField* instr) {
2506 Register object = ToRegister(instr->date());
2507 Register result = ToRegister(instr->result());
2508 Register temp1 = x10;
2509 Register temp2 = x11;
2510 Smi* index = instr->index();
2511 Label runtime, done, deopt, obj_ok;
2512
2513 ASSERT(object.is(result) && object.Is(x0));
2514 ASSERT(instr->IsMarkedAsCall());
2515
2516 __ JumpIfSmi(object, &deopt);
2517 __ CompareObjectType(object, temp1, temp1, JS_DATE_TYPE);
2518 __ B(eq, &obj_ok);
2519
2520 __ Bind(&deopt);
2521 Deoptimize(instr->environment());
2522
2523 __ Bind(&obj_ok);
2524 if (index->value() == 0) {
2525 __ Ldr(result, FieldMemOperand(object, JSDate::kValueOffset));
2526 } else {
2527 if (index->value() < JSDate::kFirstUncachedField) {
2528 ExternalReference stamp = ExternalReference::date_cache_stamp(isolate());
2529 __ Mov(temp1, Operand(stamp));
2530 __ Ldr(temp1, MemOperand(temp1));
2531 __ Ldr(temp2, FieldMemOperand(object, JSDate::kCacheStampOffset));
2532 __ Cmp(temp1, temp2);
2533 __ B(ne, &runtime);
2534 __ Ldr(result, FieldMemOperand(object, JSDate::kValueOffset +
2535 kPointerSize * index->value()));
2536 __ B(&done);
2537 }
2538
2539 __ Bind(&runtime);
2540 __ Mov(x1, Operand(index));
2541 __ CallCFunction(ExternalReference::get_date_field_function(isolate()), 2);
2542 }
2543
2544 __ Bind(&done);
2545 }
2546
2547
2548 void LCodeGen::DoDeoptimize(LDeoptimize* instr) {
2549 Deoptimizer::BailoutType type = instr->hydrogen()->type();
2550 // TODO(danno): Stubs expect all deopts to be lazy for historical reasons (the
2551 // needed return address), even though the implementation of LAZY and EAGER is
2552 // now identical. When LAZY is eventually completely folded into EAGER, remove
2553 // the special case below.
2554 if (info()->IsStub() && (type == Deoptimizer::EAGER)) {
2555 type = Deoptimizer::LAZY;
2556 }
2557
2558 Comment(";;; deoptimize: %s", instr->hydrogen()->reason());
2559 Deoptimize(instr->environment(), &type);
2560 }
2561
2562
2563 void LCodeGen::DoDivI(LDivI* instr) {
2564 if (!instr->is_flooring() && instr->hydrogen()->RightIsPowerOf2()) {
2565 HDiv* hdiv = instr->hydrogen();
2566 Register dividend = ToRegister32(instr->left());
2567 int32_t divisor = hdiv->right()->GetInteger32Constant();
2568 Register result = ToRegister32(instr->result());
2569 ASSERT(!result.is(dividend));
2570
2571 // Check for (0 / -x) that will produce negative zero.
2572 if (hdiv->left()->RangeCanInclude(0) && divisor < 0 &&
2573 hdiv->CheckFlag(HValue::kBailoutOnMinusZero)) {
2574 __ Cmp(dividend, 0);
2575 DeoptimizeIf(eq, instr->environment());
2576 }
2577 // Check for (kMinInt / -1).
2578 if (hdiv->left()->RangeCanInclude(kMinInt) && divisor == -1 &&
2579 hdiv->CheckFlag(HValue::kCanOverflow)) {
2580 __ Cmp(dividend, kMinInt);
2581 DeoptimizeIf(eq, instr->environment());
2582 }
2583 // Deoptimize if remainder will not be 0.
2584 if (!hdiv->CheckFlag(HInstruction::kAllUsesTruncatingToInt32) &&
2585 Abs(divisor) != 1) {
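// Added note: since Abs(divisor) is a power of two, Abs(divisor) - 1 masks
// the bits that would be shifted out; any set bit means the division is
// inexact.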
2586 __ Tst(dividend, Abs(divisor) - 1);
2587 DeoptimizeIf(ne, instr->environment());
2588 }
2589 if (divisor == -1) { // Nice shortcut, not needed for correctness.
2590 __ Neg(result, dividend);
2591 return;
2592 }
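// Illustrative example (not part of the original source): for dividend = -7
// and Abs(divisor) = 4 (shift == 2), 'dividend ASR 31' is all ones, so
// 'LSR (32 - shift)' yields the bias 3; -7 + 3 = -4 and -4 ASR 2 = -1,
// which is -7 / 4 rounded towards zero.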
2593 int32_t shift = WhichPowerOf2(Abs(divisor));
2594 if (shift == 0) {
2595 __ Mov(result, dividend);
2596 } else if (shift == 1) {
2597 __ Add(result, dividend, Operand(dividend, LSR, 31));
2598 } else {
2599 __ Mov(result, Operand(dividend, ASR, 31));
2600 __ Add(result, dividend, Operand(result, LSR, 32 - shift));
2601 }
2602 if (shift > 0) __ Mov(result, Operand(result, ASR, shift));
2603 if (divisor < 0) __ Neg(result, result);
2604 return;
2605 }
2606
2607 Register dividend = ToRegister32(instr->left());
2608 Register divisor = ToRegister32(instr->right());
2609 Register result = ToRegister32(instr->result());
2610 HValue* hdiv = instr->hydrogen_value();
2611
2612 // Issue the division first, and then check for any deopt cases whilst the
2613 // result is computed.
2614 __ Sdiv(result, dividend, divisor);
2615
2616 if (hdiv->CheckFlag(HInstruction::kAllUsesTruncatingToInt32)) {
2617 ASSERT_EQ(NULL, instr->temp());
2618 return;
2619 }
2620
2621 Label deopt;
2622 // Check for x / 0.
2623 if (hdiv->CheckFlag(HValue::kCanBeDivByZero)) {
2624 __ Cbz(divisor, &deopt);
2625 }
2626
2627 // Check for (0 / -x) as that will produce negative zero.
2628 if (hdiv->CheckFlag(HValue::kBailoutOnMinusZero)) {
2629 __ Cmp(divisor, 0);
2630
2631 // If the divisor < 0 (mi), compare the dividend, and deopt if it is
2632 // zero, i.e. a zero dividend with a negative divisor deopts.
2633 // If the divisor >= 0 (pl, the opposite of mi), set the flags to
2634 // condition ne, so we don't deopt, i.e. a positive divisor doesn't deopt.
2635 __ Ccmp(dividend, 0, NoFlag, mi);
2636 __ B(eq, &deopt);
2637 }
2638
2639 // Check for (kMinInt / -1).
2640 if (hdiv->CheckFlag(HValue::kCanOverflow)) {
2641 // Test dividend for kMinInt by subtracting one (cmp) and checking for
2642 // overflow.
2643 __ Cmp(dividend, 1);
2644 // If overflow is set, i.e. dividend = kMinInt, compare the divisor with
2645 // -1. If overflow is clear, set the flags for condition ne, as the
2646 // dividend isn't kMinInt, and thus we shouldn't deopt.
2647 __ Ccmp(divisor, -1, NoFlag, vs);
2648 __ B(eq, &deopt);
2649 }
2650
2651 // Compute remainder and deopt if it's not zero.
2652 Register remainder = ToRegister32(instr->temp());
2653 __ Msub(remainder, result, divisor, dividend);
2654 __ Cbnz(remainder, &deopt);
2655
2656 Label div_ok;
2657 __ B(&div_ok);
2658 __ Bind(&deopt);
2659 Deoptimize(instr->environment());
2660 __ Bind(&div_ok);
2661 }
2662
2663
2664 void LCodeGen::DoDoubleToIntOrSmi(LDoubleToIntOrSmi* instr) {
2665 DoubleRegister input = ToDoubleRegister(instr->value());
2666 Register result = ToRegister32(instr->result());
2667 Label done, deopt;
2668
2669 if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) {
2670 __ JumpIfMinusZero(input, &deopt);
2671 }
2672
2673 __ TryConvertDoubleToInt32(result, input, double_scratch(), &done);
2674 __ Bind(&deopt);
2675 Deoptimize(instr->environment());
2676 __ Bind(&done);
2677
2678 if (instr->tag_result()) {
2679 __ SmiTag(result.X());
2680 }
2681 }
2682
2683
2684 void LCodeGen::DoDrop(LDrop* instr) {
2685 __ Drop(instr->count());
2686 }
2687
2688
2689 void LCodeGen::DoDummy(LDummy* instr) {
2690 // Nothing to see here, move on!
2691 }
2692
2693
2694 void LCodeGen::DoDummyUse(LDummyUse* instr) {
2695 // Nothing to see here, move on!
2696 }
2697
2698
2699 void LCodeGen::DoFunctionLiteral(LFunctionLiteral* instr) {
2700 ASSERT(ToRegister(instr->context()).is(cp));
2701 // FunctionLiteral is marked as a call, so we can trash any register.
2702 ASSERT(instr->IsMarkedAsCall());
2703
2704 // Use the fast case closure allocation code that allocates in new
2705 // space for nested functions that don't need literal cloning.
2706 bool pretenure = instr->hydrogen()->pretenure();
2707 if (!pretenure && instr->hydrogen()->has_no_literals()) {
2708 FastNewClosureStub stub(instr->hydrogen()->language_mode(),
2709 instr->hydrogen()->is_generator());
2710 __ Mov(x2, Operand(instr->hydrogen()->shared_info()));
2711 CallCode(stub.GetCode(isolate()), RelocInfo::CODE_TARGET, instr);
2712 } else {
2713 __ Mov(x2, Operand(instr->hydrogen()->shared_info()));
2714 __ Mov(x1, Operand(pretenure ? factory()->true_value()
2715 : factory()->false_value()));
2716 __ Push(cp, x2, x1);
2717 CallRuntime(Runtime::kNewClosure, 3, instr);
2718 }
2719 }
2720
2721
2722 void LCodeGen::DoForInCacheArray(LForInCacheArray* instr) {
2723 Register map = ToRegister(instr->map());
2724 Register result = ToRegister(instr->result());
2725 Label load_cache, done;
2726
2727 __ EnumLengthUntagged(result, map);
2728 __ Cbnz(result, &load_cache);
2729
2730 __ Mov(result, Operand(isolate()->factory()->empty_fixed_array()));
2731 __ B(&done);
2732
2733 __ Bind(&load_cache);
2734 __ LoadInstanceDescriptors(map, result);
2735 __ Ldr(result, FieldMemOperand(result, DescriptorArray::kEnumCacheOffset));
2736 __ Ldr(result, FieldMemOperand(result, FixedArray::SizeFor(instr->idx())));
2737 DeoptimizeIfZero(result, instr->environment());
2738
2739 __ Bind(&done);
2740 }
2741
2742
2743 void LCodeGen::DoForInPrepareMap(LForInPrepareMap* instr) {
2744 Register object = ToRegister(instr->object());
2745 Register null_value = x5;
2746
2747 ASSERT(instr->IsMarkedAsCall());
2748 ASSERT(object.Is(x0));
2749
2750 Label deopt;
2751
2752 __ JumpIfRoot(object, Heap::kUndefinedValueRootIndex, &deopt);
2753
2754 __ LoadRoot(null_value, Heap::kNullValueRootIndex);
2755 __ Cmp(object, null_value);
2756 __ B(eq, &deopt);
2757
2758 __ JumpIfSmi(object, &deopt);
2759
2760 STATIC_ASSERT(FIRST_JS_PROXY_TYPE == FIRST_SPEC_OBJECT_TYPE);
2761 __ CompareObjectType(object, x1, x1, LAST_JS_PROXY_TYPE);
2762 __ B(le, &deopt);
2763
2764 Label use_cache, call_runtime;
2765 __ CheckEnumCache(object, null_value, x1, x2, x3, x4, &call_runtime);
2766
2767 __ Ldr(object, FieldMemOperand(object, HeapObject::kMapOffset));
2768 __ B(&use_cache);
2769
2770 __ Bind(&deopt);
2771 Deoptimize(instr->environment());
2772
2773 // Get the set of properties to enumerate.
2774 __ Bind(&call_runtime);
2775 __ Push(object);
2776 CallRuntime(Runtime::kGetPropertyNamesFast, 1, instr);
2777
2778 __ Ldr(x1, FieldMemOperand(object, HeapObject::kMapOffset));
2779 __ JumpIfNotRoot(x1, Heap::kMetaMapRootIndex, &deopt);
2780
2781 __ Bind(&use_cache);
2782 }
2783
2784
2785 void LCodeGen::DoGetCachedArrayIndex(LGetCachedArrayIndex* instr) {
2786 Register input = ToRegister(instr->value());
2787 Register result = ToRegister(instr->result());
2788
2789 __ AssertString(input);
2790
2791 // Assert that we can use a W register load to get the hash.
2792 ASSERT((String::kHashShift + String::kArrayIndexValueBits) < kWRegSize);
2793 __ Ldr(result.W(), FieldMemOperand(input, String::kHashFieldOffset));
2794 __ IndexFromHash(result, result);
2795 }
2796
2797
2798 void LCodeGen::EmitGoto(int block) {
2799 // Do not emit jump if we are emitting a goto to the next block.
2800 if (!IsNextEmittedBlock(block)) {
2801 __ B(chunk_->GetAssemblyLabel(LookupDestination(block)));
2802 }
2803 }
2804
2805
2806 void LCodeGen::DoGoto(LGoto* instr) {
2807 EmitGoto(instr->block_id());
2808 }
2809
2810
2811 void LCodeGen::DoHasCachedArrayIndexAndBranch(
2812 LHasCachedArrayIndexAndBranch* instr) {
2813 Register input = ToRegister(instr->value());
2814 Register temp = ToRegister32(instr->temp());
2815
2816 // Assert that the cache status bits fit in a W register.
2817 ASSERT(is_uint32(String::kContainsCachedArrayIndexMask));
2818 __ Ldr(temp, FieldMemOperand(input, String::kHashFieldOffset));
2819 __ Tst(temp, String::kContainsCachedArrayIndexMask);
2820 EmitBranch(instr, eq);
2821 }
2822
2823
2824 // The HHasInstanceTypeAndBranch instruction is built with an interval of types
2825 // to test, but is only used in very restricted ways. The only possible kinds
2826 // of intervals are:
2827 // - [ FIRST_TYPE, instr->to() ]
2828 // - [ instr->from(), LAST_TYPE ]
2829 // - instr->from() == instr->to()
2830 //
2831 // These kinds of intervals can be checked with only one compare instruction
2832 // provided the correct value and test condition are used.
2833 //
2834 // TestType() will return the value to use in the compare instruction and
2835 // BranchCondition() will return the condition to use depending on the kind
2836 // of interval actually specified in the instruction.
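// For example (illustrative values only): [ FIRST_TYPE, instr->to() ] is
// tested with a single compare against instr->to() and a branch on 'ls',
// while [ instr->from(), LAST_TYPE ] compares against instr->from() and
// branches on 'hs'.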
2837 static InstanceType TestType(HHasInstanceTypeAndBranch* instr) {
2838 InstanceType from = instr->from();
2839 InstanceType to = instr->to();
2840 if (from == FIRST_TYPE) return to;
2841 ASSERT((from == to) || (to == LAST_TYPE));
2842 return from;
2843 }
2844
2845
2846 // See comment above TestType function for what this function does.
2847 static Condition BranchCondition(HHasInstanceTypeAndBranch* instr) {
2848 InstanceType from = instr->from();
2849 InstanceType to = instr->to();
2850 if (from == to) return eq;
2851 if (to == LAST_TYPE) return hs;
2852 if (from == FIRST_TYPE) return ls;
2853 UNREACHABLE();
2854 return eq;
2855 }
2856
2857
2858 void LCodeGen::DoHasInstanceTypeAndBranch(LHasInstanceTypeAndBranch* instr) {
2859 Register input = ToRegister(instr->value());
2860 Register scratch = ToRegister(instr->temp());
2861
2862 if (!instr->hydrogen()->value()->IsHeapObject()) {
2863 __ JumpIfSmi(input, instr->FalseLabel(chunk_));
2864 }
2865 __ CompareObjectType(input, scratch, scratch, TestType(instr->hydrogen()));
2866 EmitBranch(instr, BranchCondition(instr->hydrogen()));
2867 }
2868
2869
2870 void LCodeGen::DoInnerAllocatedObject(LInnerAllocatedObject* instr) {
2871 Register result = ToRegister(instr->result());
2872 Register base = ToRegister(instr->base_object());
2873 if (instr->offset()->IsConstantOperand()) {
2874 __ Add(result, base, ToOperand32I(instr->offset()));
2875 } else {
2876 __ Add(result, base, Operand(ToRegister32(instr->offset()), SXTW));
2877 }
2878 }
2879
2880
2881 void LCodeGen::DoInstanceOf(LInstanceOf* instr) {
2882 ASSERT(ToRegister(instr->context()).is(cp));
2883 // Assert that the arguments are in the registers expected by InstanceofStub.
2884 ASSERT(ToRegister(instr->left()).Is(InstanceofStub::left()));
2885 ASSERT(ToRegister(instr->right()).Is(InstanceofStub::right()));
2886
2887 InstanceofStub stub(InstanceofStub::kArgsInRegisters);
2888 CallCode(stub.GetCode(isolate()), RelocInfo::CODE_TARGET, instr);
2889
2890 // InstanceofStub returns a result in x0:
2891 // 0 => instance
2892 // smi 1 => not an instance.
2893 __ Cmp(x0, 0);
2894 __ LoadTrueFalseRoots(x0, x1);
2895 __ Csel(x0, x0, x1, eq);
2896 }
2897
2898
2899 void LCodeGen::DoInstanceOfKnownGlobal(LInstanceOfKnownGlobal* instr) {
2900 class DeferredInstanceOfKnownGlobal: public LDeferredCode {
2901 public:
2902 DeferredInstanceOfKnownGlobal(LCodeGen* codegen,
2903 LInstanceOfKnownGlobal* instr)
2904 : LDeferredCode(codegen), instr_(instr) { }
2905 virtual void Generate() {
2906 codegen()->DoDeferredInstanceOfKnownGlobal(instr_);
2907 }
2908 virtual LInstruction* instr() { return instr_; }
2909 private:
2910 LInstanceOfKnownGlobal* instr_;
2911 };
2912
2913 DeferredInstanceOfKnownGlobal* deferred =
2914 new(zone()) DeferredInstanceOfKnownGlobal(this, instr);
2915
2916 Label map_check, return_false, cache_miss, done;
2917 Register object = ToRegister(instr->value());
2918 Register result = ToRegister(instr->result());
2919 // x4 is expected in the associated deferred code and stub.
2920 Register map_check_site = x4;
2921 Register map = x5;
2922
2923 // This instruction is marked as call. We can clobber any register.
2924 ASSERT(instr->IsMarkedAsCall());
2925
2926 // We must take into account that object is in x11.
2927 ASSERT(object.Is(x11));
2928 Register scratch = x10;
2929
2930 // A Smi is not instance of anything.
2931 __ JumpIfSmi(object, &return_false);
2932
2933 // This is the inlined call site instanceof cache. The two occurrences of the
2934 // hole value will be patched to the last map/result pair generated by the
2935 // instanceof stub.
2936 __ Ldr(map, FieldMemOperand(object, HeapObject::kMapOffset));
2937 {
2938 // Below we use Factory::the_hole_value() on purpose instead of loading from
2939 // the root array to force relocation and later be able to patch with a
2940 // custom value.
2941 InstructionAccurateScope scope(masm(), 5);
2942 __ bind(&map_check);
2943 // Will be patched with the cached map.
2944 Handle<Cell> cell = factory()->NewCell(factory()->the_hole_value());
2945 __ LoadRelocated(scratch, Operand(Handle<Object>(cell)));
2946 __ ldr(scratch, FieldMemOperand(scratch, PropertyCell::kValueOffset));
2947 __ cmp(map, Operand(scratch));
2948 __ b(&cache_miss, ne);
2949 // The address of this instruction is computed relative to the map check
2950 // above, so check the size of the code generated.
2951 ASSERT(masm()->InstructionsGeneratedSince(&map_check) == 4);
2952 // Will be patched with the cached result.
2953 __ LoadRelocated(result, Operand(factory()->the_hole_value()));
2954 }
2955 __ B(&done);
2956
2957 // The inlined call site cache did not match.
2958 // Check null and string before calling the deferred code.
2959 __ Bind(&cache_miss);
2960 // Compute the address of the map check. It must not be clobbered until the
2961 // InstanceofStub has used it.
2962 __ Adr(map_check_site, &map_check);
2963 // Null is not instance of anything.
2964 __ JumpIfRoot(object, Heap::kNullValueRootIndex, &return_false);
2965
2966 // String values are not instances of anything.
2967 // Return false if the object is a string. Otherwise, jump to the deferred
2968 // code.
2969 // Note that we can't jump directly to deferred code from
2970 // IsObjectJSStringType, because it uses tbz for the jump and the deferred
2971 // code can be out of range.
2972 __ IsObjectJSStringType(object, scratch, NULL, &return_false);
2973 __ B(deferred->entry());
2974
2975 __ Bind(&return_false);
2976 __ LoadRoot(result, Heap::kFalseValueRootIndex);
2977
2978 // Here result is either true or false.
2979 __ Bind(deferred->exit());
2980 __ Bind(&done);
2981 }
2982
2983
2984 void LCodeGen::DoDeferredInstanceOfKnownGlobal(LInstanceOfKnownGlobal* instr) {
2985 Register result = ToRegister(instr->result());
2986 ASSERT(result.Is(x0)); // InstanceofStub returns its result in x0.
2987 InstanceofStub::Flags flags = InstanceofStub::kNoFlags;
2988 flags = static_cast<InstanceofStub::Flags>(
2989 flags | InstanceofStub::kArgsInRegisters);
2990 flags = static_cast<InstanceofStub::Flags>(
2991 flags | InstanceofStub::kReturnTrueFalseObject);
2992 flags = static_cast<InstanceofStub::Flags>(
2993 flags | InstanceofStub::kCallSiteInlineCheck);
2994
2995 PushSafepointRegistersScope scope(this, Safepoint::kWithRegisters);
2996 LoadContextFromDeferred(instr->context());
2997
2998 // Prepare InstanceofStub arguments.
2999 ASSERT(ToRegister(instr->value()).Is(InstanceofStub::left()));
3000 __ LoadObject(InstanceofStub::right(), instr->function());
3001
3002 InstanceofStub stub(flags);
3003 CallCodeGeneric(stub.GetCode(isolate()),
3004 RelocInfo::CODE_TARGET,
3005 instr,
3006 RECORD_SAFEPOINT_WITH_REGISTERS_AND_NO_ARGUMENTS);
3007 LEnvironment* env = instr->GetDeferredLazyDeoptimizationEnvironment();
3008 safepoints_.RecordLazyDeoptimizationIndex(env->deoptimization_index());
3009
3010 // Put the result value into the result register slot.
3011 __ StoreToSafepointRegisterSlot(result, result);
3012 }
3013
3014
3015 void LCodeGen::DoInstructionGap(LInstructionGap* instr) {
3016 DoGap(instr);
3017 }
3018
3019
3020 void LCodeGen::DoInteger32ToDouble(LInteger32ToDouble* instr) {
3021 Register value = ToRegister32(instr->value());
3022 DoubleRegister result = ToDoubleRegister(instr->result());
3023 __ Scvtf(result, value);
3024 }
3025
3026
3027 void LCodeGen::DoInteger32ToSmi(LInteger32ToSmi* instr) {
3028 // A64 smis can represent all Integer32 values, so this cannot deoptimize.
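// (Added note: on A64 the smi payload occupies the upper 32 bits of the
// tagged value, so every int32 value fits without loss.)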
3029 ASSERT(!instr->hydrogen()->value()->HasRange() ||
3030 instr->hydrogen()->value()->range()->IsInSmiRange());
3031
3032 Register value = ToRegister32(instr->value());
3033 Register result = ToRegister(instr->result());
3034 __ SmiTag(result, value.X());
3035 }
3036
3037
3038 void LCodeGen::DoInvokeFunction(LInvokeFunction* instr) {
3039 ASSERT(ToRegister(instr->context()).is(cp));
3040 // The function is required to be in x1.
3041 ASSERT(ToRegister(instr->function()).is(x1));
3042 ASSERT(instr->HasPointerMap());
3043
3044 Handle<JSFunction> known_function = instr->hydrogen()->known_function();
3045 if (known_function.is_null()) {
3046 LPointerMap* pointers = instr->pointer_map();
3047 SafepointGenerator generator(this, pointers, Safepoint::kLazyDeopt);
3048 ParameterCount count(instr->arity());
3049 __ InvokeFunction(x1, count, CALL_FUNCTION, generator);
3050 } else {
3051 CallKnownFunction(known_function,
3052 instr->hydrogen()->formal_parameter_count(),
3053 instr->arity(),
3054 instr,
3055 x1);
3056 }
3057 }
3058
3059
3060 void LCodeGen::DoIsConstructCallAndBranch(LIsConstructCallAndBranch* instr) {
3061 Register temp1 = ToRegister(instr->temp1());
3062 Register temp2 = ToRegister(instr->temp2());
3063
3064 // Get the frame pointer for the calling frame.
3065 __ Ldr(temp1, MemOperand(fp, StandardFrameConstants::kCallerFPOffset));
3066
3067 // Skip the arguments adaptor frame if it exists.
3068 Label check_frame_marker;
3069 __ Ldr(temp2, MemOperand(temp1, StandardFrameConstants::kContextOffset));
3070 __ Cmp(temp2, Operand(Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR)));
3071 __ B(ne, &check_frame_marker);
3072 __ Ldr(temp1, MemOperand(temp1, StandardFrameConstants::kCallerFPOffset));
3073
3074 // Check the marker in the calling frame.
3075 __ Bind(&check_frame_marker);
3076 __ Ldr(temp1, MemOperand(temp1, StandardFrameConstants::kMarkerOffset));
3077
3078 EmitCompareAndBranch(
3079 instr, eq, temp1, Operand(Smi::FromInt(StackFrame::CONSTRUCT)));
3080 }
3081
3082
3083 void LCodeGen::DoIsObjectAndBranch(LIsObjectAndBranch* instr) {
3084 Label* is_object = instr->TrueLabel(chunk_);
3085 Label* is_not_object = instr->FalseLabel(chunk_);
3086 Register value = ToRegister(instr->value());
3087 Register map = ToRegister(instr->temp1());
3088 Register scratch = ToRegister(instr->temp2());
3089
3090 __ JumpIfSmi(value, is_not_object);
3091 __ JumpIfRoot(value, Heap::kNullValueRootIndex, is_object);
3092
3093 __ Ldr(map, FieldMemOperand(value, HeapObject::kMapOffset));
3094
3095 // Check for undetectable objects.
3096 __ Ldrb(scratch, FieldMemOperand(map, Map::kBitFieldOffset));
3097 __ TestAndBranchIfAnySet(scratch, 1 << Map::kIsUndetectable, is_not_object);
3098
3099 // Check that instance type is in object type range.
3100 __ IsInstanceJSObjectType(map, scratch, NULL);
3101 // Flags have been updated by IsInstanceJSObjectType. We can now test the
3102 // flags for the "le" condition to check if the object's type is a valid
3103 // JS object type.
3104 EmitBranch(instr, le);
3105 }
3106
3107
3108 Condition LCodeGen::EmitIsString(Register input,
3109 Register temp1,
3110 Label* is_not_string,
3111 SmiCheck check_needed = INLINE_SMI_CHECK) {
3112 if (check_needed == INLINE_SMI_CHECK) {
3113 __ JumpIfSmi(input, is_not_string);
3114 }
3115 __ CompareObjectType(input, temp1, temp1, FIRST_NONSTRING_TYPE);
3116
3117 return lt;
3118 }
3119
3120
3121 void LCodeGen::DoIsStringAndBranch(LIsStringAndBranch* instr) {
3122 Register val = ToRegister(instr->value());
3123 Register scratch = ToRegister(instr->temp());
3124
3125 SmiCheck check_needed =
3126 instr->hydrogen()->value()->IsHeapObject()
3127 ? OMIT_SMI_CHECK : INLINE_SMI_CHECK;
3128 Condition true_cond =
3129 EmitIsString(val, scratch, instr->FalseLabel(chunk_), check_needed);
3130
3131 EmitBranch(instr, true_cond);
3132 }
3133
3134
3135 void LCodeGen::DoIsSmiAndBranch(LIsSmiAndBranch* instr) {
3136 Register value = ToRegister(instr->value());
3137 STATIC_ASSERT(kSmiTag == 0);
3138 EmitTestAndBranch(instr, eq, value, kSmiTagMask);
3139 }
3140
3141
3142 void LCodeGen::DoIsUndetectableAndBranch(LIsUndetectableAndBranch* instr) {
3143 Register input = ToRegister(instr->value());
3144 Register temp = ToRegister(instr->temp());
3145
3146 if (!instr->hydrogen()->value()->IsHeapObject()) {
3147 __ JumpIfSmi(input, instr->FalseLabel(chunk_));
3148 }
3149 __ Ldr(temp, FieldMemOperand(input, HeapObject::kMapOffset));
3150 __ Ldrb(temp, FieldMemOperand(temp, Map::kBitFieldOffset));
3151
3152 EmitTestAndBranch(instr, ne, temp, 1 << Map::kIsUndetectable);
3153 }
3154
3155
3156 static const char* LabelType(LLabel* label) {
3157 if (label->is_loop_header()) return " (loop header)";
3158 if (label->is_osr_entry()) return " (OSR entry)";
3159 return "";
3160 }
3161
3162
3163 void LCodeGen::DoLabel(LLabel* label) {
3164 Comment(";;; <@%d,#%d> -------------------- B%d%s --------------------",
3165 current_instruction_,
3166 label->hydrogen_value()->id(),
3167 label->block_id(),
3168 LabelType(label));
3169
3170 __ Bind(label->label());
3171 current_block_ = label->block_id();
3172 DoGap(label);
3173 }
3174
3175
3176 void LCodeGen::DoLoadContextSlot(LLoadContextSlot* instr) {
3177 Register context = ToRegister(instr->context());
3178 Register result = ToRegister(instr->result());
3179 __ Ldr(result, ContextMemOperand(context, instr->slot_index()));
3180 if (instr->hydrogen()->RequiresHoleCheck()) {
3181 if (instr->hydrogen()->DeoptimizesOnHole()) {
3182 DeoptimizeIfRoot(result, Heap::kTheHoleValueRootIndex,
3183 instr->environment());
3184 } else {
3185 Label not_the_hole;
3186 __ JumpIfNotRoot(result, Heap::kTheHoleValueRootIndex, &not_the_hole);
3187 __ LoadRoot(result, Heap::kUndefinedValueRootIndex);
3188 __ Bind(&not_the_hole);
3189 }
3190 }
3191 }
3192
3193
3194 void LCodeGen::DoLoadFunctionPrototype(LLoadFunctionPrototype* instr) {
3195 Register function = ToRegister(instr->function());
3196 Register result = ToRegister(instr->result());
3197 Register temp = ToRegister(instr->temp());
3198 Label deopt;
3199
3200 // Check that the function really is a function. Leaves map in the result
3201 // register.
3202 __ JumpIfNotObjectType(function, result, temp, JS_FUNCTION_TYPE, &deopt);
3203
3204 // Make sure that the function has an instance prototype.
3205 Label non_instance;
3206 __ Ldrb(temp, FieldMemOperand(result, Map::kBitFieldOffset));
3207 __ Tbnz(temp, Map::kHasNonInstancePrototype, &non_instance);
3208
3209 // Get the prototype or initial map from the function.
3210 __ Ldr(result, FieldMemOperand(function,
3211 JSFunction::kPrototypeOrInitialMapOffset));
3212
3213 // Check that the function has a prototype or an initial map.
3214 __ JumpIfRoot(result, Heap::kTheHoleValueRootIndex, &deopt);
3215
3216 // If the function does not have an initial map, we're done.
3217 Label done;
3218 __ CompareObjectType(result, temp, temp, MAP_TYPE);
3219 __ B(ne, &done);
3220
3221 // Get the prototype from the initial map.
3222 __ Ldr(result, FieldMemOperand(result, Map::kPrototypeOffset));
3223 __ B(&done);
3224
3225 // Non-instance prototype: fetch prototype from constructor field in initial
3226 // map.
3227 __ Bind(&non_instance);
3228 __ Ldr(result, FieldMemOperand(result, Map::kConstructorOffset));
3229 __ B(&done);
3230
3231 // Deoptimize case.
3232 __ Bind(&deopt);
3233 Deoptimize(instr->environment());
3234
3235 // All done.
3236 __ Bind(&done);
3237 }
3238
3239
3240 void LCodeGen::DoLoadGlobalCell(LLoadGlobalCell* instr) {
3241 Register result = ToRegister(instr->result());
3242 __ Mov(result, Operand(Handle<Object>(instr->hydrogen()->cell().handle())));
3243 __ Ldr(result, FieldMemOperand(result, Cell::kValueOffset));
3244 if (instr->hydrogen()->RequiresHoleCheck()) {
3245 DeoptimizeIfRoot(
3246 result, Heap::kTheHoleValueRootIndex, instr->environment());
3247 }
3248 }
3249
3250
3251 void LCodeGen::DoLoadGlobalGeneric(LLoadGlobalGeneric* instr) {
3252 ASSERT(ToRegister(instr->context()).is(cp));
3253 ASSERT(ToRegister(instr->global_object()).Is(x0));
3254 ASSERT(ToRegister(instr->result()).Is(x0));
3255 __ Mov(x2, Operand(instr->name()));
3256 ContextualMode mode = instr->for_typeof() ? NOT_CONTEXTUAL : CONTEXTUAL;
3257 Handle<Code> ic = LoadIC::initialize_stub(isolate(), mode);
3258 CallCode(ic, RelocInfo::CODE_TARGET, instr);
3259 }
3260
3261
3262 MemOperand LCodeGen::PrepareKeyedExternalArrayOperand(
3263 Register key,
3264 Register base,
3265 Register scratch,
3266 bool key_is_smi,
3267 bool key_is_constant,
3268 int constant_key,
3269 ElementsKind elements_kind,
3270 int additional_index) {
3271 int element_size_shift = ElementsKindToShiftSize(elements_kind);
3272 int additional_offset = IsFixedTypedArrayElementsKind(elements_kind)
3273 ? FixedTypedArrayBase::kDataOffset - kHeapObjectTag
3274 : 0;
3275
3276 if (key_is_constant) {
3277 int base_offset = ((constant_key + additional_index) << element_size_shift);
3278 return MemOperand(base, base_offset + additional_offset);
3279 }
3280
3281 if (additional_index == 0) {
3282 if (key_is_smi) {
3283 // Key is smi: untag, and scale by element size.
3284 __ Add(scratch, base, Operand::UntagSmiAndScale(key, element_size_shift));
3285 return MemOperand(scratch, additional_offset);
3286 } else {
3287 // Key is not smi: sign extend and scale by element size.
3288 if (additional_offset == 0) {
3289 return MemOperand(base, key, SXTW, element_size_shift);
3290 } else {
3291 __ Add(scratch, base, Operand(key, SXTW, element_size_shift));
3292 return MemOperand(scratch, additional_offset);
3293 }
3294 }
3295 } else {
3296 // TODO(all): Try to combine these cases a bit more intelligently.
3297 if (additional_offset == 0) {
3298 if (key_is_smi) {
3299 __ SmiUntag(scratch, key);
3300 __ Add(scratch.W(), scratch.W(), additional_index);
3301 } else {
3302 __ Add(scratch.W(), key.W(), additional_index);
3303 }
3304 return MemOperand(base, scratch, LSL, element_size_shift);
3305 } else {
3306 if (key_is_smi) {
3307 __ Add(scratch, base,
3308 Operand::UntagSmiAndScale(key, element_size_shift));
3309 } else {
3310 __ Add(scratch, base, Operand(key, SXTW, element_size_shift));
3311 }
3312 return MemOperand(
3313 scratch,
3314 (additional_index << element_size_shift) + additional_offset);
3315 }
3316 }
3317 }
3318
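As an aside, the operand construction above reduces to the usual typed-array address arithmetic: address = base + ((key + additional_index) << element_size_shift) + additional_offset, with a smi key untagged first and a non-smi key sign-extended from 32 bits. A minimal standalone sketch of that arithmetic (plain C++, not the V8 MacroAssembler; the helper name is illustrative only):

#include <cstdint>

// Illustrative only: the byte address of element (key + additional_index) in
// an external/typed array with (1 << element_size_shift)-byte elements.
uint64_t KeyedExternalAddress(uint64_t base, int64_t key, bool key_is_smi,
                              int element_size_shift, int additional_index,
                              int additional_offset) {
  if (key_is_smi) {
    key >>= 32;                          // Untag: the value is in the top 32 bits.
  } else {
    key = static_cast<int32_t>(key);     // Sign extend, like the SXTW above.
  }
  return base +
         (static_cast<uint64_t>(key + additional_index) << element_size_shift) +
         additional_offset;
}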
3319
3320 void LCodeGen::DoLoadKeyedExternal(LLoadKeyedExternal* instr) {
3321 Register ext_ptr = ToRegister(instr->elements());
3322 Register scratch;
3323 ElementsKind elements_kind = instr->elements_kind();
3324
3325 bool key_is_smi = instr->hydrogen()->key()->representation().IsSmi();
3326 bool key_is_constant = instr->key()->IsConstantOperand();
3327 Register key = no_reg;
3328 int constant_key = 0;
3329 if (key_is_constant) {
3330 ASSERT(instr->temp() == NULL);
3331 constant_key = ToInteger32(LConstantOperand::cast(instr->key()));
3332 if (constant_key & 0xf0000000) {
3333 Abort(kArrayIndexConstantValueTooBig);
3334 }
3335 } else {
3336 scratch = ToRegister(instr->temp());
3337 key = ToRegister(instr->key());
3338 }
3339
3340 MemOperand mem_op =
3341 PrepareKeyedExternalArrayOperand(key, ext_ptr, scratch, key_is_smi,
3342 key_is_constant, constant_key,
3343 elements_kind,
3344 instr->additional_index());
3345
3346 if ((elements_kind == EXTERNAL_FLOAT32_ELEMENTS) ||
3347 (elements_kind == FLOAT32_ELEMENTS)) {
3348 DoubleRegister result = ToDoubleRegister(instr->result());
3349 __ Ldr(result.S(), mem_op);
3350 __ Fcvt(result, result.S());
3351 } else if ((elements_kind == EXTERNAL_FLOAT64_ELEMENTS) ||
3352 (elements_kind == FLOAT64_ELEMENTS)) {
3353 DoubleRegister result = ToDoubleRegister(instr->result());
3354 __ Ldr(result, mem_op);
3355 } else {
3356 Register result = ToRegister(instr->result());
3357
3358 switch (elements_kind) {
3359 case EXTERNAL_INT8_ELEMENTS:
3360 case INT8_ELEMENTS:
3361 __ Ldrsb(result, mem_op);
3362 break;
3363 case EXTERNAL_UINT8_CLAMPED_ELEMENTS:
3364 case EXTERNAL_UINT8_ELEMENTS:
3365 case UINT8_ELEMENTS:
3366 case UINT8_CLAMPED_ELEMENTS:
3367 __ Ldrb(result, mem_op);
3368 break;
3369 case EXTERNAL_INT16_ELEMENTS:
3370 case INT16_ELEMENTS:
3371 __ Ldrsh(result, mem_op);
3372 break;
3373 case EXTERNAL_UINT16_ELEMENTS:
3374 case UINT16_ELEMENTS:
3375 __ Ldrh(result, mem_op);
3376 break;
3377 case EXTERNAL_INT32_ELEMENTS:
3378 case INT32_ELEMENTS:
3379 __ Ldrsw(result, mem_op);
3380 break;
3381 case EXTERNAL_UINT32_ELEMENTS:
3382 case UINT32_ELEMENTS:
3383 __ Ldr(result.W(), mem_op);
3384 if (!instr->hydrogen()->CheckFlag(HInstruction::kUint32)) {
3385 // Deopt if the value does not fit in an int32, i.e. value >= 0x80000000.
3386 __ Tst(result, 0xFFFFFFFF80000000);
3387 DeoptimizeIf(ne, instr->environment());
3388 }
3389 break;
3390 case FLOAT32_ELEMENTS:
3391 case FLOAT64_ELEMENTS:
3392 case EXTERNAL_FLOAT32_ELEMENTS:
3393 case EXTERNAL_FLOAT64_ELEMENTS:
3394 case FAST_HOLEY_DOUBLE_ELEMENTS:
3395 case FAST_HOLEY_ELEMENTS:
3396 case FAST_HOLEY_SMI_ELEMENTS:
3397 case FAST_DOUBLE_ELEMENTS:
3398 case FAST_ELEMENTS:
3399 case FAST_SMI_ELEMENTS:
3400 case DICTIONARY_ELEMENTS:
3401 case NON_STRICT_ARGUMENTS_ELEMENTS:
3402 UNREACHABLE();
3403 break;
3404 }
3405 }
3406 }
3407
3408
3409 void LCodeGen::CalcKeyedArrayBaseRegister(Register base,
3410 Register elements,
3411 Register key,
3412 bool key_is_tagged,
3413 ElementsKind elements_kind) {
3414 int element_size_shift = ElementsKindToShiftSize(elements_kind);
3415
3416 // Even though the HLoad/StoreKeyed instructions force the input
3417 // representation for the key to be an integer, the input gets replaced during
3418 // bounds check elimination with the index argument to the bounds check, which
3419 // can be tagged, so that case must be handled here, too.
3420 if (key_is_tagged) {
3421 __ Add(base, elements, Operand::UntagSmiAndScale(key, element_size_shift));
3422 } else {
3423 // Sign extend key because it could be a 32-bit negative value or contain
3424 // garbage in the top 32-bits. The address computation happens in 64-bit.
3425 ASSERT((element_size_shift >= 0) && (element_size_shift <= 4));
3426 __ Add(base, elements, Operand(key, SXTW, element_size_shift));
3427 }
3428 }
3429
3430
3431 void LCodeGen::DoLoadKeyedFixedDouble(LLoadKeyedFixedDouble* instr) {
3432 Register elements = ToRegister(instr->elements());
3433 DoubleRegister result = ToDoubleRegister(instr->result());
3434 Register load_base;
3435 int offset = 0;
3436
3437 if (instr->key()->IsConstantOperand()) {
3438 ASSERT(instr->hydrogen()->RequiresHoleCheck() ||
3439 (instr->temp() == NULL));
3440
3441 int constant_key = ToInteger32(LConstantOperand::cast(instr->key()));
3442 if (constant_key & 0xf0000000) {
3443 Abort(kArrayIndexConstantValueTooBig);
3444 }
3445 offset = FixedDoubleArray::OffsetOfElementAt(constant_key +
3446 instr->additional_index());
3447 load_base = elements;
3448 } else {
3449 load_base = ToRegister(instr->temp());
3450 Register key = ToRegister(instr->key());
3451 bool key_is_tagged = instr->hydrogen()->key()->representation().IsSmi();
3452 CalcKeyedArrayBaseRegister(load_base, elements, key, key_is_tagged,
3453 instr->hydrogen()->elements_kind());
3454 offset = FixedDoubleArray::OffsetOfElementAt(instr->additional_index());
3455 }
3456 __ Ldr(result, FieldMemOperand(load_base, offset));
3457
3458 if (instr->hydrogen()->RequiresHoleCheck()) {
3459 Register scratch = ToRegister(instr->temp());
3460
3461 // TODO(all): Is it faster to reload this value to an integer register, or
3462 // move from fp to integer?
3463 __ Fmov(scratch, result);
3464 __ Cmp(scratch, kHoleNanInt64);
3465 DeoptimizeIf(eq, instr->environment());
3466 }
3467 }
3468
3469
3470 void LCodeGen::DoLoadKeyedFixed(LLoadKeyedFixed* instr) {
3471 Register elements = ToRegister(instr->elements());
3472 Register result = ToRegister(instr->result());
3473 Register load_base;
3474 int offset = 0;
3475
3476 if (instr->key()->IsConstantOperand()) {
3477 ASSERT(instr->temp() == NULL);
3478 LConstantOperand* const_operand = LConstantOperand::cast(instr->key());
3479 offset = FixedArray::OffsetOfElementAt(ToInteger32(const_operand) +
3480 instr->additional_index());
3481 load_base = elements;
3482 } else {
3483 load_base = ToRegister(instr->temp());
3484 Register key = ToRegister(instr->key());
3485 bool key_is_tagged = instr->hydrogen()->key()->representation().IsSmi();
3486 CalcKeyedArrayBaseRegister(load_base, elements, key, key_is_tagged,
3487 instr->hydrogen()->elements_kind());
3488 offset = FixedArray::OffsetOfElementAt(instr->additional_index());
3489 }
3490 Representation representation = instr->hydrogen()->representation();
3491
3492 if (representation.IsInteger32() &&
3493 instr->hydrogen()->elements_kind() == FAST_SMI_ELEMENTS) {
3494 STATIC_ASSERT(kSmiValueSize == 32 && kSmiShift == 32 && kSmiTag == 0);
3495 __ Load(result, UntagSmiFieldMemOperand(load_base, offset),
3496 Representation::Integer32());
3497 } else {
3498 __ Load(result, FieldMemOperand(load_base, offset),
3499 representation);
3500 }
3501
3502 if (instr->hydrogen()->RequiresHoleCheck()) {
3503 if (IsFastSmiElementsKind(instr->hydrogen()->elements_kind())) {
3504 DeoptimizeIfNotSmi(result, instr->environment());
3505 } else {
3506 DeoptimizeIfRoot(result, Heap::kTheHoleValueRootIndex,
3507 instr->environment());
3508 }
3509 }
3510 }
3511
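The UntagSmiFieldMemOperand fast path above depends on the smi layout asserted by the STATIC_ASSERT: on A64 the 32-bit payload lives in the upper half of the 64-bit word, so loading only the high 32 bits already yields the untagged integer. A small standalone illustration of that layout (plain C++, assuming the little-endian byte order A64 code here runs with):

#include <cassert>
#include <cstdint>
#include <cstring>

int main() {
  int32_t value = -42;
  int64_t smi = static_cast<int64_t>(value) << 32;  // kSmiShift == 32, tag == 0.

  // Reading just the upper 32-bit half of the word in memory recovers the
  // value with no explicit untag step (offset 4 on a little-endian target).
  int32_t halves[2];
  std::memcpy(halves, &smi, sizeof(smi));
  assert(halves[1] == value);
  return 0;
}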
3512
3513 void LCodeGen::DoLoadKeyedGeneric(LLoadKeyedGeneric* instr) {
3514 ASSERT(ToRegister(instr->context()).is(cp));
3515 ASSERT(ToRegister(instr->object()).Is(x1));
3516 ASSERT(ToRegister(instr->key()).Is(x0));
3517
3518 Handle<Code> ic = isolate()->builtins()->KeyedLoadIC_Initialize();
3519 CallCode(ic, RelocInfo::CODE_TARGET, instr);
3520
3521 ASSERT(ToRegister(instr->result()).Is(x0));
3522 }
3523
3524
3525 void LCodeGen::DoLoadNamedField(LLoadNamedField* instr) {
3526 HObjectAccess access = instr->hydrogen()->access();
3527 int offset = access.offset();
3528 Register object = ToRegister(instr->object());
3529
3530 if (access.IsExternalMemory()) {
3531 Register result = ToRegister(instr->result());
3532 __ Load(result, MemOperand(object, offset), access.representation());
3533 return;
3534 }
3535
3536 if (instr->hydrogen()->representation().IsDouble()) {
3537 FPRegister result = ToDoubleRegister(instr->result());
3538 __ Ldr(result, FieldMemOperand(object, offset));
3539 return;
3540 }
3541
3542 Register result = ToRegister(instr->result());
3543 Register source;
3544 if (access.IsInobject()) {
3545 source = object;
3546 } else {
3547 // Load the properties array, using result as a scratch register.
3548 __ Ldr(result, FieldMemOperand(object, JSObject::kPropertiesOffset));
3549 source = result;
3550 }
3551
3552 if (access.representation().IsSmi() &&
3553 instr->hydrogen()->representation().IsInteger32()) {
3554 // Read int value directly from upper half of the smi.
3555 STATIC_ASSERT(kSmiValueSize == 32 && kSmiShift == 32 && kSmiTag == 0);
3556 __ Load(result, UntagSmiFieldMemOperand(source, offset),
3557 Representation::Integer32());
3558 } else {
3559 __ Load(result, FieldMemOperand(source, offset), access.representation());
3560 }
3561 }
3562
3563
3564 void LCodeGen::DoLoadNamedGeneric(LLoadNamedGeneric* instr) {
3565 ASSERT(ToRegister(instr->context()).is(cp));
3566 // LoadIC expects x2 to hold the name, and x0 to hold the receiver.
3567 ASSERT(ToRegister(instr->object()).is(x0));
3568 __ Mov(x2, Operand(instr->name()));
3569
3570 Handle<Code> ic = LoadIC::initialize_stub(isolate(), NOT_CONTEXTUAL);
3571 CallCode(ic, RelocInfo::CODE_TARGET, instr);
3572
3573 ASSERT(ToRegister(instr->result()).is(x0));
3574 }
3575
3576
3577 void LCodeGen::DoLoadRoot(LLoadRoot* instr) {
3578 Register result = ToRegister(instr->result());
3579 __ LoadRoot(result, instr->index());
3580 }
3581
3582
3583 void LCodeGen::DoMapEnumLength(LMapEnumLength* instr) {
3584 Register result = ToRegister(instr->result());
3585 Register map = ToRegister(instr->value());
3586 __ EnumLengthSmi(result, map);
3587 }
3588
3589
3590 void LCodeGen::DoMathAbs(LMathAbs* instr) {
3591 Representation r = instr->hydrogen()->value()->representation();
3592 if (r.IsDouble()) {
3593 DoubleRegister input = ToDoubleRegister(instr->value());
3594 DoubleRegister result = ToDoubleRegister(instr->result());
3595 __ Fabs(result, input);
3596 } else if (r.IsSmi() || r.IsInteger32()) {
3597 Register input = r.IsSmi() ? ToRegister(instr->value())
3598 : ToRegister32(instr->value());
3599 Register result = r.IsSmi() ? ToRegister(instr->result())
3600 : ToRegister32(instr->result());
3601 Label done;
3602 __ Abs(result, input, NULL, &done);
3603 Deoptimize(instr->environment());
3604 __ Bind(&done);
3605 }
3606 }
3607
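The deoptimization after __ Abs above exists for the single int32 input whose absolute value is not representable: kMinInt. A standalone check of that arithmetic fact (plain C++):

#include <cassert>
#include <cstdint>

int main() {
  int32_t min_int = INT32_MIN;                      // -2147483648
  int64_t magnitude = -static_cast<int64_t>(min_int);
  // +2147483648 does not fit in an int32, so an integer abs must bail out
  // (deoptimize above) instead of silently wrapping back to INT32_MIN.
  assert(magnitude == 2147483648LL);
  assert(magnitude > INT32_MAX);
  return 0;
}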
3608
3609 void LCodeGen::DoDeferredMathAbsTagged(LMathAbsTagged* instr,
3610 Label* exit,
3611 Label* allocation_entry) {
3612 // Handle the tricky cases of MathAbsTagged:
3613 // - HeapNumber inputs.
3614 // - Negative inputs produce a positive result, so a new HeapNumber is
3615 // allocated to hold it.
3616 // - Positive inputs are returned as-is, since there is no need to allocate
3617 // a new HeapNumber for the result.
3618 // - The (smi) input -0x80000000 produces +0x80000000, which does not fit
3619 // a smi. In this case, the inline code sets the result and jumps directly
3620 // to the allocation_entry label.
3621 ASSERT(instr->context() != NULL);
3622 ASSERT(ToRegister(instr->context()).is(cp));
3623 Register input = ToRegister(instr->value());
3624 Register temp1 = ToRegister(instr->temp1());
3625 Register temp2 = ToRegister(instr->temp2());
3626 Register result_bits = ToRegister(instr->temp3());
3627 Register result = ToRegister(instr->result());
3628
3629 Label runtime_allocation;
3630
3631 // Deoptimize if the input is not a HeapNumber.
3632 __ Ldr(temp1, FieldMemOperand(input, HeapObject::kMapOffset));
3633 DeoptimizeIfNotRoot(temp1, Heap::kHeapNumberMapRootIndex,
3634 instr->environment());
3635
3636 // If the argument is positive, we can return it as-is, without any need to
3637 // allocate a new HeapNumber for the result. We have to do this in integer
3638 // registers (rather than with fabs) because we need to be able to distinguish
3639 // the two zeroes.
3640 __ Ldr(result_bits, FieldMemOperand(input, HeapNumber::kValueOffset));
3641 __ Mov(result, input);
3642 __ Tbz(result_bits, kXSignBit, exit);
3643
3644 // Calculate abs(input) by clearing the sign bit.
3645 __ Bic(result_bits, result_bits, kXSignMask);
3646
3647 // Allocate a new HeapNumber to hold the result.
3648 // result_bits holds the bit representation of the (double) result.
3649 __ Bind(allocation_entry);
3650 __ AllocateHeapNumber(result, &runtime_allocation, temp1, temp2);
3651 // The inline (non-deferred) code will store result_bits into result.
3652 __ B(exit);
3653
3654 __ Bind(&runtime_allocation);
3655 if (FLAG_debug_code) {
3656 // Because result is in the pointer map, we need to make sure it has a valid
3657 // tagged value before we call the runtime. We speculatively set it to the
3658 // input (for abs(+x)) or to a smi (for abs(SMI_MIN)), so it should already
3659 // be valid.
3660 Label result_ok;
3661 Register input = ToRegister(instr->value());
3662 __ JumpIfSmi(result, &result_ok);
3663 __ Cmp(input, result);
3664 // TODO(all): Shouldn't we assert here?
3665 DeoptimizeIf(ne, instr->environment());
3666 __ Bind(&result_ok);
3667 }
3668
3669 { PushSafepointRegistersScope scope(this, Safepoint::kWithRegisters);
3670 CallRuntimeFromDeferred(Runtime::kAllocateHeapNumber, 0, instr,
3671 instr->context());
3672 __ StoreToSafepointRegisterSlot(x0, result);
3673 }
3674 // The inline (non-deferred) code will store result_bits into result.
3675 }
3676
3677
3678 void LCodeGen::DoMathAbsTagged(LMathAbsTagged* instr) {
3679 // Class for deferred case.
3680 class DeferredMathAbsTagged: public LDeferredCode {
3681 public:
3682 DeferredMathAbsTagged(LCodeGen* codegen, LMathAbsTagged* instr)
3683 : LDeferredCode(codegen), instr_(instr) { }
3684 virtual void Generate() {
3685 codegen()->DoDeferredMathAbsTagged(instr_, exit(),
3686 allocation_entry());
3687 }
3688 virtual LInstruction* instr() { return instr_; }
3689 Label* allocation_entry() { return &allocation; }
3690 private:
3691 LMathAbsTagged* instr_;
3692 Label allocation;
3693 };
3694
3695 // TODO(jbramley): The early-exit mechanism would skip the new frame handling
3696 // in GenerateDeferredCode. Tidy this up.
3697 ASSERT(!NeedsDeferredFrame());
3698
3699 DeferredMathAbsTagged* deferred =
3700 new(zone()) DeferredMathAbsTagged(this, instr);
3701
3702 ASSERT(instr->hydrogen()->value()->representation().IsTagged() ||
3703 instr->hydrogen()->value()->representation().IsSmi());
3704 Register input = ToRegister(instr->value());
3705 Register result_bits = ToRegister(instr->temp3());
3706 Register result = ToRegister(instr->result());
3707 Label done;
3708
3709 // Handle smis inline.
3710 // We can treat smis as 64-bit integers, since the (low-order) tag bits will
3711 // never get set by the negation. This is therefore the same as the Integer32
3712 // case in DoMathAbs, except that it operates on 64-bit values.
3713 STATIC_ASSERT((kSmiValueSize == 32) && (kSmiShift == 32) && (kSmiTag == 0));
3714
3715 // TODO(jbramley): We can't use JumpIfNotSmi here because the tbz it uses
3716 // doesn't always have enough range. Consider making a variant of it, or a
3717 // TestIsSmi helper.
3718 STATIC_ASSERT(kSmiTag == 0);
3719 __ Tst(input, kSmiTagMask);
3720 __ B(ne, deferred->entry());
3721
3722 __ Abs(result, input, NULL, &done);
3723
3724 // The result is the magnitude (abs) of the smallest value a smi can
3725 // represent, encoded as a double.
3726 __ Mov(result_bits, double_to_rawbits(0x80000000));
3727 __ B(deferred->allocation_entry());
3728
3729 __ Bind(deferred->exit());
3730 __ Str(result_bits, FieldMemOperand(result, HeapNumber::kValueOffset));
3731
3732 __ Bind(&done);
3733 }
3734
3735
3736 void LCodeGen::DoMathExp(LMathExp* instr) {
3737 DoubleRegister input = ToDoubleRegister(instr->value());
3738 DoubleRegister result = ToDoubleRegister(instr->result());
3739 DoubleRegister double_temp1 = ToDoubleRegister(instr->double_temp1());
3740 DoubleRegister double_temp2 = double_scratch();
3741 Register temp1 = ToRegister(instr->temp1());
3742 Register temp2 = ToRegister(instr->temp2());
3743 Register temp3 = ToRegister(instr->temp3());
3744
3745 MathExpGenerator::EmitMathExp(masm(), input, result,
3746 double_temp1, double_temp2,
3747 temp1, temp2, temp3);
3748 }
3749
3750
3751 void LCodeGen::DoMathFloor(LMathFloor* instr) {
3752 // TODO(jbramley): If we could provide a double result, we could use frintm
3753 // and produce a valid double result in a single instruction.
3754 DoubleRegister input = ToDoubleRegister(instr->value());
3755 Register result = ToRegister(instr->result());
3756 Label deopt;
3757 Label done;
3758
3759 if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) {
3760 __ JumpIfMinusZero(input, &deopt);
3761 }
3762
3763 __ Fcvtms(result, input);
3764
3765 // Check that the result fits into a 32-bit integer.
3766 // - The result did not overflow.
3767 __ Cmp(result, Operand(result, SXTW));
3768 // - The input was not NaN.
3769 __ Fccmp(input, input, NoFlag, eq);
3770 __ B(&done, eq);
3771
3772 __ Bind(&deopt);
3773 Deoptimize(instr->environment());
3774
3775 __ Bind(&done);
3776 }
3777
3778
3779 void LCodeGen::DoMathFloorOfDiv(LMathFloorOfDiv* instr) {
3780 Register result = ToRegister32(instr->result());
3781 Register left = ToRegister32(instr->left());
3782 Register right = ToRegister32(instr->right());
3783 Register remainder = ToRegister32(instr->temp());
3784
3785 // Sdiv cannot trap on A64 (division by zero yields zero), so we can
3786 // execute it speculatively before the checks below.
3787 __ Sdiv(result, left, right);
3788
3789 // Check for x / 0.
3790 DeoptimizeIfZero(right, instr->environment());
3791
3792 // Check for (kMinInt / -1).
3793 if (instr->hydrogen()->CheckFlag(HValue::kCanOverflow)) {
3794 // The V flag will be set iff left == kMinInt.
3795 __ Cmp(left, 1);
3796 __ Ccmp(right, -1, NoFlag, vs);
3797 DeoptimizeIf(eq, instr->environment());
3798 }
3799
3800 // Check for (0 / -x) that will produce negative zero.
3801 if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) {
3802 __ Cmp(right, 0);
3803 __ Ccmp(left, 0, ZFlag, mi);
3804 // "right" can't be null because the code would have already been
3805 // deoptimized. The Z flag is set only if (right < 0) and (left == 0).
3806 // In this case we need to deoptimize to produce a -0.
3807 DeoptimizeIf(eq, instr->environment());
3808 }
3809
3810 Label done;
3811 // If both operands have the same sign then we are done.
3812 __ Eor(remainder, left, right);
3813 __ Tbz(remainder, kWSignBit, &done);
3814
3815 // Check if the result needs to be corrected.
3816 __ Msub(remainder, result, right, left);
3817 __ Cbz(remainder, &done);
3818 __ Sub(result, result, 1);
3819
3820 __ Bind(&done);
3821 }
3822
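The Sdiv/Msub sequence above builds flooring division out of A64's truncating division: when the operands have different signs and the division is inexact, the truncated quotient is one too large and is decremented. A standalone sketch of the same correction (plain C++, the helper name is illustrative):

#include <cassert>

int FlooringDiv(int left, int right) {
  int quotient = left / right;               // Truncates towards zero, like Sdiv.
  int remainder = left - quotient * right;   // Like Msub.
  if (((left ^ right) < 0) && (remainder != 0)) {
    quotient -= 1;  // Different signs and inexact: round down one more step.
  }
  return quotient;
}

int main() {
  assert(FlooringDiv(7, 2) == 3);
  assert(FlooringDiv(-7, 2) == -4);  // Truncation alone would give -3.
  assert(FlooringDiv(7, -2) == -4);
  assert(FlooringDiv(-6, 2) == -3);  // Exact division needs no correction.
  return 0;
}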
3823
3824 void LCodeGen::DoMathLog(LMathLog* instr) {
3825 ASSERT(instr->IsMarkedAsCall());
3826 ASSERT(ToDoubleRegister(instr->value()).is(d0));
3827 __ CallCFunction(ExternalReference::math_log_double_function(isolate()),
3828 0, 1);
3829 ASSERT(ToDoubleRegister(instr->result()).Is(d0));
3830 }
3831
3832
3833 void LCodeGen::DoMathClz32(LMathClz32* instr) {
3834 Register input = ToRegister32(instr->value());
3835 Register result = ToRegister32(instr->result());
3836 __ Clz(result, input);
3837 }
3838
3839
3840 void LCodeGen::DoMathPowHalf(LMathPowHalf* instr) {
3841 DoubleRegister input = ToDoubleRegister(instr->value());
3842 DoubleRegister result = ToDoubleRegister(instr->result());
3843 Label done;
3844
3845 // Math.pow(x, 0.5) differs from fsqrt(x) in the following cases:
3846 // Math.pow(-Infinity, 0.5) == +Infinity
3847 // Math.pow(-0.0, 0.5) == +0.0
3848
3849 // Catch -infinity inputs first.
3850 // TODO(jbramley): A constant infinity register would be helpful here.
3851 __ Fmov(double_scratch(), kFP64NegativeInfinity);
3852 __ Fcmp(double_scratch(), input);
3853 __ Fabs(result, input);
3854 __ B(&done, eq);
3855
3856 // Add +0.0 to convert -0.0 to +0.0.
3857 // TODO(jbramley): A constant zero register would be helpful here.
3858 __ Fmov(double_scratch(), 0.0);
3859 __ Fadd(double_scratch(), input, double_scratch());
3860 __ Fsqrt(result, double_scratch());
3861
3862 __ Bind(&done);
3863 }
3864
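The two special cases listed in the comment above are exactly where IEEE pow(x, 0.5) and sqrt(x) disagree, which is why -Infinity and -0.0 are filtered out before Fsqrt. A quick standalone check, relying on the standard C/IEEE behaviour of std::pow and std::sqrt:

#include <cassert>
#include <cmath>

int main() {
  // Math.pow(-Infinity, 0.5) == +Infinity, but sqrt(-Infinity) is NaN.
  assert(std::isinf(std::pow(-INFINITY, 0.5)) && std::pow(-INFINITY, 0.5) > 0);
  assert(std::isnan(std::sqrt(-INFINITY)));

  // Math.pow(-0.0, 0.5) == +0.0, but sqrt(-0.0) is -0.0.
  assert(!std::signbit(std::pow(-0.0, 0.5)));
  assert(std::signbit(std::sqrt(-0.0)));
  return 0;
}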
3865
3866 void LCodeGen::DoPower(LPower* instr) {
3867 Representation exponent_type = instr->hydrogen()->right()->representation();
3868 // Having marked this as a call, we can use any registers.
3869 // Just make sure that the input/output registers are the expected ones.
3870 ASSERT(!instr->right()->IsDoubleRegister() ||
3871 ToDoubleRegister(instr->right()).is(d1));
3872 ASSERT(exponent_type.IsInteger32() || !instr->right()->IsRegister() ||
3873 ToRegister(instr->right()).is(x11));
3874 ASSERT(!exponent_type.IsInteger32() || ToRegister(instr->right()).is(x12));
3875 ASSERT(ToDoubleRegister(instr->left()).is(d0));
3876 ASSERT(ToDoubleRegister(instr->result()).is(d0));
3877
3878 if (exponent_type.IsSmi()) {
3879 MathPowStub stub(MathPowStub::TAGGED);
3880 __ CallStub(&stub);
3881 } else if (exponent_type.IsTagged()) {
3882 Label no_deopt;
3883 __ JumpIfSmi(x11, &no_deopt);
3884 __ Ldr(x0, FieldMemOperand(x11, HeapObject::kMapOffset));
3885 DeoptimizeIfNotRoot(x0, Heap::kHeapNumberMapRootIndex,
3886 instr->environment());
3887 __ Bind(&no_deopt);
3888 MathPowStub stub(MathPowStub::TAGGED);
3889 __ CallStub(&stub);
3890 } else if (exponent_type.IsInteger32()) {
3891 // Ensure integer exponent has no garbage in top 32-bits, as MathPowStub
3892 // supports large integer exponents.
3893 Register exponent = ToRegister(instr->right());
3894 __ Sxtw(exponent, exponent);
3895 MathPowStub stub(MathPowStub::INTEGER);
3896 __ CallStub(&stub);
3897 } else {
3898 ASSERT(exponent_type.IsDouble());
3899 MathPowStub stub(MathPowStub::DOUBLE);
3900 __ CallStub(&stub);
3901 }
3902 }
3903
3904
3905 void LCodeGen::DoMathRound(LMathRound* instr) {
3906 // TODO(jbramley): We could provide a double result here using frint.
3907 DoubleRegister input = ToDoubleRegister(instr->value());
3908 DoubleRegister temp1 = ToDoubleRegister(instr->temp1());
3909 Register result = ToRegister(instr->result());
3910 Label try_rounding;
3911 Label deopt;
3912 Label done;
3913
3914 // Math.round() rounds to the nearest integer, with ties going towards
3915 // +infinity. This does not match any IEEE-754 rounding mode.
3916 // - Infinities and NaNs are propagated unchanged, but cause deopts because
3917 // they can't be represented as integers.
3918 // - The sign of the result is the same as the sign of the input. This means
3919 // that -0.0 rounds to itself, and values -0.5 <= input < 0 also produce a
3920 // result of -0.0.
3921
3922 DoubleRegister dot_five = double_scratch();
3923 __ Fmov(dot_five, 0.5);
3924 __ Fabs(temp1, input);
3925 __ Fcmp(temp1, dot_five);
3926 // If input is in [-0.5, -0], the result is -0.
3927 // If input is in [+0, +0.5[, the result is +0.
3928 // If the input is +0.5, the result is 1.
3929 __ B(hi, &try_rounding); // hi so NaN will also branch.
3930
3931 if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) {
3932 __ Fmov(result, input);
3933 __ Cmp(result, 0);
3934 DeoptimizeIf(mi, instr->environment()); // [-0.5, -0.0].
3935 }
3936 __ Fcmp(input, dot_five);
3937 __ Mov(result, 1); // +0.5.
3938 // The remaining cases ([+0, +0.5[ when kBailoutOnMinusZero deopted above,
3939 // otherwise [-0.5, +0.5[) return 0 (xzr).
3940 __ Csel(result, result, xzr, eq);
3941 __ B(&done);
3942
3943 __ Bind(&deopt);
3944 Deoptimize(instr->environment());
3945
3946 __ Bind(&try_rounding);
3947 // Since we're providing a 32-bit result, we can implement ties-to-infinity by
3948 // adding 0.5 to the input, then taking the floor of the result. This does not
3949 // work for very large positive doubles because adding 0.5 would cause an
3950 // intermediate rounding stage, so a different approach will be necessary if a
3951 // double result is needed.
3952 __ Fadd(temp1, input, dot_five);
3953 __ Fcvtms(result, temp1);
3954
3955 // Deopt if
3956 // * the input was NaN
3957 // * the result is not representable using a 32-bit integer.
3958 __ Fcmp(input, 0.0);
3959 __ Ccmp(result, Operand(result.W(), SXTW), NoFlag, vc);
3960 __ B(ne, &deopt);
3961
3962 __ Bind(&done);
3963 }
3964
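The fall-through path above implements the ties-towards-+infinity rule as floor(input + 0.5) (Fadd followed by Fcvtms), once the [-0.5, +0.5] band has been dealt with. A standalone check of that identity on a few representative values (plain C++, helper name illustrative):

#include <cassert>
#include <cmath>

long JsRound(double x) { return static_cast<long>(std::floor(x + 0.5)); }

int main() {
  assert(JsRound(2.5) == 3);    // Tie rounds up.
  assert(JsRound(-2.5) == -2);  // Tie rounds towards +infinity, not away from zero.
  assert(JsRound(2.4) == 2);
  assert(JsRound(-2.6) == -3);
  return 0;
}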
3965
3966 void LCodeGen::DoMathSqrt(LMathSqrt* instr) {
3967 DoubleRegister input = ToDoubleRegister(instr->value());
3968 DoubleRegister result = ToDoubleRegister(instr->result());
3969 __ Fsqrt(result, input);
3970 }
3971
3972
3973 void LCodeGen::DoMathMinMax(LMathMinMax* instr) {
3974 HMathMinMax::Operation op = instr->hydrogen()->operation();
3975 if (instr->hydrogen()->representation().IsInteger32()) {
3976 Register result = ToRegister32(instr->result());
3977 Register left = ToRegister32(instr->left());
3978 Operand right = ToOperand32I(instr->right());
3979
3980 __ Cmp(left, right);
3981 __ Csel(result, left, right, (op == HMathMinMax::kMathMax) ? ge : le);
3982 } else if (instr->hydrogen()->representation().IsSmi()) {
3983 Register result = ToRegister(instr->result());
3984 Register left = ToRegister(instr->left());
3985 Operand right = ToOperand(instr->right());
3986
3987 __ Cmp(left, right);
3988 __ Csel(result, left, right, (op == HMathMinMax::kMathMax) ? ge : le);
3989 } else {
3990 ASSERT(instr->hydrogen()->representation().IsDouble());
3991 DoubleRegister result = ToDoubleRegister(instr->result());
3992 DoubleRegister left = ToDoubleRegister(instr->left());
3993 DoubleRegister right = ToDoubleRegister(instr->right());
3994
3995 if (op == HMathMinMax::kMathMax) {
3996 __ Fmax(result, left, right);
3997 } else {
3998 ASSERT(op == HMathMinMax::kMathMin);
3999 __ Fmin(result, left, right);
4000 }
4001 }
4002 }
4003
4004
4005 void LCodeGen::DoModI(LModI* instr) {
4006 HMod* hmod = instr->hydrogen();
4007 HValue* hleft = hmod->left();
4008 HValue* hright = hmod->right();
4009
4010 Label done;
4011 Register result = ToRegister32(instr->result());
4012 Register dividend = ToRegister32(instr->left());
4013
4014 bool need_minus_zero_check = (hmod->CheckFlag(HValue::kBailoutOnMinusZero) &&
4015 hleft->CanBeNegative() && hmod->CanBeZero());
4016
4017 if (hmod->RightIsPowerOf2()) {
4018 // Note: The code below even works when right contains kMinInt.
4019 int32_t divisor = Abs(hright->GetInteger32Constant());
4020
4021 if (hleft->CanBeNegative()) {
4022 __ Cmp(dividend, 0);
4023 __ Cneg(result, dividend, mi);
4024 __ And(result, result, divisor - 1);
4025 __ Cneg(result, result, mi);
4026 if (need_minus_zero_check) {
4027 __ Cbnz(result, &done);
4028 // The result is 0. Deoptimize if the dividend was negative.
4029 DeoptimizeIf(mi, instr->environment());
4030 }
4031 } else {
4032 __ And(result, dividend, divisor - 1);
4033 }
4034
4035 } else {
4036 Label deopt;
4037 Register divisor = ToRegister32(instr->right());
4038 // Compute:
4039 // modulo = dividend - quotient * divisor
4040 __ Sdiv(result, dividend, divisor);
4041 if (hright->CanBeZero()) {
4042 // Combine the deoptimization sites.
4043 Label ok;
4044 __ Cbnz(divisor, &ok);
4045 __ Bind(&deopt);
4046 Deoptimize(instr->environment());
4047 __ Bind(&ok);
4048 }
4049 __ Msub(result, result, divisor, dividend);
4050 if (need_minus_zero_check) {
4051 __ Cbnz(result, &done);
4052 if (deopt.is_bound()) {
4053 __ Tbnz(dividend, kWSignBit, &deopt);
4054 } else {
4055 DeoptimizeIfNegative(dividend, instr->environment());
4056 }
4057 }
4058 }
4059 __ Bind(&done);
4060 }
4061
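The Cneg/And/Cneg sequence in the power-of-two branch above computes the JavaScript-style remainder, whose sign follows the dividend, without dividing: negate a negative dividend, mask with divisor - 1, then negate the result back. A standalone sketch of the same arithmetic (plain C++, helper name illustrative):

#include <cassert>

int PowerOfTwoMod(int dividend, int divisor) {
  int mask = divisor - 1;               // divisor is a power of two.
  if (dividend < 0) {
    return -((-dividend) & mask);       // Cneg, And, Cneg above.
  }
  return dividend & mask;
}

int main() {
  assert(PowerOfTwoMod(7, 4) == 3);
  assert(PowerOfTwoMod(-7, 4) == -3);  // The result keeps the dividend's sign.
  assert(PowerOfTwoMod(-8, 4) == 0);   // Zero from a negative dividend is the
                                       // -0 case the code deoptimizes on.
  return 0;
}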
4062
4063 void LCodeGen::DoMulConstIS(LMulConstIS* instr) {
4064 ASSERT(instr->hydrogen()->representation().IsSmiOrInteger32());
4065 bool is_smi = instr->hydrogen()->representation().IsSmi();
4066 Register result =
4067 is_smi ? ToRegister(instr->result()) : ToRegister32(instr->result());
4068 Register left =
4069 is_smi ? ToRegister(instr->left()) : ToRegister32(instr->left()) ;
4070 int32_t right = ToInteger32(instr->right());
4071
4072 bool can_overflow = instr->hydrogen()->CheckFlag(HValue::kCanOverflow);
4073 bool bailout_on_minus_zero =
4074 instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero);
4075
4076 if (bailout_on_minus_zero) {
4077 if (right < 0) {
4078 // The result is -0 if right is negative and left is zero.
4079 DeoptimizeIfZero(left, instr->environment());
4080 } else if (right == 0) {
4081 // The result is -0 if the right is zero and the left is negative.
4082 DeoptimizeIfNegative(left, instr->environment());
4083 }
4084 }
4085
4086 switch (right) {
4087 // Cases which can detect overflow.
4088 case -1:
4089 if (can_overflow) {
4090 // Only kMinInt (0x80000000) can overflow here.
4091 __ Negs(result, left);
4092 DeoptimizeIf(vs, instr->environment());
4093 } else {
4094 __ Neg(result, left);
4095 }
4096 break;
4097 case 0:
4098 // This case can never overflow.
4099 __ Mov(result, 0);
4100 break;
4101 case 1:
4102 // This case can never overflow.
4103 __ Mov(result, left, kDiscardForSameWReg);
4104 break;
4105 case 2:
4106 if (can_overflow) {
4107 __ Adds(result, left, left);
4108 DeoptimizeIf(vs, instr->environment());
4109 } else {
4110 __ Add(result, left, left);
4111 }
4112 break;
4113
4114 // All other cases do not detect overflow, because doing so would probably
4115 // be no faster than using the smull method in LMulI.
4116 // TODO(jbramley): Investigate this, and add overflow support if it would
4117 // be useful.
4118 default:
4119 ASSERT(!can_overflow);
4120
4121 // Multiplication by constant powers of two (and some related values)
4122 // can be done efficiently with shifted operands.
4123 if (right >= 0) {
4124 if (IsPowerOf2(right)) {
4125 // result = left << log2(right)
4126 __ Lsl(result, left, WhichPowerOf2(right));
4127 } else if (IsPowerOf2(right - 1)) {
4128 // result = left + left << log2(right - 1)
4129 __ Add(result, left, Operand(left, LSL, WhichPowerOf2(right - 1)));
4130 } else if (IsPowerOf2(right + 1)) {
4131 // result = -left + left << log2(right + 1)
4132 __ Sub(result, left, Operand(left, LSL, WhichPowerOf2(right + 1)));
4133 __ Neg(result, result);
4134 } else {
4135 UNREACHABLE();
4136 }
4137 } else {
4138 if (IsPowerOf2(-right)) {
4139 // result = -left << log2(-right)
4140 __ Neg(result, Operand(left, LSL, WhichPowerOf2(-right)));
4141 } else if (IsPowerOf2(-right + 1)) {
4142 // result = left - left << log2(-right + 1)
4143 __ Sub(result, left, Operand(left, LSL, WhichPowerOf2(-right + 1)));
4144 } else if (IsPowerOf2(-right - 1)) {
4145 // result = -left - left << log2(-right - 1)
4146 __ Add(result, left, Operand(left, LSL, WhichPowerOf2(-right - 1)));
4147 __ Neg(result, result);
4148 } else {
4149 UNREACHABLE();
4150 }
4151 }
4152 break;
4153 }
4154 }
4155
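The default branch above strength-reduces the multiply: a constant 2^k becomes left << k, 2^k + 1 becomes left + (left << k), 2^k - 1 becomes (left << k) - left, and negative constants reuse the same patterns followed by a negation. A standalone check of those identities (plain C++):

#include <cassert>

int main() {
  int left = 13;
  assert(left * 8 == (left << 3));             // right == 2^k
  assert(left * 9 == left + (left << 3));      // right == 2^k + 1
  assert(left * 7 == (left << 3) - left);      // right == 2^k - 1
  assert(left * -9 == -(left + (left << 3)));  // negative constant, then Neg
  return 0;
}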
4156
4157 void LCodeGen::DoMulI(LMulI* instr) {
4158 Register result = ToRegister32(instr->result());
4159 Register left = ToRegister32(instr->left());
4160 Register right = ToRegister32(instr->right());
4161
4162 bool can_overflow = instr->hydrogen()->CheckFlag(HValue::kCanOverflow);
4163 bool bailout_on_minus_zero =
4164 instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero);
4165
4166 if (bailout_on_minus_zero) {
4167 // If one operand is zero and the other is negative, the result is -0.
4168 // - Set Z (eq) if either left or right, or both, are 0.
4169 __ Cmp(left, 0);
4170 __ Ccmp(right, 0, ZFlag, ne);
4171 // - If so (eq), set N (mi) if left + right is negative.
4172 // - Otherwise, clear N.
4173 __ Ccmn(left, right, NoFlag, eq);
4174 DeoptimizeIf(mi, instr->environment());
4175 }
4176
4177 if (can_overflow) {
4178 __ Smull(result.X(), left, right);
4179 __ Cmp(result.X(), Operand(result, SXTW));
4180 DeoptimizeIf(ne, instr->environment());
4181 } else {
4182 __ Mul(result, left, right);
4183 }
4184 }
4185
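The overflow check above uses a simple property of the 64-bit product: Smull produces the exact result, and it fits in 32 bits exactly when it equals its own low word sign-extended back to 64 bits, which is what the Cmp against Operand(result, SXTW) tests. A standalone version of that check (plain C++, helper name illustrative):

#include <cassert>
#include <cstdint>

bool MulOverflows32(int32_t left, int32_t right, int32_t* result) {
  int64_t product = static_cast<int64_t>(left) * static_cast<int64_t>(right);
  *result = static_cast<int32_t>(product);
  return product != static_cast<int64_t>(*result);  // ne => deoptimize above.
}

int main() {
  int32_t r;
  assert(!MulOverflows32(1000, 1000, &r) && r == 1000000);
  assert(MulOverflows32(100000, 100000, &r));  // 10^10 needs more than 32 bits.
  assert(MulOverflows32(INT32_MIN, -1, &r));   // The classic overflow case.
  return 0;
}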
4186
4187 void LCodeGen::DoMulS(LMulS* instr) {
4188 Register result = ToRegister(instr->result());
4189 Register left = ToRegister(instr->left());
4190 Register right = ToRegister(instr->right());
4191
4192 bool can_overflow = instr->hydrogen()->CheckFlag(HValue::kCanOverflow);
4193 bool bailout_on_minus_zero =
4194 instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero);
4195
4196 if (bailout_on_minus_zero) {
4197 // If one operand is zero and the other is negative, the result is -0.
4198 // - Set Z (eq) if either left or right, or both, are 0.
4199 __ Cmp(left, 0);
4200 __ Ccmp(right, 0, ZFlag, ne);
4201 // - If so (eq), set N (mi) if left + right is negative.
4202 // - Otherwise, clear N.
4203 __ Ccmn(left, right, NoFlag, eq);
4204 DeoptimizeIf(mi, instr->environment());
4205 }
4206
4207 STATIC_ASSERT((kSmiShift == 32) && (kSmiTag == 0));
4208 if (can_overflow) {
4209 __ Smulh(result, left, right);
4210 __ Cmp(result, Operand(result.W(), SXTW));
4211 __ SmiTag(result);
4212 DeoptimizeIf(ne, instr->environment());
4213 } else {
4214 // TODO(jbramley): This could be rewritten to support UseRegisterAtStart.
4215 ASSERT(!AreAliased(result, right));
4216 __ SmiUntag(result, left);
4217 __ Mul(result, result, right);
4218 }
4219 }
4220
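The can_overflow path above works because A64 smis are the value shifted left by 32: the 128-bit product of two such smis is (a * b) << 64, so Smulh, which returns the high 64 bits of the product, is exactly the untagged product; it is then range-checked and re-tagged. A standalone check of that identity, assuming the __int128 extension available in GCC/Clang:

#include <cassert>
#include <cstdint>

int main() {
  int32_t a = -1234, b = 5678;
  int64_t smi_a = static_cast<int64_t>(a) << 32;   // A64 smi encoding.
  int64_t smi_b = static_cast<int64_t>(b) << 32;

  __int128 product =
      static_cast<__int128>(smi_a) * static_cast<__int128>(smi_b);
  int64_t high = static_cast<int64_t>(product >> 64);  // What Smulh returns.

  assert(high == static_cast<int64_t>(a) * b);  // SmiTag (<< 32) re-tags this.
  return 0;
}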
4221
4222 void LCodeGen::DoDeferredNumberTagD(LNumberTagD* instr) {
4223 // TODO(3095996): Get rid of this. For now, we need to make the
4224 // result register contain a valid pointer because it is already
4225 // contained in the register pointer map.
4226 Register result = ToRegister(instr->result());
4227 __ Mov(result, 0);
4228
4229 PushSafepointRegistersScope scope(this, Safepoint::kWithRegisters);
4230 // NumberTagU and NumberTagD use the context from the frame, rather than
4231 // the environment's HContext or HInlinedContext value.
4232 // They only call Runtime::kAllocateHeapNumber.
4233 // The corresponding HChange instructions are added in a phase that does
4234 // not have easy access to the local context.
4235 __ Ldr(cp, MemOperand(fp, StandardFrameConstants::kContextOffset));
4236 __ CallRuntimeSaveDoubles(Runtime::kAllocateHeapNumber);
4237 RecordSafepointWithRegisters(
4238 instr->pointer_map(), 0, Safepoint::kNoLazyDeopt);
4239 __ StoreToSafepointRegisterSlot(x0, result);
4240 }
4241
4242
4243 void LCodeGen::DoNumberTagD(LNumberTagD* instr) {
4244 class DeferredNumberTagD: public LDeferredCode {
4245 public:
4246 DeferredNumberTagD(LCodeGen* codegen, LNumberTagD* instr)
4247 : LDeferredCode(codegen), instr_(instr) { }
4248 virtual void Generate() { codegen()->DoDeferredNumberTagD(instr_); }
4249 virtual LInstruction* instr() { return instr_; }
4250 private:
4251 LNumberTagD* instr_;
4252 };
4253
4254 DoubleRegister input = ToDoubleRegister(instr->value());
4255 Register result = ToRegister(instr->result());
4256 Register temp1 = ToRegister(instr->temp1());
4257 Register temp2 = ToRegister(instr->temp2());
4258
4259 DeferredNumberTagD* deferred = new(zone()) DeferredNumberTagD(this, instr);
4260 if (FLAG_inline_new) {
4261 __ AllocateHeapNumber(result, deferred->entry(), temp1, temp2);
4262 } else {
4263 __ B(deferred->entry());
4264 }
4265
4266 __ Bind(deferred->exit());
4267 __ Str(input, FieldMemOperand(result, HeapNumber::kValueOffset));
4268 }
4269
4270
4271 void LCodeGen::DoDeferredNumberTagU(LInstruction* instr,
4272 LOperand* value,
4273 LOperand* temp1,
4274 LOperand* temp2) {
4275 Label slow, convert_and_store;
4276 Register src = ToRegister32(value);
4277 Register dst = ToRegister(instr->result());
4278 Register scratch1 = ToRegister(temp1);
4279
4280 if (FLAG_inline_new) {
4281 Register scratch2 = ToRegister(temp2);
4282 __ AllocateHeapNumber(dst, &slow, scratch1, scratch2);
4283 __ B(&convert_and_store);
4284 }
4285
4286 // Slow case: call the runtime system to do the number allocation.
4287 __ Bind(&slow);
4288 // TODO(3095996): Put a valid pointer value in the stack slot where the result
4289 // register is stored, as this register is in the pointer map, but contains an
4290 // integer value.
4291 __ Mov(dst, 0);
4292 {
4293 // Preserve the value of all registers.
4294 PushSafepointRegistersScope scope(this, Safepoint::kWithRegisters);
4295
4296 // NumberTagU and NumberTagD use the context from the frame, rather than
4297 // the environment's HContext or HInlinedContext value.
4298 // They only call Runtime::kAllocateHeapNumber.
4299 // The corresponding HChange instructions are added in a phase that does
4300 // not have easy access to the local context.
4301 __ Ldr(cp, MemOperand(fp, StandardFrameConstants::kContextOffset));
4302 __ CallRuntimeSaveDoubles(Runtime::kAllocateHeapNumber);
4303 RecordSafepointWithRegisters(
4304 instr->pointer_map(), 0, Safepoint::kNoLazyDeopt);
4305 __ StoreToSafepointRegisterSlot(x0, dst);
4306 }
4307
4308 // Convert number to floating point and store in the newly allocated heap
4309 // number.
4310 __ Bind(&convert_and_store);
4311 DoubleRegister dbl_scratch = double_scratch();
4312 __ Ucvtf(dbl_scratch, src);
4313 __ Str(dbl_scratch, FieldMemOperand(dst, HeapNumber::kValueOffset));
4314 }
4315
4316
4317 void LCodeGen::DoNumberTagU(LNumberTagU* instr) {
4318 class DeferredNumberTagU: public LDeferredCode {
4319 public:
4320 DeferredNumberTagU(LCodeGen* codegen, LNumberTagU* instr)
4321 : LDeferredCode(codegen), instr_(instr) { }
4322 virtual void Generate() {
4323 codegen()->DoDeferredNumberTagU(instr_,
4324 instr_->value(),
4325 instr_->temp1(),
4326 instr_->temp2());
4327 }
4328 virtual LInstruction* instr() { return instr_; }
4329 private:
4330 LNumberTagU* instr_;
4331 };
4332
4333 Register value = ToRegister32(instr->value());
4334 Register result = ToRegister(instr->result());
4335
4336 DeferredNumberTagU* deferred = new(zone()) DeferredNumberTagU(this, instr);
4337 __ Cmp(value, Smi::kMaxValue);
4338 __ B(hi, deferred->entry());
4339 __ SmiTag(result, value.X());
4340 __ Bind(deferred->exit());
4341 }
4342
4343
4344 void LCodeGen::DoNumberUntagD(LNumberUntagD* instr) {
4345 Register input = ToRegister(instr->value());
4346 Register scratch = ToRegister(instr->temp());
4347 DoubleRegister result = ToDoubleRegister(instr->result());
4348 bool can_convert_undefined_to_nan =
4349 instr->hydrogen()->can_convert_undefined_to_nan();
4350
4351 Label done, load_smi;
4352
4353 // Work out what untag mode we're working with.
4354 HValue* value = instr->hydrogen()->value();
4355 NumberUntagDMode mode = value->representation().IsSmi()
4356 ? NUMBER_CANDIDATE_IS_SMI : NUMBER_CANDIDATE_IS_ANY_TAGGED;
4357
4358 if (mode == NUMBER_CANDIDATE_IS_ANY_TAGGED) {
4359 __ JumpIfSmi(input, &load_smi);
4360
4361 Label convert_undefined, deopt;
4362
4363 // Heap number map check.
4364 Label* not_heap_number = can_convert_undefined_to_nan ? &convert_undefined
4365 : &deopt;
4366 __ Ldr(scratch, FieldMemOperand(input, HeapObject::kMapOffset));
4367 __ JumpIfNotRoot(scratch, Heap::kHeapNumberMapRootIndex, not_heap_number);
4368
4369 // Load heap number.
4370 __ Ldr(result, FieldMemOperand(input, HeapNumber::kValueOffset));
4371 if (instr->hydrogen()->deoptimize_on_minus_zero()) {
4372 __ JumpIfMinusZero(result, &deopt);
4373 }
4374 __ B(&done);
4375
4376 if (can_convert_undefined_to_nan) {
4377 __ Bind(&convert_undefined);
4378 __ JumpIfNotRoot(input, Heap::kUndefinedValueRootIndex, &deopt);
4379
4380 __ LoadRoot(scratch, Heap::kNanValueRootIndex);
4381 __ Ldr(result, FieldMemOperand(scratch, HeapNumber::kValueOffset));
4382 __ B(&done);
4383 }
4384
4385 __ Bind(&deopt);
4386 Deoptimize(instr->environment());
4387 } else {
4388 ASSERT(mode == NUMBER_CANDIDATE_IS_SMI);
4389 // Fall through to load_smi.
4390 }
4391
4392 // Smi to double register conversion.
4393 __ Bind(&load_smi);
4394 __ SmiUntagToDouble(result, input);
4395
4396 __ Bind(&done);
4397 }
4398
4399
4400 void LCodeGen::DoOsrEntry(LOsrEntry* instr) {
4401 // This is a pseudo-instruction that ensures that the environment here is
4402 // properly registered for deoptimization and records the assembler's PC
4403 // offset.
4404 LEnvironment* environment = instr->environment();
4405
4406 // If the environment were already registered, we would have no way of
4407 // backpatching it with the spill slot operands.
4408 ASSERT(!environment->HasBeenRegistered());
4409 RegisterEnvironmentForDeoptimization(environment, Safepoint::kNoLazyDeopt);
4410
4411 GenerateOsrPrologue();
4412 }
4413
4414
4415 void LCodeGen::DoParameter(LParameter* instr) {
4416 // Nothing to do.
4417 }
4418
4419
4420 void LCodeGen::DoPushArgument(LPushArgument* instr) {
4421 LOperand* argument = instr->value();
4422 if (argument->IsDoubleRegister() || argument->IsDoubleStackSlot()) {
4423 Abort(kDoPushArgumentNotImplementedForDoubleType);
4424 } else {
4425 __ Push(ToRegister(argument));
4426 }
4427 }
4428
4429
4430 void LCodeGen::DoReturn(LReturn* instr) {
4431 if (FLAG_trace && info()->IsOptimizing()) {
4432 // Push the return value on the stack as the parameter.
4433 // Runtime::TraceExit returns its parameter in x0. We're leaving the code
4434 // managed by the register allocator and tearing down the frame, so it's
4435 // safe to write to the context register.
4436 __ Push(x0);
4437 __ Ldr(cp, MemOperand(fp, StandardFrameConstants::kContextOffset));
4438 __ CallRuntime(Runtime::kTraceExit, 1);
4439 }
4440
4441 if (info()->saves_caller_doubles()) {
4442 RestoreCallerDoubles();
4443 }
4444
4445 int no_frame_start = -1;
4446 if (NeedsEagerFrame()) {
4447 Register stack_pointer = masm()->StackPointer();
4448 __ Mov(stack_pointer, fp);
4449 no_frame_start = masm_->pc_offset();
4450 __ Pop(fp, lr);
4451 }
4452
4453 if (instr->has_constant_parameter_count()) {
4454 int parameter_count = ToInteger32(instr->constant_parameter_count());
4455 __ Drop(parameter_count + 1);
4456 } else {
4457 Register parameter_count = ToRegister(instr->parameter_count());
4458 __ DropBySMI(parameter_count);
4459 }
4460 __ Ret();
4461
4462 if (no_frame_start != -1) {
4463 info_->AddNoFrameRange(no_frame_start, masm_->pc_offset());
4464 }
4465 }
4466
4467
4468 MemOperand LCodeGen::BuildSeqStringOperand(Register string,
4469 Register temp,
4470 LOperand* index,
4471 String::Encoding encoding) {
4472 if (index->IsConstantOperand()) {
4473 int offset = ToInteger32(LConstantOperand::cast(index));
4474 if (encoding == String::TWO_BYTE_ENCODING) {
4475 offset *= kUC16Size;
4476 }
4477 STATIC_ASSERT(kCharSize == 1);
4478 return FieldMemOperand(string, SeqString::kHeaderSize + offset);
4479 }
4480 ASSERT(!temp.is(string));
4481 ASSERT(!temp.is(ToRegister(index)));
4482 if (encoding == String::ONE_BYTE_ENCODING) {
4483 __ Add(temp, string, Operand(ToRegister32(index), SXTW));
4484 } else {
4485 STATIC_ASSERT(kUC16Size == 2);
4486 __ Add(temp, string, Operand(ToRegister32(index), SXTW, 1));
4487 }
4488 return FieldMemOperand(temp, SeqString::kHeaderSize);
4489 }
4490
4491
4492 void LCodeGen::DoSeqStringGetChar(LSeqStringGetChar* instr) {
4493 String::Encoding encoding = instr->hydrogen()->encoding();
4494 Register string = ToRegister(instr->string());
4495 Register result = ToRegister(instr->result());
4496 Register temp = ToRegister(instr->temp());
4497
4498 if (FLAG_debug_code) {
4499 __ Ldr(temp, FieldMemOperand(string, HeapObject::kMapOffset));
4500 __ Ldrb(temp, FieldMemOperand(temp, Map::kInstanceTypeOffset));
4501
4502 __ And(temp, temp,
4503 Operand(kStringRepresentationMask | kStringEncodingMask));
4504 static const uint32_t one_byte_seq_type = kSeqStringTag | kOneByteStringTag;
4505 static const uint32_t two_byte_seq_type = kSeqStringTag | kTwoByteStringTag;
4506 __ Cmp(temp, Operand(encoding == String::ONE_BYTE_ENCODING
4507 ? one_byte_seq_type : two_byte_seq_type));
4508 __ Check(eq, kUnexpectedStringType);
4509 }
4510
4511 MemOperand operand =
4512 BuildSeqStringOperand(string, temp, instr->index(), encoding);
4513 if (encoding == String::ONE_BYTE_ENCODING) {
4514 __ Ldrb(result, operand);
4515 } else {
4516 __ Ldrh(result, operand);
4517 }
4518 }
4519
4520
4521 void LCodeGen::DoSeqStringSetChar(LSeqStringSetChar* instr) {
4522 String::Encoding encoding = instr->hydrogen()->encoding();
4523 Register string = ToRegister(instr->string());
4524 Register value = ToRegister(instr->value());
4525 Register temp = ToRegister(instr->temp());
4526
4527 if (FLAG_debug_code) {
4528 ASSERT(ToRegister(instr->context()).is(cp));
4529 Register index = ToRegister(instr->index());
4530 static const uint32_t one_byte_seq_type = kSeqStringTag | kOneByteStringTag;
4531 static const uint32_t two_byte_seq_type = kSeqStringTag | kTwoByteStringTag;
4532 int encoding_mask =
4533 instr->hydrogen()->encoding() == String::ONE_BYTE_ENCODING
4534 ? one_byte_seq_type : two_byte_seq_type;
4535 __ EmitSeqStringSetCharCheck(string, index, kIndexIsInteger32, temp,
4536 encoding_mask);
4537 }
4538 MemOperand operand =
4539 BuildSeqStringOperand(string, temp, instr->index(), encoding);
4540 if (encoding == String::ONE_BYTE_ENCODING) {
4541 __ Strb(value, operand);
4542 } else {
4543 __ Strh(value, operand);
4544 }
4545 }
4546
4547
4548 void LCodeGen::DoSmiTag(LSmiTag* instr) {
4549 ASSERT(!instr->hydrogen_value()->CheckFlag(HValue::kCanOverflow));
4550 __ SmiTag(ToRegister(instr->result()), ToRegister(instr->value()));
4551 }
4552
4553
4554 void LCodeGen::DoSmiUntag(LSmiUntag* instr) {
4555 Register input = ToRegister(instr->value());
4556 Register result = ToRegister(instr->result());
4557 Label done, untag;
4558
4559 if (instr->needs_check()) {
4560 DeoptimizeIfNotSmi(input, instr->environment());
4561 }
4562
4563 __ Bind(&untag);
4564 __ SmiUntag(result, input);
4565 __ Bind(&done);
4566 }
4567
4568
4569 void LCodeGen::DoShiftI(LShiftI* instr) {
4570 LOperand* right_op = instr->right();
4571 Register left = ToRegister32(instr->left());
4572 Register result = ToRegister32(instr->result());
4573
4574 if (right_op->IsRegister()) {
4575 Register right = ToRegister32(instr->right());
4576 switch (instr->op()) {
4577 case Token::ROR: __ Ror(result, left, right); break;
4578 case Token::SAR: __ Asr(result, left, right); break;
4579 case Token::SHL: __ Lsl(result, left, right); break;
4580 case Token::SHR:
4581 if (instr->can_deopt()) {
4582 Label right_not_zero;
4583 __ Cbnz(right, &right_not_zero);
4584 DeoptimizeIfNegative(left, instr->environment());
4585 __ Bind(&right_not_zero);
4586 }
4587 __ Lsr(result, left, right);
4588 break;
4589 default: UNREACHABLE();
4590 }
4591 } else {
4592 ASSERT(right_op->IsConstantOperand());
4593 int shift_count = ToInteger32(LConstantOperand::cast(right_op)) & 0x1f;
4594 if (shift_count == 0) {
4595 if ((instr->op() == Token::SHR) && instr->can_deopt()) {
4596 DeoptimizeIfNegative(left, instr->environment());
4597 }
4598 __ Mov(result, left, kDiscardForSameWReg);
4599 } else {
4600 switch (instr->op()) {
4601 case Token::ROR: __ Ror(result, left, shift_count); break;
4602 case Token::SAR: __ Asr(result, left, shift_count); break;
4603 case Token::SHL: __ Lsl(result, left, shift_count); break;
4604 case Token::SHR: __ Lsr(result, left, shift_count); break;
4605 default: UNREACHABLE();
4606 }
4607 }
4608 }
4609 }
4610
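The & 0x1f masking applied to constant shift counts above (and the five-bit Ubfx in the register cases of DoShiftS below) mirrors JavaScript's shift semantics: only the low five bits of the count are used, i.e. the count is taken modulo 32. A tiny standalone check (plain C++):

#include <cassert>

int main() {
  // In JS, (x << 33) === (x << 1); masking the count with 0x1f reproduces that.
  assert((33 & 0x1f) == 1);
  assert((6 << (33 & 0x1f)) == (6 << 1));
  return 0;
}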
4611
4612 void LCodeGen::DoShiftS(LShiftS* instr) {
4613 LOperand* right_op = instr->right();
4614 Register left = ToRegister(instr->left());
4615 Register result = ToRegister(instr->result());
4616
4617 // Only ROR by register needs a temp.
4618 ASSERT(((instr->op() == Token::ROR) && right_op->IsRegister()) ||
4619 (instr->temp() == NULL));
4620
4621 if (right_op->IsRegister()) {
4622 Register right = ToRegister(instr->right());
4623 switch (instr->op()) {
4624 case Token::ROR: {
4625 Register temp = ToRegister(instr->temp());
4626 __ Ubfx(temp, right, kSmiShift, 5);
4627 __ SmiUntag(result, left);
4628 __ Ror(result.W(), result.W(), temp.W());
4629 __ SmiTag(result);
4630 break;
4631 }
4632 case Token::SAR:
4633 __ Ubfx(result, right, kSmiShift, 5);
4634 __ Asr(result, left, result);
4635 __ Bic(result, result, kSmiShiftMask);
4636 break;
4637 case Token::SHL:
4638 __ Ubfx(result, right, kSmiShift, 5);
4639 __ Lsl(result, left, result);
4640 break;
4641 case Token::SHR:
4642 if (instr->can_deopt()) {
4643 Label right_not_zero;
4644 __ Cbnz(right, &right_not_zero);
4645 DeoptimizeIfNegative(left, instr->environment());
4646 __ Bind(&right_not_zero);
4647 }
4648 __ Ubfx(result, right, kSmiShift, 5);
4649 __ Lsr(result, left, result);
4650 __ Bic(result, result, kSmiShiftMask);
4651 break;
4652 default: UNREACHABLE();
4653 }
4654 } else {
4655 ASSERT(right_op->IsConstantOperand());
4656 int shift_count = ToInteger32(LConstantOperand::cast(right_op)) & 0x1f;
4657 if (shift_count == 0) {
4658 if ((instr->op() == Token::SHR) && instr->can_deopt()) {
4659 DeoptimizeIfNegative(left, instr->environment());
4660 }
4661 __ Mov(result, left);
4662 } else {
4663 switch (instr->op()) {
4664 case Token::ROR:
4665 __ SmiUntag(result, left);
4666 __ Ror(result.W(), result.W(), shift_count);
4667 __ SmiTag(result);
4668 break;
4669 case Token::SAR:
4670 __ Asr(result, left, shift_count);
4671 __ Bic(result, result, kSmiShiftMask);
4672 break;
4673 case Token::SHL:
4674 __ Lsl(result, left, shift_count);
4675 break;
4676 case Token::SHR:
4677 __ Lsr(result, left, shift_count);
4678 __ Bic(result, result, kSmiShiftMask);
4679 break;
4680 default: UNREACHABLE();
4681 }
4682 }
4683 }
4684 }
4685
4686
4687 void LCodeGen::DoDebugBreak(LDebugBreak* instr) {
4688 __ Debug("LDebugBreak", 0, BREAK);
4689 }
4690
4691
4692 void LCodeGen::DoDeclareGlobals(LDeclareGlobals* instr) {
4693 ASSERT(ToRegister(instr->context()).is(cp));
4694 Register scratch1 = x5;
4695 Register scratch2 = x6;
4696 ASSERT(instr->IsMarkedAsCall());
4697
4698 ASM_UNIMPLEMENTED_BREAK("DoDeclareGlobals");
4699 // TODO(all): if Mov could handle object in new space then it could be used
4700 // here.
4701 __ LoadHeapObject(scratch1, instr->hydrogen()->pairs());
4702 __ Mov(scratch2, Operand(Smi::FromInt(instr->hydrogen()->flags())));
4703 __ Push(cp, scratch1, scratch2); // The context is the first argument.
4704 CallRuntime(Runtime::kDeclareGlobals, 3, instr);
4705 }
4706
4707
4708 void LCodeGen::DoDeferredStackCheck(LStackCheck* instr) {
4709 PushSafepointRegistersScope scope(this, Safepoint::kWithRegisters);
4710 LoadContextFromDeferred(instr->context());
4711 __ CallRuntimeSaveDoubles(Runtime::kStackGuard);
4712 RecordSafepointWithLazyDeopt(
4713 instr, RECORD_SAFEPOINT_WITH_REGISTERS_AND_NO_ARGUMENTS);
4714 ASSERT(instr->HasEnvironment());
4715 LEnvironment* env = instr->environment();
4716 safepoints_.RecordLazyDeoptimizationIndex(env->deoptimization_index());
4717 }
4718
4719
4720 void LCodeGen::DoStackCheck(LStackCheck* instr) {
4721 class DeferredStackCheck: public LDeferredCode {
4722 public:
4723 DeferredStackCheck(LCodeGen* codegen, LStackCheck* instr)
4724 : LDeferredCode(codegen), instr_(instr) { }
4725 virtual void Generate() { codegen()->DoDeferredStackCheck(instr_); }
4726 virtual LInstruction* instr() { return instr_; }
4727 private:
4728 LStackCheck* instr_;
4729 };
4730
4731 ASSERT(instr->HasEnvironment());
4732 LEnvironment* env = instr->environment();
4733 // There is no LLazyBailout instruction for stack-checks. We have to
4734 // prepare for lazy deoptimization explicitly here.
4735 if (instr->hydrogen()->is_function_entry()) {
4736 // Perform stack overflow check.
4737 Label done;
4738 __ CompareRoot(masm()->StackPointer(), Heap::kStackLimitRootIndex);
4739 __ B(hs, &done);
4740
4741 PredictableCodeSizeScope predictable(masm_,
4742 Assembler::kCallSizeWithRelocation);
4743 ASSERT(instr->context()->IsRegister());
4744 ASSERT(ToRegister(instr->context()).is(cp));
4745 CallCode(isolate()->builtins()->StackCheck(),
4746 RelocInfo::CODE_TARGET,
4747 instr);
4748 EnsureSpaceForLazyDeopt(Deoptimizer::patch_size());
4749
4750 __ Bind(&done);
4751 RegisterEnvironmentForDeoptimization(env, Safepoint::kLazyDeopt);
4752 safepoints_.RecordLazyDeoptimizationIndex(env->deoptimization_index());
4753 } else {
4754 ASSERT(instr->hydrogen()->is_backwards_branch());
4755 // Perform stack overflow check if this goto needs it before jumping.
4756 DeferredStackCheck* deferred_stack_check =
4757 new(zone()) DeferredStackCheck(this, instr);
4758 __ CompareRoot(masm()->StackPointer(), Heap::kStackLimitRootIndex);
4759 __ B(lo, deferred_stack_check->entry());
4760
4761 EnsureSpaceForLazyDeopt(Deoptimizer::patch_size());
4762 __ Bind(instr->done_label());
4763 deferred_stack_check->SetExit(instr->done_label());
4764 RegisterEnvironmentForDeoptimization(env, Safepoint::kLazyDeopt);
4765 // Don't record a deoptimization index for the safepoint here.
4766 // This will be done explicitly when emitting call and the safepoint in
4767 // the deferred code.
4768 }
4769 }
4770
4771
4772 void LCodeGen::DoStoreCodeEntry(LStoreCodeEntry* instr) {
4773 Register function = ToRegister(instr->function());
4774 Register code_object = ToRegister(instr->code_object());
4775 Register temp = ToRegister(instr->temp());
4776 __ Add(temp, code_object, Code::kHeaderSize - kHeapObjectTag);
4777 __ Str(temp, FieldMemOperand(function, JSFunction::kCodeEntryOffset));
4778 }
4779
4780
4781 void LCodeGen::DoStoreContextSlot(LStoreContextSlot* instr) {
4782 Register context = ToRegister(instr->context());
4783 Register value = ToRegister(instr->value());
4784 Register scratch = ToRegister(instr->temp());
4785 MemOperand target = ContextMemOperand(context, instr->slot_index());
4786
4787 Label skip_assignment;
4788
4789 if (instr->hydrogen()->RequiresHoleCheck()) {
4790 __ Ldr(scratch, target);
4791 if (instr->hydrogen()->DeoptimizesOnHole()) {
4792 DeoptimizeIfRoot(scratch, Heap::kTheHoleValueRootIndex,
4793 instr->environment());
4794 } else {
4795 __ JumpIfNotRoot(scratch, Heap::kTheHoleValueRootIndex, &skip_assignment);
4796 }
4797 }
4798
4799 __ Str(value, target);
4800 if (instr->hydrogen()->NeedsWriteBarrier()) {
4801 SmiCheck check_needed =
4802 instr->hydrogen()->value()->IsHeapObject()
4803 ? OMIT_SMI_CHECK : INLINE_SMI_CHECK;
4804 __ RecordWriteContextSlot(context,
4805 target.offset(),
4806 value,
4807 scratch,
4808 GetLinkRegisterState(),
4809 kSaveFPRegs,
4810 EMIT_REMEMBERED_SET,
4811 check_needed);
4812 }
4813 __ Bind(&skip_assignment);
4814 }
4815
4816
4817 void LCodeGen::DoStoreGlobalCell(LStoreGlobalCell* instr) {
4818 Register value = ToRegister(instr->value());
4819 Register cell = ToRegister(instr->temp1());
4820
4821 // Load the cell.
4822 __ Mov(cell, Operand(instr->hydrogen()->cell().handle()));
4823
4824 // If the cell we are storing to contains the hole it could have
4825 // been deleted from the property dictionary. In that case, we need
4826 // to update the property details in the property dictionary to mark
4827 // it as no longer deleted. We deoptimize in that case.
4828 if (instr->hydrogen()->RequiresHoleCheck()) {
4829 Register payload = ToRegister(instr->temp2());
4830 __ Ldr(payload, FieldMemOperand(cell, Cell::kValueOffset));
4831 DeoptimizeIfRoot(
4832 payload, Heap::kTheHoleValueRootIndex, instr->environment());
4833 }
4834
4835 // Store the value.
4836 __ Str(value, FieldMemOperand(cell, Cell::kValueOffset));
4837 // Cells are always rescanned, so no write barrier here.
4838 }
4839
4840
4841 void LCodeGen::DoStoreKeyedExternal(LStoreKeyedExternal* instr) {
4842 Register ext_ptr = ToRegister(instr->elements());
4843 Register key = no_reg;
4844 Register scratch;
4845 ElementsKind elements_kind = instr->elements_kind();
4846
4847 bool key_is_smi = instr->hydrogen()->key()->representation().IsSmi();
4848 bool key_is_constant = instr->key()->IsConstantOperand();
4849 int constant_key = 0;
4850 if (key_is_constant) {
4851 ASSERT(instr->temp() == NULL);
4852 constant_key = ToInteger32(LConstantOperand::cast(instr->key()));
4853 if (constant_key & 0xf0000000) {
4854 Abort(kArrayIndexConstantValueTooBig);
4855 }
4856 } else {
4857 key = ToRegister(instr->key());
4858 scratch = ToRegister(instr->temp());
4859 }
4860
4861 MemOperand dst =
4862 PrepareKeyedExternalArrayOperand(key, ext_ptr, scratch, key_is_smi,
4863 key_is_constant, constant_key,
4864 elements_kind,
4865 instr->additional_index());
4866
4867 if ((elements_kind == EXTERNAL_FLOAT32_ELEMENTS) ||
4868 (elements_kind == FLOAT32_ELEMENTS)) {
4869 DoubleRegister value = ToDoubleRegister(instr->value());
4870 DoubleRegister dbl_scratch = double_scratch();
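// Convert the double value to single precision before storing it in a
// float32 array.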
4871 __ Fcvt(dbl_scratch.S(), value);
4872 __ Str(dbl_scratch.S(), dst);
4873 } else if ((elements_kind == EXTERNAL_FLOAT64_ELEMENTS) ||
4874 (elements_kind == FLOAT64_ELEMENTS)) {
4875 DoubleRegister value = ToDoubleRegister(instr->value());
4876 __ Str(value, dst);
4877 } else {
4878 Register value = ToRegister(instr->value());
4879
4880 switch (elements_kind) {
4881 case EXTERNAL_UINT8_CLAMPED_ELEMENTS:
4882 case EXTERNAL_INT8_ELEMENTS:
4883 case EXTERNAL_UINT8_ELEMENTS:
4884 case UINT8_ELEMENTS:
4885 case UINT8_CLAMPED_ELEMENTS:
4886 case INT8_ELEMENTS:
4887 __ Strb(value, dst);
4888 break;
4889 case EXTERNAL_INT16_ELEMENTS:
4890 case EXTERNAL_UINT16_ELEMENTS:
4891 case INT16_ELEMENTS:
4892 case UINT16_ELEMENTS:
4893 __ Strh(value, dst);
4894 break;
4895 case EXTERNAL_INT32_ELEMENTS:
4896 case EXTERNAL_UINT32_ELEMENTS:
4897 case INT32_ELEMENTS:
4898 case UINT32_ELEMENTS:
4899 __ Str(value.W(), dst);
4900 break;
4901 case FLOAT32_ELEMENTS:
4902 case FLOAT64_ELEMENTS:
4903 case EXTERNAL_FLOAT32_ELEMENTS:
4904 case EXTERNAL_FLOAT64_ELEMENTS:
4905 case FAST_DOUBLE_ELEMENTS:
4906 case FAST_ELEMENTS:
4907 case FAST_SMI_ELEMENTS:
4908 case FAST_HOLEY_DOUBLE_ELEMENTS:
4909 case FAST_HOLEY_ELEMENTS:
4910 case FAST_HOLEY_SMI_ELEMENTS:
4911 case DICTIONARY_ELEMENTS:
4912 case NON_STRICT_ARGUMENTS_ELEMENTS:
4913 UNREACHABLE();
4914 break;
4915 }
4916 }
4917 }
4918
4919
4920 void LCodeGen::DoStoreKeyedFixedDouble(LStoreKeyedFixedDouble* instr) {
4921 Register elements = ToRegister(instr->elements());
4922 DoubleRegister value = ToDoubleRegister(instr->value());
4923 Register store_base = ToRegister(instr->temp());
4924 int offset = 0;
4925
4926 if (instr->key()->IsConstantOperand()) {
4927 int constant_key = ToInteger32(LConstantOperand::cast(instr->key()));
4928 if (constant_key & 0xf0000000) {
4929 Abort(kArrayIndexConstantValueTooBig);
4930 }
4931 offset = FixedDoubleArray::OffsetOfElementAt(constant_key +
4932 instr->additional_index());
4933 store_base = elements;
4934 } else {
4935 Register key = ToRegister(instr->key());
4936 bool key_is_tagged = instr->hydrogen()->key()->representation().IsSmi();
4937 CalcKeyedArrayBaseRegister(store_base, elements, key, key_is_tagged,
4938 instr->hydrogen()->elements_kind());
4939 offset = FixedDoubleArray::OffsetOfElementAt(instr->additional_index());
4940 }
4941
4942 if (instr->NeedsCanonicalization()) {
4943 DoubleRegister dbl_scratch = double_scratch();
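// Canonicalize NaNs: Fmaxnm leaves a numeric value unchanged, but produces
// the default (canonical) NaN when the value is any NaN.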
4944 __ Fmov(dbl_scratch,
4945 FixedDoubleArray::canonical_not_the_hole_nan_as_double());
4946 __ Fmaxnm(dbl_scratch, dbl_scratch, value);
4947 __ Str(dbl_scratch, FieldMemOperand(store_base, offset));
4948 } else {
4949 __ Str(value, FieldMemOperand(store_base, offset));
4950 }
4951 }
4952
4953
4954 void LCodeGen::DoStoreKeyedFixed(LStoreKeyedFixed* instr) {
4955 Register value = ToRegister(instr->value());
4956 Register elements = ToRegister(instr->elements());
4957 Register store_base = ToRegister(instr->temp());
4958 Register key = no_reg;
4959 int offset = 0;
4960
4961 if (instr->key()->IsConstantOperand()) {
4962 ASSERT(!instr->hydrogen()->NeedsWriteBarrier());
4963 LConstantOperand* const_operand = LConstantOperand::cast(instr->key());
4964 offset = FixedArray::OffsetOfElementAt(ToInteger32(const_operand) +
4965 instr->additional_index());
4966 store_base = elements;
4967 } else {
4968 key = ToRegister(instr->key());
4969 bool key_is_tagged = instr->hydrogen()->key()->representation().IsSmi();
4970 CalcKeyedArrayBaseRegister(store_base, elements, key, key_is_tagged,
4971 instr->hydrogen()->elements_kind());
4972 offset = FixedArray::OffsetOfElementAt(instr->additional_index());
4973 }
4974 Representation representation = instr->hydrogen()->value()->representation();
4975 if (representation.IsInteger32()) {
4976 ASSERT(instr->hydrogen()->store_mode() == STORE_TO_INITIALIZED_ENTRY);
4977 ASSERT(instr->hydrogen()->elements_kind() == FAST_SMI_ELEMENTS);
4978 STATIC_ASSERT(kSmiValueSize == 32 && kSmiShift == 32 && kSmiTag == 0);
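// With 32-bit smis, the untagged int32 value can be written directly into
// the payload half of the smi field.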
4979 __ Store(value, UntagSmiFieldMemOperand(store_base, offset),
4980 Representation::Integer32());
4981 } else {
4982 __ Store(value, FieldMemOperand(store_base, offset), representation);
4983 }
4984
4985 if (instr->hydrogen()->NeedsWriteBarrier()) {
4986 SmiCheck check_needed =
4987 instr->hydrogen()->value()->IsHeapObject()
4988 ? OMIT_SMI_CHECK : INLINE_SMI_CHECK;
4989 // Compute address of modified element and store it into key register.
4990 __ Add(key, store_base, offset - kHeapObjectTag);
4991 __ RecordWrite(elements, key, value, GetLinkRegisterState(), kSaveFPRegs,
4992 EMIT_REMEMBERED_SET, check_needed);
4993 }
4994 }
4995
4996
4997 void LCodeGen::DoStoreKeyedGeneric(LStoreKeyedGeneric* instr) {
4998 ASSERT(ToRegister(instr->context()).is(cp));
4999 ASSERT(ToRegister(instr->object()).Is(x2));
5000 ASSERT(ToRegister(instr->key()).Is(x1));
5001 ASSERT(ToRegister(instr->value()).Is(x0));
5002
5003 Handle<Code> ic = (instr->strict_mode_flag() == kStrictMode)
5004 ? isolate()->builtins()->KeyedStoreIC_Initialize_Strict()
5005 : isolate()->builtins()->KeyedStoreIC_Initialize();
5006 CallCode(ic, RelocInfo::CODE_TARGET, instr);
5007 }
5008
5009
5010 // TODO(jbramley): Once the merge is done and we're tracking bleeding_edge, try
5011 // to tidy up this function.
5012 void LCodeGen::DoStoreNamedField(LStoreNamedField* instr) {
5013 Representation representation = instr->representation();
5014
5015 Register object = ToRegister(instr->object());
5016 Register temp0 = ToRegister(instr->temp0());
5017 Register temp1 = ToRegister(instr->temp1());
5018 HObjectAccess access = instr->hydrogen()->access();
5019 int offset = access.offset();
5020
5021 if (access.IsExternalMemory()) {
5022 Register value = ToRegister(instr->value());
5023 __ Store(value, MemOperand(object, offset), representation);
5024 return;
5025 }
5026
5027 Handle<Map> transition = instr->transition();
5028 SmiCheck check_needed =
5029 instr->hydrogen()->value()->IsHeapObject()
5030 ? OMIT_SMI_CHECK : INLINE_SMI_CHECK;
5031
5032 if (FLAG_track_heap_object_fields && representation.IsHeapObject()) {
5033 Register value = ToRegister(instr->value());
5034 if (!instr->hydrogen()->value()->type().IsHeapObject()) {
5035 DeoptimizeIfSmi(value, instr->environment());
5036
5037 // We know now that value is not a smi, so we can omit the smi check below.
5038 check_needed = OMIT_SMI_CHECK;
5039 }
5040 } else if (representation.IsDouble()) {
5041 ASSERT(transition.is_null());
5042 ASSERT(access.IsInobject());
5043 ASSERT(!instr->hydrogen()->NeedsWriteBarrier());
5044 FPRegister value = ToDoubleRegister(instr->value());
5045 __ Str(value, FieldMemOperand(object, offset));
5046 return;
5047 }
5048
5049 if (!transition.is_null()) {
5050 // Store the new map value.
5051 Register new_map_value = temp0;
5052 __ Mov(new_map_value, Operand(transition));
5053 __ Str(new_map_value, FieldMemOperand(object, HeapObject::kMapOffset));
5054 if (instr->hydrogen()->NeedsWriteBarrierForMap()) {
5055 // Update the write barrier for the map field.
5056 __ RecordWriteField(object,
5057 HeapObject::kMapOffset,
5058 new_map_value,
5059 temp1,
5060 GetLinkRegisterState(),
5061 kSaveFPRegs,
5062 OMIT_REMEMBERED_SET,
5063 OMIT_SMI_CHECK);
5064 }
5065 }
5066
5067 // Do the store.
5068 Register value = ToRegister(instr->value());
5069 Register destination;
5070 if (access.IsInobject()) {
5071 destination = object;
5072 } else {
5073 __ Ldr(temp0, FieldMemOperand(object, JSObject::kPropertiesOffset));
5074 destination = temp0;
5075 }
5076
5077 if (representation.IsSmi() &&
5078 instr->hydrogen()->value()->representation().IsInteger32()) {
5079 ASSERT(instr->hydrogen()->store_mode() == STORE_TO_INITIALIZED_ENTRY);
5080 #ifdef DEBUG
5081 __ Ldr(temp1, FieldMemOperand(destination, offset));
5082 __ AssertSmi(temp1);
5083 #endif
5084 STATIC_ASSERT(kSmiValueSize == 32 && kSmiShift == 32 && kSmiTag == 0);
5085 __ Store(value, UntagSmiFieldMemOperand(destination, offset),
5086 Representation::Integer32());
5087 } else {
5088 __ Store(value, FieldMemOperand(destination, offset), representation);
5089 }
5090 if (instr->hydrogen()->NeedsWriteBarrier()) {
5091 __ RecordWriteField(destination,
5092 offset,
5093 value, // Clobbered.
5094 temp1, // Clobbered.
5095 GetLinkRegisterState(),
5096 kSaveFPRegs,
5097 EMIT_REMEMBERED_SET,
5098 check_needed);
5099 }
5100 }
5101
5102
5103 void LCodeGen::DoStoreNamedGeneric(LStoreNamedGeneric* instr) {
5104 ASSERT(ToRegister(instr->context()).is(cp));
5105 ASSERT(ToRegister(instr->value()).is(x0));
5106 ASSERT(ToRegister(instr->object()).is(x1));
5107
5108 // Name must be in x2.
5109 __ Mov(x2, Operand(instr->name()));
5110 Handle<Code> ic = StoreIC::initialize_stub(isolate(),
5111 instr->strict_mode_flag());
5112 CallCode(ic, RelocInfo::CODE_TARGET, instr);
5113 }
5114
5115
5116 void LCodeGen::DoStringAdd(LStringAdd* instr) {
5117 ASSERT(ToRegister(instr->context()).is(cp));
5118 ASSERT(ToRegister(instr->left()).Is(x1));
5119 ASSERT(ToRegister(instr->right()).Is(x0));
5120 StringAddStub stub(instr->hydrogen()->flags(),
5121 instr->hydrogen()->pretenure_flag());
5122 CallCode(stub.GetCode(isolate()), RelocInfo::CODE_TARGET, instr);
5123 }
5124
5125
5126 void LCodeGen::DoStringCharCodeAt(LStringCharCodeAt* instr) {
5127 class DeferredStringCharCodeAt: public LDeferredCode {
5128 public:
5129 DeferredStringCharCodeAt(LCodeGen* codegen, LStringCharCodeAt* instr)
5130 : LDeferredCode(codegen), instr_(instr) { }
5131 virtual void Generate() { codegen()->DoDeferredStringCharCodeAt(instr_); }
5132 virtual LInstruction* instr() { return instr_; }
5133 private:
5134 LStringCharCodeAt* instr_;
5135 };
5136
5137 DeferredStringCharCodeAt* deferred =
5138 new(zone()) DeferredStringCharCodeAt(this, instr);
5139
5140 StringCharLoadGenerator::Generate(masm(),
5141 ToRegister(instr->string()),
5142 ToRegister32(instr->index()),
5143 ToRegister(instr->result()),
5144 deferred->entry());
5145 __ Bind(deferred->exit());
5146 }
5147
5148
5149 void LCodeGen::DoDeferredStringCharCodeAt(LStringCharCodeAt* instr) {
5150 Register string = ToRegister(instr->string());
5151 Register result = ToRegister(instr->result());
5152
5153 // TODO(3095996): Get rid of this. For now, we need to make the
5154 // result register contain a valid pointer because it is already
5155 // contained in the register pointer map.
5156 __ Mov(result, 0);
5157
5158 PushSafepointRegistersScope scope(this, Safepoint::kWithRegisters);
5159 __ Push(string);
5160 // Push the index as a smi. This is safe because of the checks in
5161 // DoStringCharCodeAt above.
5162 Register index = ToRegister(instr->index());
5163 __ SmiTag(index);
5164 __ Push(index);
5165
5166 CallRuntimeFromDeferred(Runtime::kStringCharCodeAt, 2, instr,
5167 instr->context());
5168 __ AssertSmi(x0);
5169 __ SmiUntag(x0);
5170 __ StoreToSafepointRegisterSlot(x0, result);
5171 }
5172
5173
5174 void LCodeGen::DoStringCharFromCode(LStringCharFromCode* instr) {
5175 class DeferredStringCharFromCode: public LDeferredCode {
5176 public:
5177 DeferredStringCharFromCode(LCodeGen* codegen, LStringCharFromCode* instr)
5178 : LDeferredCode(codegen), instr_(instr) { }
5179 virtual void Generate() { codegen()->DoDeferredStringCharFromCode(instr_); }
5180 virtual LInstruction* instr() { return instr_; }
5181 private:
5182 LStringCharFromCode* instr_;
5183 };
5184
5185 DeferredStringCharFromCode* deferred =
5186 new(zone()) DeferredStringCharFromCode(this, instr);
5187
5188 ASSERT(instr->hydrogen()->value()->representation().IsInteger32());
5189 Register char_code = ToRegister32(instr->char_code());
5190 Register result = ToRegister(instr->result());
5191
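// Look up one-byte char codes in the single character string cache; larger
// codes and cache misses go through the deferred runtime call.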
5192 __ Cmp(char_code, String::kMaxOneByteCharCode);
5193 __ B(hi, deferred->entry());
5194 __ LoadRoot(result, Heap::kSingleCharacterStringCacheRootIndex);
5195 __ Add(result, result, Operand(char_code, SXTW, kPointerSizeLog2));
5196 __ Ldr(result, FieldMemOperand(result, FixedArray::kHeaderSize));
5197 __ CompareRoot(result, Heap::kUndefinedValueRootIndex);
5198 __ B(eq, deferred->entry());
5199 __ Bind(deferred->exit());
5200 }
5201
5202
5203 void LCodeGen::DoDeferredStringCharFromCode(LStringCharFromCode* instr) {
5204 Register char_code = ToRegister(instr->char_code());
5205 Register result = ToRegister(instr->result());
5206
5207 // TODO(3095996): Get rid of this. For now, we need to make the
5208 // result register contain a valid pointer because it is already
5209 // contained in the register pointer map.
5210 __ Mov(result, 0);
5211
5212 PushSafepointRegistersScope scope(this, Safepoint::kWithRegisters);
5213 __ SmiTag(char_code);
5214 __ Push(char_code);
5215 CallRuntimeFromDeferred(Runtime::kCharFromCode, 1, instr, instr->context());
5216 __ StoreToSafepointRegisterSlot(x0, result);
5217 }
5218
5219
5220 void LCodeGen::DoStringCompareAndBranch(LStringCompareAndBranch* instr) {
5221 ASSERT(ToRegister(instr->context()).is(cp));
5222 Token::Value op = instr->op();
5223
5224 Handle<Code> ic = CompareIC::GetUninitialized(isolate(), op);
5225 CallCode(ic, RelocInfo::CODE_TARGET, instr);
5226 InlineSmiCheckInfo::EmitNotInlined(masm());
5227
5228 Condition condition = TokenToCondition(op, false);
5229
5230 EmitCompareAndBranch(instr, condition, x0, 0);
5231 }
5232
5233
5234 void LCodeGen::DoSubI(LSubI* instr) {
5235 bool can_overflow = instr->hydrogen()->CheckFlag(HValue::kCanOverflow);
5236 Register result = ToRegister32(instr->result());
5237 Register left = ToRegister32(instr->left());
5238 Operand right = ToOperand32I(instr->right());
5239 if (can_overflow) {
5240 __ Subs(result, left, right);
5241 DeoptimizeIf(vs, instr->environment());
5242 } else {
5243 __ Sub(result, left, right);
5244 }
5245 }
5246
5247
5248 void LCodeGen::DoSubS(LSubS* instr) {
5249 bool can_overflow = instr->hydrogen()->CheckFlag(HValue::kCanOverflow);
5250 Register result = ToRegister(instr->result());
5251 Register left = ToRegister(instr->left());
5252 Operand right = ToOperand(instr->right());
5253 if (can_overflow) {
5254 __ Subs(result, left, right);
5255 DeoptimizeIf(vs, instr->environment());
5256 } else {
5257 __ Sub(result, left, right);
5258 }
5259 }
5260
5261
5262 void LCodeGen::DoDeferredTaggedToI(LTaggedToI* instr,
5263 LOperand* value,
5264 LOperand* temp1,
5265 LOperand* temp2) {
5266 Register input = ToRegister(value);
5267 Register scratch1 = ToRegister(temp1);
5268 DoubleRegister dbl_scratch1 = double_scratch();
5269
5270 Label done;
5271
5272 // Load heap object map.
5273 __ Ldr(scratch1, FieldMemOperand(input, HeapObject::kMapOffset));
5274
5275 if (instr->truncating()) {
5276 Register output = ToRegister(instr->result());
5277 Register scratch2 = ToRegister(temp2);
5278 Label check_bools;
5279
5280 // If it's not a heap number, jump to undefined check.
5281 __ JumpIfNotRoot(scratch1, Heap::kHeapNumberMapRootIndex, &check_bools);
5282
5283 // A heap number: load the value and convert it to int32 using the truncating function.
5284 __ TruncateHeapNumberToI(output, input);
5285 __ B(&done);
5286
5287 __ Bind(&check_bools);
5288
5289 Register true_root = output;
5290 Register false_root = scratch2;
5291 __ LoadTrueFalseRoots(true_root, false_root);
5292 __ Cmp(scratch1, true_root);
5293 __ Cset(output, eq);
5294 __ Ccmp(scratch1, false_root, ZFlag, ne);
5295 __ B(eq, &done);
5296
5297 // Output already contains zero; undefined is converted to zero for truncating
5298 // conversions.
5299 DeoptimizeIfNotRoot(input, Heap::kUndefinedValueRootIndex,
5300 instr->environment());
5301 } else {
5302 Register output = ToRegister32(instr->result());
5303
5304 DoubleRegister dbl_scratch2 = ToDoubleRegister(temp2);
5305 Label converted;
5306
5307 // Deoptimize if it's not a heap number.
5308 DeoptimizeIfNotRoot(scratch1, Heap::kHeapNumberMapRootIndex,
5309 instr->environment());
5310
5311 // A heap number: load the value and convert it to int32 using the
5312 // non-truncating function. If the conversion fails, fall through and deoptimize.
5313 __ Ldr(dbl_scratch1, FieldMemOperand(input, HeapNumber::kValueOffset));
5314 __ TryConvertDoubleToInt32(output, dbl_scratch1, dbl_scratch2, &converted);
5315 Deoptimize(instr->environment());
5316
5317 __ Bind(&converted);
5318
5319 if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) {
5320 __ Cmp(output, 0);
5321 __ B(ne, &done);
5322 __ Fmov(scratch1, dbl_scratch1);
5323 DeoptimizeIfNegative(scratch1, instr->environment());
5324 }
5325 }
5326 __ Bind(&done);
5327 }
5328
5329
5330 void LCodeGen::DoTaggedToI(LTaggedToI* instr) {
5331 class DeferredTaggedToI: public LDeferredCode {
5332 public:
5333 DeferredTaggedToI(LCodeGen* codegen, LTaggedToI* instr)
5334 : LDeferredCode(codegen), instr_(instr) { }
5335 virtual void Generate() {
5336 codegen()->DoDeferredTaggedToI(instr_, instr_->value(), instr_->temp1(),
5337 instr_->temp2());
5338 }
5339
5340 virtual LInstruction* instr() { return instr_; }
5341 private:
5342 LTaggedToI* instr_;
5343 };
5344
5345 Register input = ToRegister(instr->value());
5346 Register output = ToRegister(instr->result());
5347
5348 if (instr->hydrogen()->value()->representation().IsSmi()) {
5349 __ SmiUntag(output, input);
5350 } else {
5351 DeferredTaggedToI* deferred = new(zone()) DeferredTaggedToI(this, instr);
5352
5353 __ JumpIfNotSmi(input, deferred->entry());
5354 __ SmiUntag(output, input);
5355 __ Bind(deferred->exit());
5356 }
5357 }
5358
5359
5360 void LCodeGen::DoThisFunction(LThisFunction* instr) {
5361 Register result = ToRegister(instr->result());
5362 __ Ldr(result, MemOperand(fp, JavaScriptFrameConstants::kFunctionOffset));
5363 }
5364
5365
5366 void LCodeGen::DoToFastProperties(LToFastProperties* instr) {
5367 ASSERT(ToRegister(instr->value()).Is(x0));
5368 ASSERT(ToRegister(instr->result()).Is(x0));
5369 __ Push(x0);
5370 CallRuntime(Runtime::kToFastProperties, 1, instr);
5371 }
5372
5373
5374 void LCodeGen::DoRegExpLiteral(LRegExpLiteral* instr) {
5375 ASSERT(ToRegister(instr->context()).is(cp));
5376 Label materialized;
5377 // Registers will be used as follows:
5378 // x7 = literals array.
5379 // x1 = regexp literal.
5380 // x0 = regexp literal clone.
5381 // x10-x12 are used as temporaries.
5382 int literal_offset =
5383 FixedArray::OffsetOfElementAt(instr->hydrogen()->literal_index());
5384 __ LoadObject(x7, instr->hydrogen()->literals());
5385 __ Ldr(x1, FieldMemOperand(x7, literal_offset));
5386 __ JumpIfNotRoot(x1, Heap::kUndefinedValueRootIndex, &materialized);
5387
5388 // Create the regexp literal using the runtime function.
5389 // The result will be in x0.
5390 __ Mov(x12, Operand(Smi::FromInt(instr->hydrogen()->literal_index())));
5391 __ Mov(x11, Operand(instr->hydrogen()->pattern()));
5392 __ Mov(x10, Operand(instr->hydrogen()->flags()));
5393 __ Push(x7, x12, x11, x10);
5394 CallRuntime(Runtime::kMaterializeRegExpLiteral, 4, instr);
5395 __ Mov(x1, x0);
5396
5397 __ Bind(&materialized);
5398 int size = JSRegExp::kSize + JSRegExp::kInObjectFieldCount * kPointerSize;
5399 Label allocated, runtime_allocate;
5400
5401 __ Allocate(size, x0, x10, x11, &runtime_allocate, TAG_OBJECT);
5402 __ B(&allocated);
5403
5404 __ Bind(&runtime_allocate);
5405 __ Mov(x0, Operand(Smi::FromInt(size)));
5406 __ Push(x1, x0);
5407 CallRuntime(Runtime::kAllocateInNewSpace, 1, instr);
5408 __ Pop(x1);
5409
5410 __ Bind(&allocated);
5411 // Copy the content into the newly allocated memory.
5412 __ CopyFields(x0, x1, CPURegList(x10, x11, x12), size / kPointerSize);
5413 }
5414
5415
5416 void LCodeGen::DoTransitionElementsKind(LTransitionElementsKind* instr) {
5417 Register object = ToRegister(instr->object());
5418 Register temp1 = ToRegister(instr->temp1());
5419
5420 Handle<Map> from_map = instr->original_map();
5421 Handle<Map> to_map = instr->transitioned_map();
5422 ElementsKind from_kind = instr->from_kind();
5423 ElementsKind to_kind = instr->to_kind();
5424
5425 Label not_applicable;
5426 __ CheckMap(object, temp1, from_map, &not_applicable, DONT_DO_SMI_CHECK);
5427
5428 if (IsSimpleMapChangeTransition(from_kind, to_kind)) {
5429 Register new_map = ToRegister(instr->temp2());
5430 __ Mov(new_map, Operand(to_map));
5431 __ Str(new_map, FieldMemOperand(object, HeapObject::kMapOffset));
5432 // Write barrier.
5433 __ RecordWriteField(object, HeapObject::kMapOffset, new_map, temp1,
5434 GetLinkRegisterState(), kDontSaveFPRegs);
5435 } else {
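// A non-simple transition may need to allocate a new backing store, so go
// through the TransitionElementsKindStub.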
5436 ASSERT(ToRegister(instr->context()).is(cp));
5437 PushSafepointRegistersScope scope(
5438 this, Safepoint::kWithRegistersAndDoubles);
5439 __ Mov(x0, object);
5440 __ Mov(x1, Operand(to_map));
5441 TransitionElementsKindStub stub(from_kind, to_kind);
5442 __ CallStub(&stub);
5443 RecordSafepointWithRegistersAndDoubles(
5444 instr->pointer_map(), 0, Safepoint::kNoLazyDeopt);
5445 }
5446 __ Bind(&not_applicable);
5447 }
5448
5449
5450 void LCodeGen::DoTrapAllocationMemento(LTrapAllocationMemento* instr) {
5451 Register object = ToRegister(instr->object());
5452 Register temp1 = ToRegister(instr->temp1());
5453 Register temp2 = ToRegister(instr->temp2());
5454
5455 Label no_memento_found;
5456 __ JumpIfJSArrayHasAllocationMemento(object, temp1, temp2, &no_memento_found);
5457 Deoptimize(instr->environment());
5458 __ Bind(&no_memento_found);
5459 }
5460
5461
5462 void LCodeGen::DoTruncateDoubleToIntOrSmi(LTruncateDoubleToIntOrSmi* instr) {
5463 DoubleRegister input = ToDoubleRegister(instr->value());
5464 Register result = ToRegister(instr->result());
5465 __ TruncateDoubleToI(result, input);
5466 if (instr->tag_result()) {
5467 __ SmiTag(result, result);
5468 }
5469 }
5470
5471
5472 void LCodeGen::DoTypeof(LTypeof* instr) {
5473 Register input = ToRegister(instr->value());
5474 __ Push(input);
5475 CallRuntime(Runtime::kTypeof, 1, instr);
5476 }
5477
5478
5479 void LCodeGen::DoTypeofIsAndBranch(LTypeofIsAndBranch* instr) {
5480 Handle<String> type_name = instr->type_literal();
5481 Label* true_label = instr->TrueLabel(chunk_);
5482 Label* false_label = instr->FalseLabel(chunk_);
5483 Register value = ToRegister(instr->value());
5484
5485 if (type_name->Equals(heap()->number_string())) {
5486 ASSERT(instr->temp1() != NULL);
5487 Register map = ToRegister(instr->temp1());
5488
5489 __ JumpIfSmi(value, true_label);
5490 __ Ldr(map, FieldMemOperand(value, HeapObject::kMapOffset));
5491 __ CompareRoot(map, Heap::kHeapNumberMapRootIndex);
5492 EmitBranch(instr, eq);
5493
5494 } else if (type_name->Equals(heap()->string_string())) {
5495 ASSERT((instr->temp1() != NULL) && (instr->temp2() != NULL));
5496 Register map = ToRegister(instr->temp1());
5497 Register scratch = ToRegister(instr->temp2());
5498
5499 __ JumpIfSmi(value, false_label);
5500 __ JumpIfObjectType(
5501 value, map, scratch, FIRST_NONSTRING_TYPE, false_label, ge);
5502 __ Ldrb(scratch, FieldMemOperand(map, Map::kBitFieldOffset));
5503 EmitTestAndBranch(instr, eq, scratch, 1 << Map::kIsUndetectable);
5504
5505 } else if (type_name->Equals(heap()->symbol_string())) {
5506 ASSERT((instr->temp1() != NULL) && (instr->temp2() != NULL));
5507 Register map = ToRegister(instr->temp1());
5508 Register scratch = ToRegister(instr->temp2());
5509
5510 __ JumpIfSmi(value, false_label);
5511 __ CompareObjectType(value, map, scratch, SYMBOL_TYPE);
5512 EmitBranch(instr, eq);
5513
5514 } else if (type_name->Equals(heap()->boolean_string())) {
5515 __ JumpIfRoot(value, Heap::kTrueValueRootIndex, true_label);
5516 __ CompareRoot(value, Heap::kFalseValueRootIndex);
5517 EmitBranch(instr, eq);
5518
5519 } else if (FLAG_harmony_typeof && type_name->Equals(heap()->null_string())) {
5520 __ CompareRoot(value, Heap::kNullValueRootIndex);
5521 EmitBranch(instr, eq);
5522
5523 } else if (type_name->Equals(heap()->undefined_string())) {
5524 ASSERT(instr->temp1() != NULL);
5525 Register scratch = ToRegister(instr->temp1());
5526
5527 __ JumpIfRoot(value, Heap::kUndefinedValueRootIndex, true_label);
5528 __ JumpIfSmi(value, false_label);
5529 // Check for undetectable objects and jump to the true branch in this case.
5530 __ Ldr(scratch, FieldMemOperand(value, HeapObject::kMapOffset));
5531 __ Ldrb(scratch, FieldMemOperand(scratch, Map::kBitFieldOffset));
5532 EmitTestAndBranch(instr, ne, scratch, 1 << Map::kIsUndetectable);
5533
5534 } else if (type_name->Equals(heap()->function_string())) {
5535 STATIC_ASSERT(NUM_OF_CALLABLE_SPEC_OBJECT_TYPES == 2);
5536 ASSERT(instr->temp1() != NULL);
5537 Register type = ToRegister(instr->temp1());
5538
5539 __ JumpIfSmi(value, false_label);
5540 __ JumpIfObjectType(value, type, type, JS_FUNCTION_TYPE, true_label);
5541 // The HeapObject's type has been loaded into the type register by JumpIfObjectType.
5542 EmitCompareAndBranch(instr, eq, type, JS_FUNCTION_PROXY_TYPE);
5543
5544 } else if (type_name->Equals(heap()->object_string())) {
5545 ASSERT((instr->temp1() != NULL) && (instr->temp2() != NULL));
5546 Register map = ToRegister(instr->temp1());
5547 Register scratch = ToRegister(instr->temp2());
5548
5549 __ JumpIfSmi(value, false_label);
5550 if (!FLAG_harmony_typeof) {
5551 __ JumpIfRoot(value, Heap::kNullValueRootIndex, true_label);
5552 }
5553 __ JumpIfObjectType(value, map, scratch,
5554 FIRST_NONCALLABLE_SPEC_OBJECT_TYPE, false_label, lt);
5555 __ CompareInstanceType(map, scratch, LAST_NONCALLABLE_SPEC_OBJECT_TYPE);
5556 __ B(gt, false_label);
5557 // Check for undetectable objects => false.
5558 __ Ldrb(scratch, FieldMemOperand(value, Map::kBitFieldOffset));
5559 EmitTestAndBranch(instr, eq, scratch, 1 << Map::kIsUndetectable);
5560
5561 } else {
5562 __ B(false_label);
5563 }
5564 }
5565
5566
5567 void LCodeGen::DoUint32ToDouble(LUint32ToDouble* instr) {
5568 __ Ucvtf(ToDoubleRegister(instr->result()), ToRegister32(instr->value()));
5569 }
5570
5571
5572 void LCodeGen::DoUint32ToSmi(LUint32ToSmi* instr) {
5573 Register value = ToRegister(instr->value());
5574 Register result = ToRegister(instr->result());
5575
5576 if (!instr->hydrogen()->value()->HasRange() ||
5577 !instr->hydrogen()->value()->range()->IsInSmiRange() ||
5578 instr->hydrogen()->value()->range()->upper() == kMaxInt) {
5579 // The Range class can't express upper bounds in the (kMaxInt, kMaxUint32]
5580 // interval, so we treat kMaxInt as a sentinel for this entire interval.
5581 DeoptimizeIfNegative(value.W(), instr->environment());
5582 }
5583 __ SmiTag(result, value);
5584 }
5585
5586
5587 void LCodeGen::DoCheckMapValue(LCheckMapValue* instr) {
5588 Register object = ToRegister(instr->value());
5589 Register map = ToRegister(instr->map());
5590 Register temp = ToRegister(instr->temp());
5591 __ Ldr(temp, FieldMemOperand(object, HeapObject::kMapOffset));
5592 __ Cmp(map, temp);
5593 DeoptimizeIf(ne, instr->environment());
5594 }
5595
5596
5597 void LCodeGen::DoWrapReceiver(LWrapReceiver* instr) {
5598 Register receiver = ToRegister(instr->receiver());
5599 Register function = ToRegister(instr->function());
5600 Register result = ToRegister(instr->result());
5601
5602 // If the receiver is null or undefined, we have to pass the global object as
5603 // a receiver to normal functions. Values have to be passed unchanged to
5604 // builtins and strict-mode functions.
5605 Label global_object, done, deopt;
5606
5607 if (!instr->hydrogen()->known_function()) {
5608 __ Ldr(result, FieldMemOperand(function,
5609 JSFunction::kSharedFunctionInfoOffset));
5610
5611 // CompilerHints is an int32 field. See objects.h.
5612 __ Ldr(result.W(),
5613 FieldMemOperand(result, SharedFunctionInfo::kCompilerHintsOffset));
5614
5615 // Do not transform the receiver to object for strict mode functions.
5616 __ Tbnz(result, SharedFunctionInfo::kStrictModeFunction, &done);
5617
5618 // Do not transform the receiver to object for builtins.
5619 __ Tbnz(result, SharedFunctionInfo::kNative, &done);
5620 }
5621
5622 // Normal function. Replace undefined or null with global receiver.
5623 __ JumpIfRoot(receiver, Heap::kNullValueRootIndex, &global_object);
5624 __ JumpIfRoot(receiver, Heap::kUndefinedValueRootIndex, &global_object);
5625
5626 // Deoptimize if the receiver is not a JS object.
5627 __ JumpIfSmi(receiver, &deopt);
5628 __ CompareObjectType(receiver, result, result, FIRST_SPEC_OBJECT_TYPE);
5629 __ Mov(result, receiver);
5630 __ B(ge, &done);
5631 // Otherwise, fall through to deopt.
5632
5633 __ Bind(&deopt);
5634 Deoptimize(instr->environment());
5635
5636 __ Bind(&global_object);
5637 __ Ldr(result, FieldMemOperand(function, JSFunction::kContextOffset));
5638 __ Ldr(result, ContextMemOperand(result, Context::GLOBAL_OBJECT_INDEX));
5639 __ Ldr(result, FieldMemOperand(result, GlobalObject::kGlobalReceiverOffset));
5640
5641 __ Bind(&done);
5642 }
5643
5644
5645 void LCodeGen::DoLoadFieldByIndex(LLoadFieldByIndex* instr) {
5646 Register object = ToRegister(instr->object());
5647 Register index = ToRegister(instr->index());
5648 Register result = ToRegister(instr->result());
5649
5650 __ AssertSmi(index);
5651
5652 Label out_of_object, done;
5653 __ Cmp(index, Operand(Smi::FromInt(0)));
5654 __ B(lt, &out_of_object);
5655
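// Non-negative indices address in-object fields, stored directly after the
// JSObject header.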
5656 STATIC_ASSERT(kPointerSizeLog2 > kSmiTagSize);
5657 __ Add(result, object, Operand::UntagSmiAndScale(index, kPointerSizeLog2));
5658 __ Ldr(result, FieldMemOperand(result, JSObject::kHeaderSize));
5659
5660 __ B(&done);
5661
5662 __ Bind(&out_of_object);
5663 __ Ldr(result, FieldMemOperand(object, JSObject::kPropertiesOffset));
5664 // The index is equal to the negated out-of-object property index plus 1.
5665 __ Sub(result, result, Operand::UntagSmiAndScale(index, kPointerSizeLog2));
5666 __ Ldr(result, FieldMemOperand(result,
5667 FixedArray::kHeaderSize - kPointerSize));
5668 __ Bind(&done);
5669 }
5670
5671 } } // namespace v8::internal