Chromium Code Reviews

Side by Side Diff: src/mips/lithium-codegen-mips.cc

Issue 1405363003: Move Hydrogen and Lithium to src/crankshaft/ (Closed) Base URL: https://chromium.googlesource.com/v8/v8.git@master
Patch Set: rebased Created 5 years, 2 months ago
1 // Copyright 2012 the V8 project authors. All rights reserved.
2 // Redistribution and use in source and binary forms, with or without
3 // modification, are permitted provided that the following conditions are
4 // met:
5 //
6 // * Redistributions of source code must retain the above copyright
7 // notice, this list of conditions and the following disclaimer.
8 // * Redistributions in binary form must reproduce the above
9 // copyright notice, this list of conditions and the following
10 // disclaimer in the documentation and/or other materials provided
11 // with the distribution.
12 // * Neither the name of Google Inc. nor the names of its
13 // contributors may be used to endorse or promote products derived
14 // from this software without specific prior written permission.
15 //
16 // THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
17 // "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
18 // LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
19 // A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
20 // OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
21 // SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
22 // LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
23 // DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
24 // THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
25 // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
26 // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
27
28 #include "src/base/bits.h"
29 #include "src/code-factory.h"
30 #include "src/code-stubs.h"
31 #include "src/hydrogen-osr.h"
32 #include "src/ic/ic.h"
33 #include "src/ic/stub-cache.h"
34 #include "src/mips/lithium-codegen-mips.h"
35 #include "src/mips/lithium-gap-resolver-mips.h"
36 #include "src/profiler/cpu-profiler.h"
37
38
39 namespace v8 {
40 namespace internal {
41
42
43 class SafepointGenerator final : public CallWrapper {
44 public:
45 SafepointGenerator(LCodeGen* codegen,
46 LPointerMap* pointers,
47 Safepoint::DeoptMode mode)
48 : codegen_(codegen),
49 pointers_(pointers),
50 deopt_mode_(mode) { }
51 virtual ~SafepointGenerator() {}
52
53 void BeforeCall(int call_size) const override {}
54
55 void AfterCall() const override {
56 codegen_->RecordSafepoint(pointers_, deopt_mode_);
57 }
58
59 private:
60 LCodeGen* codegen_;
61 LPointerMap* pointers_;
62 Safepoint::DeoptMode deopt_mode_;
63 };
64
65
66 #define __ masm()->
67
68 bool LCodeGen::GenerateCode() {
69 LPhase phase("Z_Code generation", chunk());
70 DCHECK(is_unused());
71 status_ = GENERATING;
72
73 // Open a frame scope to indicate that there is a frame on the stack. The
74 // NONE indicates that the scope shouldn't actually generate code to set up
75 // the frame (that is done in GeneratePrologue).
76 FrameScope frame_scope(masm_, StackFrame::NONE);
77
78 return GeneratePrologue() && GenerateBody() && GenerateDeferredCode() &&
79 GenerateJumpTable() && GenerateSafepointTable();
80 }
81
82
83 void LCodeGen::FinishCode(Handle<Code> code) {
84 DCHECK(is_done());
85 code->set_stack_slots(GetStackSlotCount());
86 code->set_safepoint_table_offset(safepoints_.GetCodeOffset());
87 PopulateDeoptimizationData(code);
88 }
89
90
91 void LCodeGen::SaveCallerDoubles() {
92 DCHECK(info()->saves_caller_doubles());
93 DCHECK(NeedsEagerFrame());
94 Comment(";;; Save clobbered callee double registers");
95 int count = 0;
96 BitVector* doubles = chunk()->allocated_double_registers();
97 BitVector::Iterator save_iterator(doubles);
98 while (!save_iterator.Done()) {
99 __ sdc1(DoubleRegister::from_code(save_iterator.Current()),
100 MemOperand(sp, count * kDoubleSize));
101 save_iterator.Advance();
102 count++;
103 }
104 }
105
106
107 void LCodeGen::RestoreCallerDoubles() {
108 DCHECK(info()->saves_caller_doubles());
109 DCHECK(NeedsEagerFrame());
110 Comment(";;; Restore clobbered callee double registers");
111 BitVector* doubles = chunk()->allocated_double_registers();
112 BitVector::Iterator save_iterator(doubles);
113 int count = 0;
114 while (!save_iterator.Done()) {
115 __ ldc1(DoubleRegister::from_code(save_iterator.Current()),
116 MemOperand(sp, count * kDoubleSize));
117 save_iterator.Advance();
118 count++;
119 }
120 }
121
122
123 bool LCodeGen::GeneratePrologue() {
124 DCHECK(is_generating());
125
126 if (info()->IsOptimizing()) {
127 ProfileEntryHookStub::MaybeCallEntryHook(masm_);
128
129 #ifdef DEBUG
130 if (strlen(FLAG_stop_at) > 0 &&
131 info_->literal()->name()->IsUtf8EqualTo(CStrVector(FLAG_stop_at))) {
132 __ stop("stop_at");
133 }
134 #endif
135
136 // a1: Callee's JS function.
137 // cp: Callee's context.
138 // fp: Caller's frame pointer.
139 // lr: Caller's pc.
140
141 // Sloppy mode functions and builtins need to replace the receiver with the
142 // global proxy when called as functions (without an explicit receiver
143 // object).
144 if (info()->MustReplaceUndefinedReceiverWithGlobalProxy()) {
145 Label ok;
146 int receiver_offset = info_->scope()->num_parameters() * kPointerSize;
147 __ LoadRoot(at, Heap::kUndefinedValueRootIndex);
148 __ lw(a2, MemOperand(sp, receiver_offset));
149 __ Branch(&ok, ne, a2, Operand(at));
150
151 __ lw(a2, GlobalObjectOperand());
152 __ lw(a2, FieldMemOperand(a2, GlobalObject::kGlobalProxyOffset));
153
154 __ sw(a2, MemOperand(sp, receiver_offset));
155
156 __ bind(&ok);
157 }
158 }
159
160 info()->set_prologue_offset(masm_->pc_offset());
161 if (NeedsEagerFrame()) {
162 if (info()->IsStub()) {
163 __ StubPrologue();
164 } else {
165 __ Prologue(info()->IsCodePreAgingActive());
166 }
167 frame_is_built_ = true;
168 }
169
170 // Reserve space for the stack slots needed by the code.
171 int slots = GetStackSlotCount();
172 if (slots > 0) {
173 if (FLAG_debug_code) {
174 __ Subu(sp, sp, Operand(slots * kPointerSize));
175 __ Push(a0, a1);
176 __ Addu(a0, sp, Operand(slots * kPointerSize));
177 __ li(a1, Operand(kSlotsZapValue));
178 Label loop;
179 __ bind(&loop);
180 __ Subu(a0, a0, Operand(kPointerSize));
181 __ sw(a1, MemOperand(a0, 2 * kPointerSize));
182 __ Branch(&loop, ne, a0, Operand(sp));
183 __ Pop(a0, a1);
184 } else {
185 __ Subu(sp, sp, Operand(slots * kPointerSize));
186 }
187 }
188
189 if (info()->saves_caller_doubles()) {
190 SaveCallerDoubles();
191 }
192 return !is_aborted();
193 }
194
195
196 void LCodeGen::DoPrologue(LPrologue* instr) {
197 Comment(";;; Prologue begin");
198
199 // Possibly allocate a local context.
200 if (info()->scope()->num_heap_slots() > 0) {
201 Comment(";;; Allocate local context");
202 bool need_write_barrier = true;
203 // Argument to NewContext is the function, which is in a1.
204 int slots = info()->scope()->num_heap_slots() - Context::MIN_CONTEXT_SLOTS;
205 Safepoint::DeoptMode deopt_mode = Safepoint::kNoLazyDeopt;
206 if (info()->scope()->is_script_scope()) {
207 __ push(a1);
208 __ Push(info()->scope()->GetScopeInfo(info()->isolate()));
209 __ CallRuntime(Runtime::kNewScriptContext, 2);
210 deopt_mode = Safepoint::kLazyDeopt;
211 } else if (slots <= FastNewContextStub::kMaximumSlots) {
212 FastNewContextStub stub(isolate(), slots);
213 __ CallStub(&stub);
214 // Result of FastNewContextStub is always in new space.
215 need_write_barrier = false;
216 } else {
217 __ push(a1);
218 __ CallRuntime(Runtime::kNewFunctionContext, 1);
219 }
220 RecordSafepoint(deopt_mode);
221
222 // Context is returned in both v0 and cp. It replaces the context passed to us.
223 // It's saved in the stack and kept live in cp.
224 __ mov(cp, v0);
225 __ sw(v0, MemOperand(fp, StandardFrameConstants::kContextOffset));
226 // Copy any necessary parameters into the context.
227 int num_parameters = scope()->num_parameters();
228 int first_parameter = scope()->has_this_declaration() ? -1 : 0;
229 for (int i = first_parameter; i < num_parameters; i++) {
230 Variable* var = (i == -1) ? scope()->receiver() : scope()->parameter(i);
231 if (var->IsContextSlot()) {
232 int parameter_offset = StandardFrameConstants::kCallerSPOffset +
233 (num_parameters - 1 - i) * kPointerSize;
234 // Load parameter from stack.
235 __ lw(a0, MemOperand(fp, parameter_offset));
236 // Store it in the context.
237 MemOperand target = ContextOperand(cp, var->index());
238 __ sw(a0, target);
239 // Update the write barrier. This clobbers a3 and a0.
240 if (need_write_barrier) {
241 __ RecordWriteContextSlot(
242 cp, target.offset(), a0, a3, GetRAState(), kSaveFPRegs);
243 } else if (FLAG_debug_code) {
244 Label done;
245 __ JumpIfInNewSpace(cp, a0, &done);
246 __ Abort(kExpectedNewSpaceObject);
247 __ bind(&done);
248 }
249 }
250 }
251 Comment(";;; End allocate local context");
252 }
253
254 Comment(";;; Prologue end");
255 }
256
257
258 void LCodeGen::GenerateOsrPrologue() {
259 // Generate the OSR entry prologue at the first unknown OSR value, or if there
260 // are none, at the OSR entrypoint instruction.
261 if (osr_pc_offset_ >= 0) return;
262
263 osr_pc_offset_ = masm()->pc_offset();
264
265 // Adjust the frame size, subsuming the unoptimized frame into the
266 // optimized frame.
267 int slots = GetStackSlotCount() - graph()->osr()->UnoptimizedFrameSlots();
268 DCHECK(slots >= 0);
269 __ Subu(sp, sp, Operand(slots * kPointerSize));
270 }
271
272
273 void LCodeGen::GenerateBodyInstructionPre(LInstruction* instr) {
274 if (instr->IsCall()) {
275 EnsureSpaceForLazyDeopt(Deoptimizer::patch_size());
276 }
277 if (!instr->IsLazyBailout() && !instr->IsGap()) {
278 safepoints_.BumpLastLazySafepointIndex();
279 }
280 }
281
282
283 bool LCodeGen::GenerateDeferredCode() {
284 DCHECK(is_generating());
285 if (deferred_.length() > 0) {
286 for (int i = 0; !is_aborted() && i < deferred_.length(); i++) {
287 LDeferredCode* code = deferred_[i];
288
289 HValue* value =
290 instructions_->at(code->instruction_index())->hydrogen_value();
291 RecordAndWritePosition(
292 chunk()->graph()->SourcePositionToScriptPosition(value->position()));
293
294 Comment(";;; <@%d,#%d> "
295 "-------------------- Deferred %s --------------------",
296 code->instruction_index(),
297 code->instr()->hydrogen_value()->id(),
298 code->instr()->Mnemonic());
299 __ bind(code->entry());
300 if (NeedsDeferredFrame()) {
301 Comment(";;; Build frame");
302 DCHECK(!frame_is_built_);
303 DCHECK(info()->IsStub());
304 frame_is_built_ = true;
305 __ MultiPush(cp.bit() | fp.bit() | ra.bit());
306 __ li(scratch0(), Operand(Smi::FromInt(StackFrame::STUB)));
307 __ push(scratch0());
308 __ Addu(fp, sp, Operand(StandardFrameConstants::kFixedFrameSizeFromFp));
309 Comment(";;; Deferred code");
310 }
311 code->Generate();
312 if (NeedsDeferredFrame()) {
313 Comment(";;; Destroy frame");
314 DCHECK(frame_is_built_);
315 __ pop(at);
316 __ MultiPop(cp.bit() | fp.bit() | ra.bit());
317 frame_is_built_ = false;
318 }
319 __ jmp(code->exit());
320 }
321 }
322 // Deferred code is the last part of the instruction sequence. Mark
323 // the generated code as done unless we bailed out.
324 if (!is_aborted()) status_ = DONE;
325 return !is_aborted();
326 }
327
328
329 bool LCodeGen::GenerateJumpTable() {
330 if (jump_table_.length() > 0) {
331 Label needs_frame, call_deopt_entry;
332
333 Comment(";;; -------------------- Jump table --------------------");
334 Address base = jump_table_[0].address;
335
336 Register entry_offset = t9;
337
338 int length = jump_table_.length();
339 for (int i = 0; i < length; i++) {
340 Deoptimizer::JumpTableEntry* table_entry = &jump_table_[i];
341 __ bind(&table_entry->label);
342
343 DCHECK(table_entry->bailout_type == jump_table_[0].bailout_type);
344 Address entry = table_entry->address;
345 DeoptComment(table_entry->deopt_info);
346
347 // Second-level deopt table entries are contiguous and small, so instead
348 // of loading the full, absolute address of each one, load an immediate
349 // offset which will be added to the base address later.
350 __ li(entry_offset, Operand(entry - base));
351
352 if (table_entry->needs_frame) {
353 DCHECK(!info()->saves_caller_doubles());
354 Comment(";;; call deopt with frame");
355 __ MultiPush(cp.bit() | fp.bit() | ra.bit());
356 __ Call(&needs_frame);
357 } else {
358 __ Call(&call_deopt_entry);
359 }
360 info()->LogDeoptCallPosition(masm()->pc_offset(),
361 table_entry->deopt_info.inlining_id);
362 }
363
364 if (needs_frame.is_linked()) {
365 __ bind(&needs_frame);
366 // This variant of deopt can only be used with stubs. Since we don't
367 // have a function pointer to install in the stack frame that we're
368 // building, install a special marker there instead.
369 DCHECK(info()->IsStub());
370 __ li(at, Operand(Smi::FromInt(StackFrame::STUB)));
371 __ push(at);
372 __ Addu(fp, sp, Operand(StandardFrameConstants::kFixedFrameSizeFromFp));
373 }
374
375 Comment(";;; call deopt");
376 __ bind(&call_deopt_entry);
377
378 if (info()->saves_caller_doubles()) {
379 DCHECK(info()->IsStub());
380 RestoreCallerDoubles();
381 }
382
383 // Add the base address to the offset previously loaded in entry_offset.
384 __ Addu(entry_offset, entry_offset,
385 Operand(ExternalReference::ForDeoptEntry(base)));
386 __ Jump(entry_offset);
387 }
388 __ RecordComment("]");
389
390 // The deoptimization jump table is the last part of the instruction
391 // sequence. Mark the generated code as done unless we bailed out.
392 if (!is_aborted()) status_ = DONE;
393 return !is_aborted();
394 }
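
For reference, a minimal stand-alone sketch of the addressing scheme described in the comment above ("load an immediate offset which will be added to the base address later"): each table entry materializes only the small offset entry - base, and the shared tail adds the absolute base once before jumping. Plain C++, not part of the patch; the function names are illustrative.

    #include <cstdint>

    // Per-entry code: record only the small offset from the table base.
    uint32_t DeoptEntryOffset(uintptr_t base, uintptr_t entry) {
      return static_cast<uint32_t>(entry - base);  // __ li(entry_offset, entry - base)
    }

    // Shared tail: turn the offset back into an absolute deopt-entry address.
    uintptr_t DeoptEntryAddress(uintptr_t base, uint32_t entry_offset) {
      return base + entry_offset;                  // __ Addu(...); __ Jump(entry_offset)
    }
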
395
396
397 bool LCodeGen::GenerateSafepointTable() {
398 DCHECK(is_done());
399 safepoints_.Emit(masm(), GetStackSlotCount());
400 return !is_aborted();
401 }
402
403
404 Register LCodeGen::ToRegister(int index) const {
405 return Register::from_code(index);
406 }
407
408
409 DoubleRegister LCodeGen::ToDoubleRegister(int index) const {
410 return DoubleRegister::from_code(index);
411 }
412
413
414 Register LCodeGen::ToRegister(LOperand* op) const {
415 DCHECK(op->IsRegister());
416 return ToRegister(op->index());
417 }
418
419
420 Register LCodeGen::EmitLoadRegister(LOperand* op, Register scratch) {
421 if (op->IsRegister()) {
422 return ToRegister(op->index());
423 } else if (op->IsConstantOperand()) {
424 LConstantOperand* const_op = LConstantOperand::cast(op);
425 HConstant* constant = chunk_->LookupConstant(const_op);
426 Handle<Object> literal = constant->handle(isolate());
427 Representation r = chunk_->LookupLiteralRepresentation(const_op);
428 if (r.IsInteger32()) {
429 AllowDeferredHandleDereference get_number;
430 DCHECK(literal->IsNumber());
431 __ li(scratch, Operand(static_cast<int32_t>(literal->Number())));
432 } else if (r.IsSmi()) {
433 DCHECK(constant->HasSmiValue());
434 __ li(scratch, Operand(Smi::FromInt(constant->Integer32Value())));
435 } else if (r.IsDouble()) {
436 Abort(kEmitLoadRegisterUnsupportedDoubleImmediate);
437 } else {
438 DCHECK(r.IsSmiOrTagged());
439 __ li(scratch, literal);
440 }
441 return scratch;
442 } else if (op->IsStackSlot()) {
443 __ lw(scratch, ToMemOperand(op));
444 return scratch;
445 }
446 UNREACHABLE();
447 return scratch;
448 }
449
450
451 DoubleRegister LCodeGen::ToDoubleRegister(LOperand* op) const {
452 DCHECK(op->IsDoubleRegister());
453 return ToDoubleRegister(op->index());
454 }
455
456
457 DoubleRegister LCodeGen::EmitLoadDoubleRegister(LOperand* op,
458 FloatRegister flt_scratch,
459 DoubleRegister dbl_scratch) {
460 if (op->IsDoubleRegister()) {
461 return ToDoubleRegister(op->index());
462 } else if (op->IsConstantOperand()) {
463 LConstantOperand* const_op = LConstantOperand::cast(op);
464 HConstant* constant = chunk_->LookupConstant(const_op);
465 Handle<Object> literal = constant->handle(isolate());
466 Representation r = chunk_->LookupLiteralRepresentation(const_op);
467 if (r.IsInteger32()) {
468 DCHECK(literal->IsNumber());
469 __ li(at, Operand(static_cast<int32_t>(literal->Number())));
470 __ mtc1(at, flt_scratch);
471 __ cvt_d_w(dbl_scratch, flt_scratch);
472 return dbl_scratch;
473 } else if (r.IsDouble()) {
474 Abort(kUnsupportedDoubleImmediate);
475 } else if (r.IsTagged()) {
476 Abort(kUnsupportedTaggedImmediate);
477 }
478 } else if (op->IsStackSlot()) {
479 MemOperand mem_op = ToMemOperand(op);
480 __ ldc1(dbl_scratch, mem_op);
481 return dbl_scratch;
482 }
483 UNREACHABLE();
484 return dbl_scratch;
485 }
486
487
488 Handle<Object> LCodeGen::ToHandle(LConstantOperand* op) const {
489 HConstant* constant = chunk_->LookupConstant(op);
490 DCHECK(chunk_->LookupLiteralRepresentation(op).IsSmiOrTagged());
491 return constant->handle(isolate());
492 }
493
494
495 bool LCodeGen::IsInteger32(LConstantOperand* op) const {
496 return chunk_->LookupLiteralRepresentation(op).IsSmiOrInteger32();
497 }
498
499
500 bool LCodeGen::IsSmi(LConstantOperand* op) const {
501 return chunk_->LookupLiteralRepresentation(op).IsSmi();
502 }
503
504
505 int32_t LCodeGen::ToInteger32(LConstantOperand* op) const {
506 return ToRepresentation(op, Representation::Integer32());
507 }
508
509
510 int32_t LCodeGen::ToRepresentation(LConstantOperand* op,
511 const Representation& r) const {
512 HConstant* constant = chunk_->LookupConstant(op);
513 int32_t value = constant->Integer32Value();
514 if (r.IsInteger32()) return value;
515 DCHECK(r.IsSmiOrTagged());
516 return reinterpret_cast<int32_t>(Smi::FromInt(value));
517 }
518
519
520 Smi* LCodeGen::ToSmi(LConstantOperand* op) const {
521 HConstant* constant = chunk_->LookupConstant(op);
522 return Smi::FromInt(constant->Integer32Value());
523 }
524
525
526 double LCodeGen::ToDouble(LConstantOperand* op) const {
527 HConstant* constant = chunk_->LookupConstant(op);
528 DCHECK(constant->HasDoubleValue());
529 return constant->DoubleValue();
530 }
531
532
533 Operand LCodeGen::ToOperand(LOperand* op) {
534 if (op->IsConstantOperand()) {
535 LConstantOperand* const_op = LConstantOperand::cast(op);
536 HConstant* constant = chunk()->LookupConstant(const_op);
537 Representation r = chunk_->LookupLiteralRepresentation(const_op);
538 if (r.IsSmi()) {
539 DCHECK(constant->HasSmiValue());
540 return Operand(Smi::FromInt(constant->Integer32Value()));
541 } else if (r.IsInteger32()) {
542 DCHECK(constant->HasInteger32Value());
543 return Operand(constant->Integer32Value());
544 } else if (r.IsDouble()) {
545 Abort(kToOperandUnsupportedDoubleImmediate);
546 }
547 DCHECK(r.IsTagged());
548 return Operand(constant->handle(isolate()));
549 } else if (op->IsRegister()) {
550 return Operand(ToRegister(op));
551 } else if (op->IsDoubleRegister()) {
552 Abort(kToOperandIsDoubleRegisterUnimplemented);
553 return Operand(0);
554 }
555 // Stack slots not implemented, use ToMemOperand instead.
556 UNREACHABLE();
557 return Operand(0);
558 }
559
560
561 static int ArgumentsOffsetWithoutFrame(int index) {
562 DCHECK(index < 0);
563 return -(index + 1) * kPointerSize;
564 }
565
566
567 MemOperand LCodeGen::ToMemOperand(LOperand* op) const {
568 DCHECK(!op->IsRegister());
569 DCHECK(!op->IsDoubleRegister());
570 DCHECK(op->IsStackSlot() || op->IsDoubleStackSlot());
571 if (NeedsEagerFrame()) {
572 return MemOperand(fp, StackSlotOffset(op->index()));
573 } else {
574 // Without an eager frame, retrieve the parameter relative to the
575 // stack pointer.
576 return MemOperand(sp, ArgumentsOffsetWithoutFrame(op->index()));
577 }
578 }
579
580
581 MemOperand LCodeGen::ToHighMemOperand(LOperand* op) const {
582 DCHECK(op->IsDoubleStackSlot());
583 if (NeedsEagerFrame()) {
584 return MemOperand(fp, StackSlotOffset(op->index()) + kPointerSize);
585 } else {
586 // Without an eager frame, retrieve the parameter relative to the
587 // stack pointer.
588 return MemOperand(
589 sp, ArgumentsOffsetWithoutFrame(op->index()) + kPointerSize);
590 }
591 }
592
593
594 void LCodeGen::WriteTranslation(LEnvironment* environment,
595 Translation* translation) {
596 if (environment == NULL) return;
597
598 // The translation includes one command per value in the environment.
599 int translation_size = environment->translation_size();
600
601 WriteTranslation(environment->outer(), translation);
602 WriteTranslationFrame(environment, translation);
603
604 int object_index = 0;
605 int dematerialized_index = 0;
606 for (int i = 0; i < translation_size; ++i) {
607 LOperand* value = environment->values()->at(i);
608 AddToTranslation(
609 environment, translation, value, environment->HasTaggedValueAt(i),
610 environment->HasUint32ValueAt(i), &object_index, &dematerialized_index);
611 }
612 }
613
614
615 void LCodeGen::AddToTranslation(LEnvironment* environment,
616 Translation* translation,
617 LOperand* op,
618 bool is_tagged,
619 bool is_uint32,
620 int* object_index_pointer,
621 int* dematerialized_index_pointer) {
622 if (op == LEnvironment::materialization_marker()) {
623 int object_index = (*object_index_pointer)++;
624 if (environment->ObjectIsDuplicateAt(object_index)) {
625 int dupe_of = environment->ObjectDuplicateOfAt(object_index);
626 translation->DuplicateObject(dupe_of);
627 return;
628 }
629 int object_length = environment->ObjectLengthAt(object_index);
630 if (environment->ObjectIsArgumentsAt(object_index)) {
631 translation->BeginArgumentsObject(object_length);
632 } else {
633 translation->BeginCapturedObject(object_length);
634 }
635 int dematerialized_index = *dematerialized_index_pointer;
636 int env_offset = environment->translation_size() + dematerialized_index;
637 *dematerialized_index_pointer += object_length;
638 for (int i = 0; i < object_length; ++i) {
639 LOperand* value = environment->values()->at(env_offset + i);
640 AddToTranslation(environment,
641 translation,
642 value,
643 environment->HasTaggedValueAt(env_offset + i),
644 environment->HasUint32ValueAt(env_offset + i),
645 object_index_pointer,
646 dematerialized_index_pointer);
647 }
648 return;
649 }
650
651 if (op->IsStackSlot()) {
652 int index = op->index();
653 if (index >= 0) {
654 index += StandardFrameConstants::kFixedFrameSize / kPointerSize;
655 }
656 if (is_tagged) {
657 translation->StoreStackSlot(index);
658 } else if (is_uint32) {
659 translation->StoreUint32StackSlot(index);
660 } else {
661 translation->StoreInt32StackSlot(index);
662 }
663 } else if (op->IsDoubleStackSlot()) {
664 int index = op->index();
665 if (index >= 0) {
666 index += StandardFrameConstants::kFixedFrameSize / kPointerSize;
667 }
668 translation->StoreDoubleStackSlot(index);
669 } else if (op->IsRegister()) {
670 Register reg = ToRegister(op);
671 if (is_tagged) {
672 translation->StoreRegister(reg);
673 } else if (is_uint32) {
674 translation->StoreUint32Register(reg);
675 } else {
676 translation->StoreInt32Register(reg);
677 }
678 } else if (op->IsDoubleRegister()) {
679 DoubleRegister reg = ToDoubleRegister(op);
680 translation->StoreDoubleRegister(reg);
681 } else if (op->IsConstantOperand()) {
682 HConstant* constant = chunk()->LookupConstant(LConstantOperand::cast(op));
683 int src_index = DefineDeoptimizationLiteral(constant->handle(isolate()));
684 translation->StoreLiteral(src_index);
685 } else {
686 UNREACHABLE();
687 }
688 }
689
690
691 void LCodeGen::CallCode(Handle<Code> code,
692 RelocInfo::Mode mode,
693 LInstruction* instr) {
694 CallCodeGeneric(code, mode, instr, RECORD_SIMPLE_SAFEPOINT);
695 }
696
697
698 void LCodeGen::CallCodeGeneric(Handle<Code> code,
699 RelocInfo::Mode mode,
700 LInstruction* instr,
701 SafepointMode safepoint_mode) {
702 DCHECK(instr != NULL);
703 __ Call(code, mode);
704 RecordSafepointWithLazyDeopt(instr, safepoint_mode);
705 }
706
707
708 void LCodeGen::CallRuntime(const Runtime::Function* function,
709 int num_arguments,
710 LInstruction* instr,
711 SaveFPRegsMode save_doubles) {
712 DCHECK(instr != NULL);
713
714 __ CallRuntime(function, num_arguments, save_doubles);
715
716 RecordSafepointWithLazyDeopt(instr, RECORD_SIMPLE_SAFEPOINT);
717 }
718
719
720 void LCodeGen::LoadContextFromDeferred(LOperand* context) {
721 if (context->IsRegister()) {
722 __ Move(cp, ToRegister(context));
723 } else if (context->IsStackSlot()) {
724 __ lw(cp, ToMemOperand(context));
725 } else if (context->IsConstantOperand()) {
726 HConstant* constant =
727 chunk_->LookupConstant(LConstantOperand::cast(context));
728 __ li(cp, Handle<Object>::cast(constant->handle(isolate())));
729 } else {
730 UNREACHABLE();
731 }
732 }
733
734
735 void LCodeGen::CallRuntimeFromDeferred(Runtime::FunctionId id,
736 int argc,
737 LInstruction* instr,
738 LOperand* context) {
739 LoadContextFromDeferred(context);
740 __ CallRuntimeSaveDoubles(id);
741 RecordSafepointWithRegisters(
742 instr->pointer_map(), argc, Safepoint::kNoLazyDeopt);
743 }
744
745
746 void LCodeGen::RegisterEnvironmentForDeoptimization(LEnvironment* environment,
747 Safepoint::DeoptMode mode) {
748 environment->set_has_been_used();
749 if (!environment->HasBeenRegistered()) {
750 // Physical stack frame layout:
751 // -x ............. -4 0 ..................................... y
752 // [incoming arguments] [spill slots] [pushed outgoing arguments]
753
754 // Layout of the environment:
755 // 0 ..................................................... size-1
756 // [parameters] [locals] [expression stack including arguments]
757
758 // Layout of the translation:
759 // 0 ........................................................ size - 1 + 4
760 // [expression stack including arguments] [locals] [4 words] [parameters]
761 // |>------------ translation_size ------------<|
762
763 int frame_count = 0;
764 int jsframe_count = 0;
765 for (LEnvironment* e = environment; e != NULL; e = e->outer()) {
766 ++frame_count;
767 if (e->frame_type() == JS_FUNCTION) {
768 ++jsframe_count;
769 }
770 }
771 Translation translation(&translations_, frame_count, jsframe_count, zone());
772 WriteTranslation(environment, &translation);
773 int deoptimization_index = deoptimizations_.length();
774 int pc_offset = masm()->pc_offset();
775 environment->Register(deoptimization_index,
776 translation.index(),
777 (mode == Safepoint::kLazyDeopt) ? pc_offset : -1);
778 deoptimizations_.Add(environment, zone());
779 }
780 }
781
782
783 void LCodeGen::DeoptimizeIf(Condition condition, LInstruction* instr,
784 Deoptimizer::DeoptReason deopt_reason,
785 Deoptimizer::BailoutType bailout_type,
786 Register src1, const Operand& src2) {
787 LEnvironment* environment = instr->environment();
788 RegisterEnvironmentForDeoptimization(environment, Safepoint::kNoLazyDeopt);
789 DCHECK(environment->HasBeenRegistered());
790 int id = environment->deoptimization_index();
791 Address entry =
792 Deoptimizer::GetDeoptimizationEntry(isolate(), id, bailout_type);
793 if (entry == NULL) {
794 Abort(kBailoutWasNotPrepared);
795 return;
796 }
797
798 if (FLAG_deopt_every_n_times != 0 && !info()->IsStub()) {
799 Register scratch = scratch0();
800 ExternalReference count = ExternalReference::stress_deopt_count(isolate());
801 Label no_deopt;
802 __ Push(a1, scratch);
803 __ li(scratch, Operand(count));
804 __ lw(a1, MemOperand(scratch));
805 __ Subu(a1, a1, Operand(1));
806 __ Branch(&no_deopt, ne, a1, Operand(zero_reg));
807 __ li(a1, Operand(FLAG_deopt_every_n_times));
808 __ sw(a1, MemOperand(scratch));
809 __ Pop(a1, scratch);
810
811 __ Call(entry, RelocInfo::RUNTIME_ENTRY);
812 __ bind(&no_deopt);
813 __ sw(a1, MemOperand(scratch));
814 __ Pop(a1, scratch);
815 }
816
817 if (info()->ShouldTrapOnDeopt()) {
818 Label skip;
819 if (condition != al) {
820 __ Branch(&skip, NegateCondition(condition), src1, src2);
821 }
822 __ stop("trap_on_deopt");
823 __ bind(&skip);
824 }
825
826 Deoptimizer::DeoptInfo deopt_info = MakeDeoptInfo(instr, deopt_reason);
827
828 DCHECK(info()->IsStub() || frame_is_built_);
829 // Go through jump table if we need to handle condition, build frame, or
830 // restore caller doubles.
831 if (condition == al && frame_is_built_ &&
832 !info()->saves_caller_doubles()) {
833 DeoptComment(deopt_info);
834 __ Call(entry, RelocInfo::RUNTIME_ENTRY, condition, src1, src2);
835 info()->LogDeoptCallPosition(masm()->pc_offset(), deopt_info.inlining_id);
836 } else {
837 Deoptimizer::JumpTableEntry table_entry(entry, deopt_info, bailout_type,
838 !frame_is_built_);
839 // We often have several deopts to the same entry; reuse the last
840 // jump entry if this is the case.
841 if (FLAG_trace_deopt || isolate()->cpu_profiler()->is_profiling() ||
842 jump_table_.is_empty() ||
843 !table_entry.IsEquivalentTo(jump_table_.last())) {
844 jump_table_.Add(table_entry, zone());
845 }
846 __ Branch(&jump_table_.last().label, condition, src1, src2);
847 }
848 }
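
The FLAG_deopt_every_n_times block above keeps a countdown in stress_deopt_count and forces a deoptimization each time it reaches zero, reloading the counter afterwards. A minimal stand-alone sketch of that counter logic (plain C++, not part of the patch; the function name is illustrative):

    #include <cstdint>

    // Returns true when this deopt site should be taken as a stress deopt.
    bool ShouldForceStressDeopt(uint32_t* stress_deopt_count, uint32_t deopt_every_n) {
      *stress_deopt_count -= 1;              // __ Subu(a1, a1, Operand(1))
      if (*stress_deopt_count != 0) {        // __ Branch(&no_deopt, ne, a1, zero_reg)
        return false;                        // store the decremented count and continue
      }
      *stress_deopt_count = deopt_every_n;   // __ li + __ sw: reload the counter
      return true;                           // and take the deopt this time
    }
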
849
850
851 void LCodeGen::DeoptimizeIf(Condition condition, LInstruction* instr,
852 Deoptimizer::DeoptReason deopt_reason,
853 Register src1, const Operand& src2) {
854 Deoptimizer::BailoutType bailout_type = info()->IsStub()
855 ? Deoptimizer::LAZY
856 : Deoptimizer::EAGER;
857 DeoptimizeIf(condition, instr, deopt_reason, bailout_type, src1, src2);
858 }
859
860
861 void LCodeGen::PopulateDeoptimizationData(Handle<Code> code) {
862 int length = deoptimizations_.length();
863 if (length == 0) return;
864 Handle<DeoptimizationInputData> data =
865 DeoptimizationInputData::New(isolate(), length, TENURED);
866
867 Handle<ByteArray> translations =
868 translations_.CreateByteArray(isolate()->factory());
869 data->SetTranslationByteArray(*translations);
870 data->SetInlinedFunctionCount(Smi::FromInt(inlined_function_count_));
871 data->SetOptimizationId(Smi::FromInt(info_->optimization_id()));
872 if (info_->IsOptimizing()) {
873 // Reference to shared function info does not change between phases.
874 AllowDeferredHandleDereference allow_handle_dereference;
875 data->SetSharedFunctionInfo(*info_->shared_info());
876 } else {
877 data->SetSharedFunctionInfo(Smi::FromInt(0));
878 }
879 data->SetWeakCellCache(Smi::FromInt(0));
880
881 Handle<FixedArray> literals =
882 factory()->NewFixedArray(deoptimization_literals_.length(), TENURED);
883 { AllowDeferredHandleDereference copy_handles;
884 for (int i = 0; i < deoptimization_literals_.length(); i++) {
885 literals->set(i, *deoptimization_literals_[i]);
886 }
887 data->SetLiteralArray(*literals);
888 }
889
890 data->SetOsrAstId(Smi::FromInt(info_->osr_ast_id().ToInt()));
891 data->SetOsrPcOffset(Smi::FromInt(osr_pc_offset_));
892
893 // Populate the deoptimization entries.
894 for (int i = 0; i < length; i++) {
895 LEnvironment* env = deoptimizations_[i];
896 data->SetAstId(i, env->ast_id());
897 data->SetTranslationIndex(i, Smi::FromInt(env->translation_index()));
898 data->SetArgumentsStackHeight(i,
899 Smi::FromInt(env->arguments_stack_height()));
900 data->SetPc(i, Smi::FromInt(env->pc_offset()));
901 }
902 code->set_deoptimization_data(*data);
903 }
904
905
906 void LCodeGen::PopulateDeoptimizationLiteralsWithInlinedFunctions() {
907 DCHECK_EQ(0, deoptimization_literals_.length());
908 for (auto function : chunk()->inlined_functions()) {
909 DefineDeoptimizationLiteral(function);
910 }
911 inlined_function_count_ = deoptimization_literals_.length();
912 }
913
914
915 void LCodeGen::RecordSafepointWithLazyDeopt(
916 LInstruction* instr, SafepointMode safepoint_mode) {
917 if (safepoint_mode == RECORD_SIMPLE_SAFEPOINT) {
918 RecordSafepoint(instr->pointer_map(), Safepoint::kLazyDeopt);
919 } else {
920 DCHECK(safepoint_mode == RECORD_SAFEPOINT_WITH_REGISTERS_AND_NO_ARGUMENTS);
921 RecordSafepointWithRegisters(
922 instr->pointer_map(), 0, Safepoint::kLazyDeopt);
923 }
924 }
925
926
927 void LCodeGen::RecordSafepoint(
928 LPointerMap* pointers,
929 Safepoint::Kind kind,
930 int arguments,
931 Safepoint::DeoptMode deopt_mode) {
932 DCHECK(expected_safepoint_kind_ == kind);
933
934 const ZoneList<LOperand*>* operands = pointers->GetNormalizedOperands();
935 Safepoint safepoint = safepoints_.DefineSafepoint(masm(),
936 kind, arguments, deopt_mode);
937 for (int i = 0; i < operands->length(); i++) {
938 LOperand* pointer = operands->at(i);
939 if (pointer->IsStackSlot()) {
940 safepoint.DefinePointerSlot(pointer->index(), zone());
941 } else if (pointer->IsRegister() && (kind & Safepoint::kWithRegisters)) {
942 safepoint.DefinePointerRegister(ToRegister(pointer), zone());
943 }
944 }
945 }
946
947
948 void LCodeGen::RecordSafepoint(LPointerMap* pointers,
949 Safepoint::DeoptMode deopt_mode) {
950 RecordSafepoint(pointers, Safepoint::kSimple, 0, deopt_mode);
951 }
952
953
954 void LCodeGen::RecordSafepoint(Safepoint::DeoptMode deopt_mode) {
955 LPointerMap empty_pointers(zone());
956 RecordSafepoint(&empty_pointers, deopt_mode);
957 }
958
959
960 void LCodeGen::RecordSafepointWithRegisters(LPointerMap* pointers,
961 int arguments,
962 Safepoint::DeoptMode deopt_mode) {
963 RecordSafepoint(
964 pointers, Safepoint::kWithRegisters, arguments, deopt_mode);
965 }
966
967
968 void LCodeGen::RecordAndWritePosition(int position) {
969 if (position == RelocInfo::kNoPosition) return;
970 masm()->positions_recorder()->RecordPosition(position);
971 masm()->positions_recorder()->WriteRecordedPositions();
972 }
973
974
975 static const char* LabelType(LLabel* label) {
976 if (label->is_loop_header()) return " (loop header)";
977 if (label->is_osr_entry()) return " (OSR entry)";
978 return "";
979 }
980
981
982 void LCodeGen::DoLabel(LLabel* label) {
983 Comment(";;; <@%d,#%d> -------------------- B%d%s --------------------",
984 current_instruction_,
985 label->hydrogen_value()->id(),
986 label->block_id(),
987 LabelType(label));
988 __ bind(label->label());
989 current_block_ = label->block_id();
990 DoGap(label);
991 }
992
993
994 void LCodeGen::DoParallelMove(LParallelMove* move) {
995 resolver_.Resolve(move);
996 }
997
998
999 void LCodeGen::DoGap(LGap* gap) {
1000 for (int i = LGap::FIRST_INNER_POSITION;
1001 i <= LGap::LAST_INNER_POSITION;
1002 i++) {
1003 LGap::InnerPosition inner_pos = static_cast<LGap::InnerPosition>(i);
1004 LParallelMove* move = gap->GetParallelMove(inner_pos);
1005 if (move != NULL) DoParallelMove(move);
1006 }
1007 }
1008
1009
1010 void LCodeGen::DoInstructionGap(LInstructionGap* instr) {
1011 DoGap(instr);
1012 }
1013
1014
1015 void LCodeGen::DoParameter(LParameter* instr) {
1016 // Nothing to do.
1017 }
1018
1019
1020 void LCodeGen::DoCallStub(LCallStub* instr) {
1021 DCHECK(ToRegister(instr->context()).is(cp));
1022 DCHECK(ToRegister(instr->result()).is(v0));
1023 switch (instr->hydrogen()->major_key()) {
1024 case CodeStub::RegExpExec: {
1025 RegExpExecStub stub(isolate());
1026 CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
1027 break;
1028 }
1029 case CodeStub::SubString: {
1030 SubStringStub stub(isolate());
1031 CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
1032 break;
1033 }
1034 default:
1035 UNREACHABLE();
1036 }
1037 }
1038
1039
1040 void LCodeGen::DoUnknownOSRValue(LUnknownOSRValue* instr) {
1041 GenerateOsrPrologue();
1042 }
1043
1044
1045 void LCodeGen::DoModByPowerOf2I(LModByPowerOf2I* instr) {
1046 Register dividend = ToRegister(instr->dividend());
1047 int32_t divisor = instr->divisor();
1048 DCHECK(dividend.is(ToRegister(instr->result())));
1049
1050 // Theoretically, a variation of the branch-free code for integer division by
1051 // a power of 2 (calculating the remainder via an additional multiplication
1052 // (which gets simplified to an 'and') and subtraction) should be faster, and
1053 // this is exactly what GCC and clang emit. Nevertheless, benchmarks seem to
1054 // indicate that positive dividends are heavily favored, so the branching
1055 // version performs better.
1056 HMod* hmod = instr->hydrogen();
1057 int32_t mask = divisor < 0 ? -(divisor + 1) : (divisor - 1);
1058 Label dividend_is_not_negative, done;
1059
1060 if (hmod->CheckFlag(HValue::kLeftCanBeNegative)) {
1061 __ Branch(&dividend_is_not_negative, ge, dividend, Operand(zero_reg));
1062 // Note: The code below even works when right contains kMinInt.
1063 __ subu(dividend, zero_reg, dividend);
1064 __ And(dividend, dividend, Operand(mask));
1065 if (hmod->CheckFlag(HValue::kBailoutOnMinusZero)) {
1066 DeoptimizeIf(eq, instr, Deoptimizer::kMinusZero, dividend,
1067 Operand(zero_reg));
1068 }
1069 __ Branch(USE_DELAY_SLOT, &done);
1070 __ subu(dividend, zero_reg, dividend);
1071 }
1072
1073 __ bind(&dividend_is_not_negative);
1074 __ And(dividend, dividend, Operand(mask));
1075 __ bind(&done);
1076 }
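
The comment at the top of DoModByPowerOf2I contrasts the branching sequence emitted here with the branch-free form GCC and Clang produce. For reference, a minimal stand-alone sketch of the branch-free variant, built on the same |divisor| - 1 mask (plain C++, not part of the patch; it assumes arithmetic right shift of negative values, as on MIPS):

    #include <cassert>
    #include <cstdint>

    int32_t ModByPowerOf2(int32_t x, int32_t divisor) {
      // |divisor| - 1, written so it is also correct for divisor == kMinInt.
      int32_t mask = divisor < 0 ? -(divisor + 1) : (divisor - 1);
      // Bias negative dividends by the mask before masking, then undo the bias.
      int32_t bias = (x >> 31) & mask;  // 0 for x >= 0, mask for x < 0
      return ((x + bias) & mask) - bias;
    }

    int main() {
      assert(ModByPowerOf2(7, 4) == 3);
      assert(ModByPowerOf2(-7, 4) == -3);
      assert(ModByPowerOf2(-8, -4) == 0);
    }
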
1077
1078
1079 void LCodeGen::DoModByConstI(LModByConstI* instr) {
1080 Register dividend = ToRegister(instr->dividend());
1081 int32_t divisor = instr->divisor();
1082 Register result = ToRegister(instr->result());
1083 DCHECK(!dividend.is(result));
1084
1085 if (divisor == 0) {
1086 DeoptimizeIf(al, instr);
1087 return;
1088 }
1089
1090 __ TruncatingDiv(result, dividend, Abs(divisor));
1091 __ Mul(result, result, Operand(Abs(divisor)));
1092 __ Subu(result, dividend, Operand(result));
1093
1094 // Check for negative zero.
1095 HMod* hmod = instr->hydrogen();
1096 if (hmod->CheckFlag(HValue::kBailoutOnMinusZero)) {
1097 Label remainder_not_zero;
1098 __ Branch(&remainder_not_zero, ne, result, Operand(zero_reg));
1099 DeoptimizeIf(lt, instr, Deoptimizer::kMinusZero, dividend,
1100 Operand(zero_reg));
1101 __ bind(&remainder_not_zero);
1102 }
1103 }
1104
1105
1106 void LCodeGen::DoModI(LModI* instr) {
1107 HMod* hmod = instr->hydrogen();
1108 const Register left_reg = ToRegister(instr->left());
1109 const Register right_reg = ToRegister(instr->right());
1110 const Register result_reg = ToRegister(instr->result());
1111
1112 // div runs in the background while we check for special cases.
1113 __ Mod(result_reg, left_reg, right_reg);
1114
1115 Label done;
1116 // Check for x % 0, we have to deopt in this case because we can't return a
1117 // NaN.
1118 if (hmod->CheckFlag(HValue::kCanBeDivByZero)) {
1119 DeoptimizeIf(eq, instr, Deoptimizer::kDivisionByZero, right_reg,
1120 Operand(zero_reg));
1121 }
1122
1123 // Check for kMinInt % -1, div will return kMinInt, which is not what we
1124 // want. We have to deopt if we care about -0, because we can't return that.
1125 if (hmod->CheckFlag(HValue::kCanOverflow)) {
1126 Label no_overflow_possible;
1127 __ Branch(&no_overflow_possible, ne, left_reg, Operand(kMinInt));
1128 if (hmod->CheckFlag(HValue::kBailoutOnMinusZero)) {
1129 DeoptimizeIf(eq, instr, Deoptimizer::kMinusZero, right_reg, Operand(-1));
1130 } else {
1131 __ Branch(&no_overflow_possible, ne, right_reg, Operand(-1));
1132 __ Branch(USE_DELAY_SLOT, &done);
1133 __ mov(result_reg, zero_reg);
1134 }
1135 __ bind(&no_overflow_possible);
1136 }
1137
1138 // If we care about -0, test if the dividend is <0 and the result is 0.
1139 __ Branch(&done, ge, left_reg, Operand(zero_reg));
1140 if (hmod->CheckFlag(HValue::kBailoutOnMinusZero)) {
1141 DeoptimizeIf(eq, instr, Deoptimizer::kMinusZero, result_reg,
1142 Operand(zero_reg));
1143 }
1144 __ bind(&done);
1145 }
1146
1147
1148 void LCodeGen::DoDivByPowerOf2I(LDivByPowerOf2I* instr) {
1149 Register dividend = ToRegister(instr->dividend());
1150 int32_t divisor = instr->divisor();
1151 Register result = ToRegister(instr->result());
1152 DCHECK(divisor == kMinInt || base::bits::IsPowerOfTwo32(Abs(divisor)));
1153 DCHECK(!result.is(dividend));
1154
1155 // Check for (0 / -x) that will produce negative zero.
1156 HDiv* hdiv = instr->hydrogen();
1157 if (hdiv->CheckFlag(HValue::kBailoutOnMinusZero) && divisor < 0) {
1158 DeoptimizeIf(eq, instr, Deoptimizer::kMinusZero, dividend,
1159 Operand(zero_reg));
1160 }
1161 // Check for (kMinInt / -1).
1162 if (hdiv->CheckFlag(HValue::kCanOverflow) && divisor == -1) {
1163 DeoptimizeIf(eq, instr, Deoptimizer::kOverflow, dividend, Operand(kMinInt));
1164 }
1165 // Deoptimize if remainder will not be 0.
1166 if (!hdiv->CheckFlag(HInstruction::kAllUsesTruncatingToInt32) &&
1167 divisor != 1 && divisor != -1) {
1168 int32_t mask = divisor < 0 ? -(divisor + 1) : (divisor - 1);
1169 __ And(at, dividend, Operand(mask));
1170 DeoptimizeIf(ne, instr, Deoptimizer::kLostPrecision, at, Operand(zero_reg));
1171 }
1172
1173 if (divisor == -1) { // Nice shortcut, not needed for correctness.
1174 __ Subu(result, zero_reg, dividend);
1175 return;
1176 }
1177 uint16_t shift = WhichPowerOf2Abs(divisor);
1178 if (shift == 0) {
1179 __ Move(result, dividend);
1180 } else if (shift == 1) {
1181 __ srl(result, dividend, 31);
1182 __ Addu(result, dividend, Operand(result));
1183 } else {
1184 __ sra(result, dividend, 31);
1185 __ srl(result, result, 32 - shift);
1186 __ Addu(result, dividend, Operand(result));
1187 }
1188 if (shift > 0) __ sra(result, result, shift);
1189 if (divisor < 0) __ Subu(result, zero_reg, result);
1190 }
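
For 1 <= shift <= 31, the sequence above rounds toward zero by biasing negative dividends with 2^shift - 1 before the arithmetic shift; the bias is built from the sign word with a logical shift, exactly the sra/srl pair emitted here. A minimal stand-alone sketch (plain C++, not part of the patch; names are illustrative, and the shift == 0 and divisor == -1 shortcuts handled above are omitted):

    #include <cassert>
    #include <cstdint>

    int32_t DivByPowerOf2(int32_t dividend, int shift, bool divisor_is_negative) {
      // sra 31: 0 for non-negative dividends, all ones otherwise.
      uint32_t sign = static_cast<uint32_t>(dividend >> 31);
      // srl by (32 - shift): turns the sign word into the bias 2^shift - 1.
      int32_t bias = static_cast<int32_t>(sign >> (32 - shift));
      int32_t quotient = (dividend + bias) >> shift;      // Addu + sra
      return divisor_is_negative ? -quotient : quotient;  // Subu if divisor < 0
    }

    int main() {
      assert(DivByPowerOf2(-7, 2, false) == -1);  // -7 / 4 truncates to -1
      assert(DivByPowerOf2(7, 2, true) == -1);    //  7 / -4 truncates to -1
      assert(DivByPowerOf2(8, 3, false) == 1);    //  8 / 8
    }
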
1191
1192
1193 void LCodeGen::DoDivByConstI(LDivByConstI* instr) {
1194 Register dividend = ToRegister(instr->dividend());
1195 int32_t divisor = instr->divisor();
1196 Register result = ToRegister(instr->result());
1197 DCHECK(!dividend.is(result));
1198
1199 if (divisor == 0) {
1200 DeoptimizeIf(al, instr);
1201 return;
1202 }
1203
1204 // Check for (0 / -x) that will produce negative zero.
1205 HDiv* hdiv = instr->hydrogen();
1206 if (hdiv->CheckFlag(HValue::kBailoutOnMinusZero) && divisor < 0) {
1207 DeoptimizeIf(eq, instr, Deoptimizer::kMinusZero, dividend,
1208 Operand(zero_reg));
1209 }
1210
1211 __ TruncatingDiv(result, dividend, Abs(divisor));
1212 if (divisor < 0) __ Subu(result, zero_reg, result);
1213
1214 if (!hdiv->CheckFlag(HInstruction::kAllUsesTruncatingToInt32)) {
1215 __ Mul(scratch0(), result, Operand(divisor));
1216 __ Subu(scratch0(), scratch0(), dividend);
1217 DeoptimizeIf(ne, instr, Deoptimizer::kLostPrecision, scratch0(),
1218 Operand(zero_reg));
1219 }
1220 }
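
When the result is not known to be truncated anyway, DoDivByConstI verifies that the constant division was exact by multiplying the quotient back and comparing against the dividend; a nonzero difference triggers Deoptimizer::kLostPrecision. A minimal stand-alone sketch of that check, with an ordinary division standing in for the magic-number TruncatingDiv (plain C++, not part of the patch; divisor != 0, since the zero case deoptimizes unconditionally above):

    #include <cassert>
    #include <cstdint>
    #include <cstdlib>

    // Returns false when the optimized code would have to deoptimize.
    bool DivByConstIsExact(int32_t dividend, int32_t divisor, int32_t* quotient) {
      int32_t q = dividend / std::abs(divisor);  // stands in for TruncatingDiv
      if (divisor < 0) q = -q;                   // __ Subu(result, zero_reg, result)
      *quotient = q;
      // __ Mul + __ Subu + DeoptimizeIf(ne, ...): the remainder must be zero.
      return static_cast<int64_t>(q) * divisor == dividend;
    }

    int main() {
      int32_t q;
      assert(DivByConstIsExact(15, 5, &q) && q == 3);
      assert(!DivByConstIsExact(16, 5, &q));  // 16 / 5 loses precision
    }
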
1221
1222
1223 // TODO(svenpanne) Refactor this to avoid code duplication with DoFlooringDivI.
1224 void LCodeGen::DoDivI(LDivI* instr) {
1225 HBinaryOperation* hdiv = instr->hydrogen();
1226 Register dividend = ToRegister(instr->dividend());
1227 Register divisor = ToRegister(instr->divisor());
1228 const Register result = ToRegister(instr->result());
1229 Register remainder = ToRegister(instr->temp());
1230
1231 // On MIPS div is asynchronous - it will run in the background while we
1232 // check for special cases.
1233 __ Div(remainder, result, dividend, divisor);
1234
1235 // Check for x / 0.
1236 if (hdiv->CheckFlag(HValue::kCanBeDivByZero)) {
1237 DeoptimizeIf(eq, instr, Deoptimizer::kDivisionByZero, divisor,
1238 Operand(zero_reg));
1239 }
1240
1241 // Check for (0 / -x) that will produce negative zero.
1242 if (hdiv->CheckFlag(HValue::kBailoutOnMinusZero)) {
1243 Label left_not_zero;
1244 __ Branch(&left_not_zero, ne, dividend, Operand(zero_reg));
1245 DeoptimizeIf(lt, instr, Deoptimizer::kMinusZero, divisor,
1246 Operand(zero_reg));
1247 __ bind(&left_not_zero);
1248 }
1249
1250 // Check for (kMinInt / -1).
1251 if (hdiv->CheckFlag(HValue::kCanOverflow) &&
1252 !hdiv->CheckFlag(HValue::kAllUsesTruncatingToInt32)) {
1253 Label left_not_min_int;
1254 __ Branch(&left_not_min_int, ne, dividend, Operand(kMinInt));
1255 DeoptimizeIf(eq, instr, Deoptimizer::kOverflow, divisor, Operand(-1));
1256 __ bind(&left_not_min_int);
1257 }
1258
1259 if (!hdiv->CheckFlag(HValue::kAllUsesTruncatingToInt32)) {
1260 DeoptimizeIf(ne, instr, Deoptimizer::kLostPrecision, remainder,
1261 Operand(zero_reg));
1262 }
1263 }
1264
1265
1266 void LCodeGen::DoMultiplyAddD(LMultiplyAddD* instr) {
1267 DoubleRegister addend = ToDoubleRegister(instr->addend());
1268 DoubleRegister multiplier = ToDoubleRegister(instr->multiplier());
1269 DoubleRegister multiplicand = ToDoubleRegister(instr->multiplicand());
1270
1271 // This is computed in-place.
1272 DCHECK(addend.is(ToDoubleRegister(instr->result())));
1273
1274 __ madd_d(addend, addend, multiplier, multiplicand);
1275 }
1276
1277
1278 void LCodeGen::DoFlooringDivByPowerOf2I(LFlooringDivByPowerOf2I* instr) {
1279 Register dividend = ToRegister(instr->dividend());
1280 Register result = ToRegister(instr->result());
1281 int32_t divisor = instr->divisor();
1282 Register scratch = result.is(dividend) ? scratch0() : dividend;
1283 DCHECK(!result.is(dividend) || !scratch.is(dividend));
1284
1285 // If the divisor is 1, return the dividend.
1286 if (divisor == 1) {
1287 __ Move(result, dividend);
1288 return;
1289 }
1290
1291 // If the divisor is positive, things are easy: There can be no deopts and we
1292 // can simply do an arithmetic right shift.
1293 uint16_t shift = WhichPowerOf2Abs(divisor);
1294 if (divisor > 1) {
1295 __ sra(result, dividend, shift);
1296 return;
1297 }
1298
1299 // If the divisor is negative, we have to negate and handle edge cases.
1300
1301 // dividend can be the same register as result so save the value of it
1302 // for checking overflow.
1303 __ Move(scratch, dividend);
1304
1305 __ Subu(result, zero_reg, dividend);
1306 if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) {
1307 DeoptimizeIf(eq, instr, Deoptimizer::kMinusZero, result, Operand(zero_reg));
1308 }
1309
1310 // Dividing by -1 is basically negation, unless we overflow.
1311 __ Xor(scratch, scratch, result);
1312 if (divisor == -1) {
1313 if (instr->hydrogen()->CheckFlag(HValue::kLeftCanBeMinInt)) {
1314 DeoptimizeIf(ge, instr, Deoptimizer::kOverflow, scratch,
1315 Operand(zero_reg));
1316 }
1317 return;
1318 }
1319
1320 // If the negation could not overflow, simply shifting is OK.
1321 if (!instr->hydrogen()->CheckFlag(HValue::kLeftCanBeMinInt)) {
1322 __ sra(result, result, shift);
1323 return;
1324 }
1325
1326 Label no_overflow, done;
1327 __ Branch(&no_overflow, lt, scratch, Operand(zero_reg));
1328 __ li(result, Operand(kMinInt / divisor));
1329 __ Branch(&done);
1330 __ bind(&no_overflow);
1331 __ sra(result, result, shift);
1332 __ bind(&done);
1333 }
1334
1335
1336 void LCodeGen::DoFlooringDivByConstI(LFlooringDivByConstI* instr) {
1337 Register dividend = ToRegister(instr->dividend());
1338 int32_t divisor = instr->divisor();
1339 Register result = ToRegister(instr->result());
1340 DCHECK(!dividend.is(result));
1341
1342 if (divisor == 0) {
1343 DeoptimizeIf(al, instr);
1344 return;
1345 }
1346
1347 // Check for (0 / -x) that will produce negative zero.
1348 HMathFloorOfDiv* hdiv = instr->hydrogen();
1349 if (hdiv->CheckFlag(HValue::kBailoutOnMinusZero) && divisor < 0) {
1350 DeoptimizeIf(eq, instr, Deoptimizer::kMinusZero, dividend,
1351 Operand(zero_reg));
1352 }
1353
1354 // Easy case: We need no dynamic check for the dividend and the flooring
1355 // division is the same as the truncating division.
1356 if ((divisor > 0 && !hdiv->CheckFlag(HValue::kLeftCanBeNegative)) ||
1357 (divisor < 0 && !hdiv->CheckFlag(HValue::kLeftCanBePositive))) {
1358 __ TruncatingDiv(result, dividend, Abs(divisor));
1359 if (divisor < 0) __ Subu(result, zero_reg, result);
1360 return;
1361 }
1362
1363 // In the general case we may need to adjust before and after the truncating
1364 // division to get a flooring division.
1365 Register temp = ToRegister(instr->temp());
1366 DCHECK(!temp.is(dividend) && !temp.is(result));
1367 Label needs_adjustment, done;
1368 __ Branch(&needs_adjustment, divisor > 0 ? lt : gt,
1369 dividend, Operand(zero_reg));
1370 __ TruncatingDiv(result, dividend, Abs(divisor));
1371 if (divisor < 0) __ Subu(result, zero_reg, result);
1372 __ jmp(&done);
1373 __ bind(&needs_adjustment);
1374 __ Addu(temp, dividend, Operand(divisor > 0 ? 1 : -1));
1375 __ TruncatingDiv(result, temp, Abs(divisor));
1376 if (divisor < 0) __ Subu(result, zero_reg, result);
1377 __ Subu(result, result, Operand(1));
1378 __ bind(&done);
1379 }
1380
1381
1382 // TODO(svenpanne) Refactor this to avoid code duplication with DoDivI.
1383 void LCodeGen::DoFlooringDivI(LFlooringDivI* instr) {
1384 HBinaryOperation* hdiv = instr->hydrogen();
1385 Register dividend = ToRegister(instr->dividend());
1386 Register divisor = ToRegister(instr->divisor());
1387 const Register result = ToRegister(instr->result());
1388 Register remainder = scratch0();
1389 // On MIPS div is asynchronous - it will run in the background while we
1390 // check for special cases.
1391 __ Div(remainder, result, dividend, divisor);
1392
1393 // Check for x / 0.
1394 if (hdiv->CheckFlag(HValue::kCanBeDivByZero)) {
1395 DeoptimizeIf(eq, instr, Deoptimizer::kDivisionByZero, divisor,
1396 Operand(zero_reg));
1397 }
1398
1399 // Check for (0 / -x) that will produce negative zero.
1400 if (hdiv->CheckFlag(HValue::kBailoutOnMinusZero)) {
1401 Label left_not_zero;
1402 __ Branch(&left_not_zero, ne, dividend, Operand(zero_reg));
1403 DeoptimizeIf(lt, instr, Deoptimizer::kMinusZero, divisor,
1404 Operand(zero_reg));
1405 __ bind(&left_not_zero);
1406 }
1407
1408 // Check for (kMinInt / -1).
1409 if (hdiv->CheckFlag(HValue::kCanOverflow) &&
1410 !hdiv->CheckFlag(HValue::kAllUsesTruncatingToInt32)) {
1411 Label left_not_min_int;
1412 __ Branch(&left_not_min_int, ne, dividend, Operand(kMinInt));
1413 DeoptimizeIf(eq, instr, Deoptimizer::kOverflow, divisor, Operand(-1));
1414 __ bind(&left_not_min_int);
1415 }
1416
1417 // We performed a truncating division. Correct the result if necessary.
1418 Label done;
1419 __ Branch(&done, eq, remainder, Operand(zero_reg), USE_DELAY_SLOT);
1420 __ Xor(remainder, remainder, Operand(divisor));
1421 __ Branch(&done, ge, remainder, Operand(zero_reg));
1422 __ Subu(result, result, Operand(1));
1423 __ bind(&done);
1424 }
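
Both flooring-division paths end the same way: take the truncating quotient and remainder, then decrement the quotient when the remainder is nonzero and its sign differs from the divisor's (the Xor plus branch-on-ge above). A minimal stand-alone sketch of that correction (plain C++, not part of the patch):

    #include <cassert>

    int FlooringDiv(int dividend, int divisor) {
      int q = dividend / divisor;  // truncates toward zero, like MIPS div
      int r = dividend % divisor;
      if (r != 0 && ((r ^ divisor) < 0)) q -= 1;  // signs differ: round down
      return q;
    }

    int main() {
      assert(FlooringDiv(-7, 2) == -4);  // truncation alone would give -3
      assert(FlooringDiv(7, -2) == -4);
      assert(FlooringDiv(6, 3) == 2);    // exact division needs no correction
    }
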
1425
1426
1427 void LCodeGen::DoMulI(LMulI* instr) {
1428 Register scratch = scratch0();
1429 Register result = ToRegister(instr->result());
1430 // Note that result may alias left.
1431 Register left = ToRegister(instr->left());
1432 LOperand* right_op = instr->right();
1433
1434 bool bailout_on_minus_zero =
1435 instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero);
1436 bool overflow = instr->hydrogen()->CheckFlag(HValue::kCanOverflow);
1437
1438 if (right_op->IsConstantOperand()) {
1439 int32_t constant = ToInteger32(LConstantOperand::cast(right_op));
1440
1441 if (bailout_on_minus_zero && (constant < 0)) {
1442 // The case of a zero constant is handled separately below.
1443 // If the constant is negative and left is zero, the result should be -0.
1444 DeoptimizeIf(eq, instr, Deoptimizer::kMinusZero, left, Operand(zero_reg));
1445 }
1446
1447 switch (constant) {
1448 case -1:
1449 if (overflow) {
1450 __ SubuAndCheckForOverflow(result, zero_reg, left, scratch);
1451 DeoptimizeIf(lt, instr, Deoptimizer::kOverflow, scratch,
1452 Operand(zero_reg));
1453 } else {
1454 __ Subu(result, zero_reg, left);
1455 }
1456 break;
1457 case 0:
1458 if (bailout_on_minus_zero) {
1459 // If left is strictly negative and the constant is zero, the
1460 // result is -0. Deoptimize if required, otherwise return 0.
1461 DeoptimizeIf(lt, instr, Deoptimizer::kMinusZero, left,
1462 Operand(zero_reg));
1463 }
1464 __ mov(result, zero_reg);
1465 break;
1466 case 1:
1467 // Nothing to do.
1468 __ Move(result, left);
1469 break;
1470 default:
1471 // Multiplying by powers of two and powers of two plus or minus
1472 // one can be done faster with shifted operands.
1473 // For other constants we emit standard code.
1474 int32_t mask = constant >> 31;
1475 uint32_t constant_abs = (constant + mask) ^ mask;
1476
1477 if (base::bits::IsPowerOfTwo32(constant_abs)) {
1478 int32_t shift = WhichPowerOf2(constant_abs);
1479 __ sll(result, left, shift);
1480 // Correct the sign of the result if the constant is negative.
1481 if (constant < 0) __ Subu(result, zero_reg, result);
1482 } else if (base::bits::IsPowerOfTwo32(constant_abs - 1)) {
1483 int32_t shift = WhichPowerOf2(constant_abs - 1);
1484 __ sll(scratch, left, shift);
1485 __ Addu(result, scratch, left);
1486 // Correct the sign of the result if the constant is negative.
1487 if (constant < 0) __ Subu(result, zero_reg, result);
1488 } else if (base::bits::IsPowerOfTwo32(constant_abs + 1)) {
1489 int32_t shift = WhichPowerOf2(constant_abs + 1);
1490 __ sll(scratch, left, shift);
1491 __ Subu(result, scratch, left);
1492 // Correct the sign of the result if the constant is negative.
1493 if (constant < 0) __ Subu(result, zero_reg, result);
1494 } else {
1495 // Generate standard code.
1496 __ li(at, constant);
1497 __ Mul(result, left, at);
1498 }
1499 }
1500
1501 } else {
1502 DCHECK(right_op->IsRegister());
1503 Register right = ToRegister(right_op);
1504
1505 if (overflow) {
1506 // hi:lo = left * right.
1507 if (instr->hydrogen()->representation().IsSmi()) {
1508 __ SmiUntag(result, left);
1509 __ Mul(scratch, result, result, right);
1510 } else {
1511 __ Mul(scratch, result, left, right);
1512 }
1513 __ sra(at, result, 31);
1514 DeoptimizeIf(ne, instr, Deoptimizer::kOverflow, scratch, Operand(at));
1515 } else {
1516 if (instr->hydrogen()->representation().IsSmi()) {
1517 __ SmiUntag(result, left);
1518 __ Mul(result, result, right);
1519 } else {
1520 __ Mul(result, left, right);
1521 }
1522 }
1523
1524 if (bailout_on_minus_zero) {
1525 Label done;
1526 __ Xor(at, left, right);
1527 __ Branch(&done, ge, at, Operand(zero_reg));
1528 // Bail out if the result is minus zero.
1529 DeoptimizeIf(eq, instr, Deoptimizer::kMinusZero, result,
1530 Operand(zero_reg));
1531 __ bind(&done);
1532 }
1533 }
1534 }
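
The constant-operand path above strength-reduces multiplications by +/-2^k, 2^k + 1 and 2^k - 1 into a shift plus at most one add or subtract, using (constant + mask) ^ mask for the absolute value. A minimal stand-alone sketch of that decomposition for |constant| >= 2 (plain C++, not part of the patch; __builtin_ctz is a GCC/Clang builtin standing in for WhichPowerOf2, and the constant == kMinInt corner is ignored here):

    #include <cassert>
    #include <cstdint>

    int32_t MulByConstant(int32_t left, int32_t constant) {
      int32_t mask = constant >> 31;
      uint32_t abs = static_cast<uint32_t>((constant + mask) ^ mask);  // |constant|
      uint32_t l = static_cast<uint32_t>(left);
      uint32_t result;
      if ((abs & (abs - 1)) == 0) {                // |constant| == 2^k: sll
        result = l << __builtin_ctz(abs);
      } else if (((abs - 1) & (abs - 2)) == 0) {   // 2^k + 1: sll + Addu
        result = (l << __builtin_ctz(abs - 1)) + l;
      } else if (((abs + 1) & abs) == 0) {         // 2^k - 1: sll + Subu
        result = (l << __builtin_ctz(abs + 1)) - l;
      } else {
        return left * constant;                    // fall back to a real Mul
      }
      if (constant < 0) result = 0u - result;      // correct the sign
      return static_cast<int32_t>(result);
    }

    int main() {
      assert(MulByConstant(5, 8) == 40);    // 2^3
      assert(MulByConstant(5, 9) == 45);    // 2^3 + 1
      assert(MulByConstant(5, -7) == -35);  // -(2^3 - 1)
    }
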
1535
1536
1537 void LCodeGen::DoBitI(LBitI* instr) {
1538 LOperand* left_op = instr->left();
1539 LOperand* right_op = instr->right();
1540 DCHECK(left_op->IsRegister());
1541 Register left = ToRegister(left_op);
1542 Register result = ToRegister(instr->result());
1543 Operand right(no_reg);
1544
1545 if (right_op->IsStackSlot()) {
1546 right = Operand(EmitLoadRegister(right_op, at));
1547 } else {
1548 DCHECK(right_op->IsRegister() || right_op->IsConstantOperand());
1549 right = ToOperand(right_op);
1550 }
1551
1552 switch (instr->op()) {
1553 case Token::BIT_AND:
1554 __ And(result, left, right);
1555 break;
1556 case Token::BIT_OR:
1557 __ Or(result, left, right);
1558 break;
1559 case Token::BIT_XOR:
1560 if (right_op->IsConstantOperand() && right.immediate() == int32_t(~0)) {
1561 __ Nor(result, zero_reg, left);
1562 } else {
1563 __ Xor(result, left, right);
1564 }
1565 break;
1566 default:
1567 UNREACHABLE();
1568 break;
1569 }
1570 }
1571
1572
1573 void LCodeGen::DoShiftI(LShiftI* instr) {
1574 // Both 'left' and 'right' are "used at start" (see LCodeGen::DoShift), so
1575 // result may alias either of them.
1576 LOperand* right_op = instr->right();
1577 Register left = ToRegister(instr->left());
1578 Register result = ToRegister(instr->result());
1579 Register scratch = scratch0();
1580
1581 if (right_op->IsRegister()) {
1582 // No need to mask the right operand on MIPS, it is built into the variable
1583 // shift instructions.
1584 switch (instr->op()) {
1585 case Token::ROR:
1586 __ Ror(result, left, Operand(ToRegister(right_op)));
1587 break;
1588 case Token::SAR:
1589 __ srav(result, left, ToRegister(right_op));
1590 break;
1591 case Token::SHR:
1592 __ srlv(result, left, ToRegister(right_op));
1593 if (instr->can_deopt()) {
1594 DeoptimizeIf(lt, instr, Deoptimizer::kNegativeValue, result,
1595 Operand(zero_reg));
1596 }
1597 break;
1598 case Token::SHL:
1599 __ sllv(result, left, ToRegister(right_op));
1600 break;
1601 default:
1602 UNREACHABLE();
1603 break;
1604 }
1605 } else {
1606 // Mask the right_op operand.
1607 int value = ToInteger32(LConstantOperand::cast(right_op));
1608 uint8_t shift_count = static_cast<uint8_t>(value & 0x1F);
1609 switch (instr->op()) {
1610 case Token::ROR:
1611 if (shift_count != 0) {
1612 __ Ror(result, left, Operand(shift_count));
1613 } else {
1614 __ Move(result, left);
1615 }
1616 break;
1617 case Token::SAR:
1618 if (shift_count != 0) {
1619 __ sra(result, left, shift_count);
1620 } else {
1621 __ Move(result, left);
1622 }
1623 break;
1624 case Token::SHR:
1625 if (shift_count != 0) {
1626 __ srl(result, left, shift_count);
1627 } else {
1628 if (instr->can_deopt()) {
1629 __ And(at, left, Operand(0x80000000));
1630 DeoptimizeIf(ne, instr, Deoptimizer::kNegativeValue, at,
1631 Operand(zero_reg));
1632 }
1633 __ Move(result, left);
1634 }
1635 break;
1636 case Token::SHL:
1637 if (shift_count != 0) {
1638 if (instr->hydrogen_value()->representation().IsSmi() &&
1639 instr->can_deopt()) {
1640 if (shift_count != 1) {
1641 __ sll(result, left, shift_count - 1);
1642 __ SmiTagCheckOverflow(result, result, scratch);
1643 } else {
1644 __ SmiTagCheckOverflow(result, left, scratch);
1645 }
1646 DeoptimizeIf(lt, instr, Deoptimizer::kOverflow, scratch,
1647 Operand(zero_reg));
1648 } else {
1649 __ sll(result, left, shift_count);
1650 }
1651 } else {
1652 __ Move(result, left);
1653 }
1654 break;
1655 default:
1656 UNREACHABLE();
1657 break;
1658 }
1659 }
1660 }
1661
1662
1663 void LCodeGen::DoSubI(LSubI* instr) {
1664 LOperand* left = instr->left();
1665 LOperand* right = instr->right();
1666 LOperand* result = instr->result();
1667 bool can_overflow = instr->hydrogen()->CheckFlag(HValue::kCanOverflow);
1668
1669 if (!can_overflow) {
1670 if (right->IsStackSlot()) {
1671 Register right_reg = EmitLoadRegister(right, at);
1672 __ Subu(ToRegister(result), ToRegister(left), Operand(right_reg));
1673 } else {
1674 DCHECK(right->IsRegister() || right->IsConstantOperand());
1675 __ Subu(ToRegister(result), ToRegister(left), ToOperand(right));
1676 }
1677 } else { // can_overflow.
1678 Register overflow = scratch0();
1679 Register scratch = scratch1();
1680 if (right->IsStackSlot()) {
1681 Register right_reg = EmitLoadRegister(right, scratch);
1682 __ SubuAndCheckForOverflow(ToRegister(result),
1683 ToRegister(left),
1684 right_reg,
1685                                  overflow);  // Register at is also used as scratch.
1686 } else {
1687 DCHECK(right->IsRegister() || right->IsConstantOperand());
1688 __ SubuAndCheckForOverflow(ToRegister(result), ToRegister(left),
1689 ToOperand(right), overflow, scratch);
1690 }
1691 DeoptimizeIf(lt, instr, Deoptimizer::kOverflow, overflow,
1692 Operand(zero_reg));
1693 }
1694 }
1695
1696
1697 void LCodeGen::DoConstantI(LConstantI* instr) {
1698 __ li(ToRegister(instr->result()), Operand(instr->value()));
1699 }
1700
1701
1702 void LCodeGen::DoConstantS(LConstantS* instr) {
1703 __ li(ToRegister(instr->result()), Operand(instr->value()));
1704 }
1705
1706
1707 void LCodeGen::DoConstantD(LConstantD* instr) {
1708 DCHECK(instr->result()->IsDoubleRegister());
1709 DoubleRegister result = ToDoubleRegister(instr->result());
1710 #if V8_HOST_ARCH_IA32
1711   // Work around x87 sNaN -> qNaN conversion breakage that occurs in
1712   // simulator builds.
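  // Loading the raw bit halves through GPRs avoids handling the value as a
  // C++ double on the host, which on x87 would quietly turn the sNaN into a
  // qNaN.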
1713 uint64_t bits = instr->bits();
1714 if ((bits & V8_UINT64_C(0x7FF8000000000000)) ==
1715 V8_UINT64_C(0x7FF0000000000000)) {
1716 uint32_t lo = static_cast<uint32_t>(bits);
1717 uint32_t hi = static_cast<uint32_t>(bits >> 32);
1718 __ li(at, Operand(lo));
1719 __ li(scratch0(), Operand(hi));
1720 __ Move(result, at, scratch0());
1721 return;
1722 }
1723 #endif
1724 double v = instr->value();
1725 __ Move(result, v);
1726 }
1727
1728
1729 void LCodeGen::DoConstantE(LConstantE* instr) {
1730 __ li(ToRegister(instr->result()), Operand(instr->value()));
1731 }
1732
1733
1734 void LCodeGen::DoConstantT(LConstantT* instr) {
1735 Handle<Object> object = instr->value(isolate());
1736 AllowDeferredHandleDereference smi_check;
1737 __ li(ToRegister(instr->result()), object);
1738 }
1739
1740
1741 void LCodeGen::DoMapEnumLength(LMapEnumLength* instr) {
1742 Register result = ToRegister(instr->result());
1743 Register map = ToRegister(instr->value());
1744 __ EnumLength(result, map);
1745 }
1746
1747
1748 void LCodeGen::DoDateField(LDateField* instr) {
1749 Register object = ToRegister(instr->date());
1750 Register result = ToRegister(instr->result());
1751 Register scratch = ToRegister(instr->temp());
1752 Smi* index = instr->index();
1753 DCHECK(object.is(a0));
1754 DCHECK(result.is(v0));
1755 DCHECK(!scratch.is(scratch0()));
1756 DCHECK(!scratch.is(object));
1757
1758 if (index->value() == 0) {
1759 __ lw(result, FieldMemOperand(object, JSDate::kValueOffset));
1760 } else {
1761 Label runtime, done;
1762 if (index->value() < JSDate::kFirstUncachedField) {
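      // Cached date fields are only valid while the cache stamp stored in the
      // JSDate object matches the isolate's current date cache stamp;
      // otherwise fall through to the runtime call below.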
1763 ExternalReference stamp = ExternalReference::date_cache_stamp(isolate());
1764 __ li(scratch, Operand(stamp));
1765 __ lw(scratch, MemOperand(scratch));
1766 __ lw(scratch0(), FieldMemOperand(object, JSDate::kCacheStampOffset));
1767 __ Branch(&runtime, ne, scratch, Operand(scratch0()));
1768 __ lw(result, FieldMemOperand(object, JSDate::kValueOffset +
1769 kPointerSize * index->value()));
1770 __ jmp(&done);
1771 }
1772 __ bind(&runtime);
1773 __ PrepareCallCFunction(2, scratch);
1774 __ li(a1, Operand(index));
1775 __ CallCFunction(ExternalReference::get_date_field_function(isolate()), 2);
1776 __ bind(&done);
1777 }
1778 }
1779
1780
1781 MemOperand LCodeGen::BuildSeqStringOperand(Register string,
1782 LOperand* index,
1783 String::Encoding encoding) {
1784 if (index->IsConstantOperand()) {
1785 int offset = ToInteger32(LConstantOperand::cast(index));
1786 if (encoding == String::TWO_BYTE_ENCODING) {
1787 offset *= kUC16Size;
1788 }
1789 STATIC_ASSERT(kCharSize == 1);
1790 return FieldMemOperand(string, SeqString::kHeaderSize + offset);
1791 }
1792 Register scratch = scratch0();
1793 DCHECK(!scratch.is(string));
1794 DCHECK(!scratch.is(ToRegister(index)));
1795 if (encoding == String::ONE_BYTE_ENCODING) {
1796 __ Addu(scratch, string, ToRegister(index));
1797 } else {
1798 STATIC_ASSERT(kUC16Size == 2);
1799 __ sll(scratch, ToRegister(index), 1);
1800 __ Addu(scratch, string, scratch);
1801 }
1802 return FieldMemOperand(scratch, SeqString::kHeaderSize);
1803 }
1804
1805
1806 void LCodeGen::DoSeqStringGetChar(LSeqStringGetChar* instr) {
1807 String::Encoding encoding = instr->hydrogen()->encoding();
1808 Register string = ToRegister(instr->string());
1809 Register result = ToRegister(instr->result());
1810
1811 if (FLAG_debug_code) {
1812 Register scratch = scratch0();
1813 __ lw(scratch, FieldMemOperand(string, HeapObject::kMapOffset));
1814 __ lbu(scratch, FieldMemOperand(scratch, Map::kInstanceTypeOffset));
1815
1816 __ And(scratch, scratch,
1817 Operand(kStringRepresentationMask | kStringEncodingMask));
1818 static const uint32_t one_byte_seq_type = kSeqStringTag | kOneByteStringTag;
1819 static const uint32_t two_byte_seq_type = kSeqStringTag | kTwoByteStringTag;
1820 __ Subu(at, scratch, Operand(encoding == String::ONE_BYTE_ENCODING
1821 ? one_byte_seq_type : two_byte_seq_type));
1822 __ Check(eq, kUnexpectedStringType, at, Operand(zero_reg));
1823 }
1824
1825 MemOperand operand = BuildSeqStringOperand(string, instr->index(), encoding);
1826 if (encoding == String::ONE_BYTE_ENCODING) {
1827 __ lbu(result, operand);
1828 } else {
1829 __ lhu(result, operand);
1830 }
1831 }
1832
1833
1834 void LCodeGen::DoSeqStringSetChar(LSeqStringSetChar* instr) {
1835 String::Encoding encoding = instr->hydrogen()->encoding();
1836 Register string = ToRegister(instr->string());
1837 Register value = ToRegister(instr->value());
1838
1839 if (FLAG_debug_code) {
1840 Register scratch = scratch0();
1841 Register index = ToRegister(instr->index());
1842 static const uint32_t one_byte_seq_type = kSeqStringTag | kOneByteStringTag;
1843 static const uint32_t two_byte_seq_type = kSeqStringTag | kTwoByteStringTag;
1844 int encoding_mask =
1845 instr->hydrogen()->encoding() == String::ONE_BYTE_ENCODING
1846 ? one_byte_seq_type : two_byte_seq_type;
1847 __ EmitSeqStringSetCharCheck(string, index, value, scratch, encoding_mask);
1848 }
1849
1850 MemOperand operand = BuildSeqStringOperand(string, instr->index(), encoding);
1851 if (encoding == String::ONE_BYTE_ENCODING) {
1852 __ sb(value, operand);
1853 } else {
1854 __ sh(value, operand);
1855 }
1856 }
1857
1858
1859 void LCodeGen::DoAddI(LAddI* instr) {
1860 LOperand* left = instr->left();
1861 LOperand* right = instr->right();
1862 LOperand* result = instr->result();
1863 bool can_overflow = instr->hydrogen()->CheckFlag(HValue::kCanOverflow);
1864
1865 if (!can_overflow) {
1866 if (right->IsStackSlot()) {
1867 Register right_reg = EmitLoadRegister(right, at);
1868 __ Addu(ToRegister(result), ToRegister(left), Operand(right_reg));
1869 } else {
1870 DCHECK(right->IsRegister() || right->IsConstantOperand());
1871 __ Addu(ToRegister(result), ToRegister(left), ToOperand(right));
1872 }
1873 } else { // can_overflow.
1874 Register overflow = scratch0();
1875 Register scratch = scratch1();
1876 if (right->IsStackSlot()) {
1877 Register right_reg = EmitLoadRegister(right, scratch);
1878 __ AdduAndCheckForOverflow(ToRegister(result),
1879 ToRegister(left),
1880 right_reg,
1881                                  overflow);  // Register at is also used as scratch.
1882 } else {
1883 DCHECK(right->IsRegister() || right->IsConstantOperand());
1884 __ AdduAndCheckForOverflow(ToRegister(result), ToRegister(left),
1885 ToOperand(right), overflow, scratch);
1886 }
1887 DeoptimizeIf(lt, instr, Deoptimizer::kOverflow, overflow,
1888 Operand(zero_reg));
1889 }
1890 }
1891
1892
1893 void LCodeGen::DoMathMinMax(LMathMinMax* instr) {
1894 LOperand* left = instr->left();
1895 LOperand* right = instr->right();
1896 HMathMinMax::Operation operation = instr->hydrogen()->operation();
1897 Condition condition = (operation == HMathMinMax::kMathMin) ? le : ge;
1898 if (instr->hydrogen()->representation().IsSmiOrInteger32()) {
1899 Register left_reg = ToRegister(left);
1900 Register right_reg = EmitLoadRegister(right, scratch0());
1901 Register result_reg = ToRegister(instr->result());
1902 Label return_right, done;
1903 Register scratch = scratch1();
1904 __ Slt(scratch, left_reg, Operand(right_reg));
1905 if (condition == ge) {
1906 __ Movz(result_reg, left_reg, scratch);
1907 __ Movn(result_reg, right_reg, scratch);
1908 } else {
1909 DCHECK(condition == le);
1910 __ Movn(result_reg, left_reg, scratch);
1911 __ Movz(result_reg, right_reg, scratch);
1912 }
1913 } else {
1914 DCHECK(instr->hydrogen()->representation().IsDouble());
1915 FPURegister left_reg = ToDoubleRegister(left);
1916 FPURegister right_reg = ToDoubleRegister(right);
1917 FPURegister result_reg = ToDoubleRegister(instr->result());
1918 Label check_nan_left, check_zero, return_left, return_right, done;
1919 __ BranchF(&check_zero, &check_nan_left, eq, left_reg, right_reg);
1920 __ BranchF(&return_left, NULL, condition, left_reg, right_reg);
1921 __ Branch(&return_right);
1922
1923 __ bind(&check_zero);
1924 // left == right != 0.
1925 __ BranchF(&return_left, NULL, ne, left_reg, kDoubleRegZero);
1926 // At this point, both left and right are either 0 or -0.
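    // IEEE addition of two zeros only yields -0 when both inputs are -0,
    // which is exactly what max needs; min must return -0 if either input
    // is -0, so it is computed as -((-left) - right) instead.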
1927 if (operation == HMathMinMax::kMathMin) {
1928 __ neg_d(left_reg, left_reg);
1929 __ sub_d(result_reg, left_reg, right_reg);
1930 __ neg_d(result_reg, result_reg);
1931 } else {
1932 __ add_d(result_reg, left_reg, right_reg);
1933 }
1934 __ Branch(&done);
1935
1936 __ bind(&check_nan_left);
1937 // left == NaN.
1938 __ BranchF(NULL, &return_left, eq, left_reg, left_reg);
1939 __ bind(&return_right);
1940 if (!right_reg.is(result_reg)) {
1941 __ mov_d(result_reg, right_reg);
1942 }
1943 __ Branch(&done);
1944
1945 __ bind(&return_left);
1946 if (!left_reg.is(result_reg)) {
1947 __ mov_d(result_reg, left_reg);
1948 }
1949 __ bind(&done);
1950 }
1951 }
1952
1953
1954 void LCodeGen::DoArithmeticD(LArithmeticD* instr) {
1955 DoubleRegister left = ToDoubleRegister(instr->left());
1956 DoubleRegister right = ToDoubleRegister(instr->right());
1957 DoubleRegister result = ToDoubleRegister(instr->result());
1958 switch (instr->op()) {
1959 case Token::ADD:
1960 __ add_d(result, left, right);
1961 break;
1962 case Token::SUB:
1963 __ sub_d(result, left, right);
1964 break;
1965 case Token::MUL:
1966 __ mul_d(result, left, right);
1967 break;
1968 case Token::DIV:
1969 __ div_d(result, left, right);
1970 break;
1971 case Token::MOD: {
1972 // Save a0-a3 on the stack.
1973 RegList saved_regs = a0.bit() | a1.bit() | a2.bit() | a3.bit();
1974 __ MultiPush(saved_regs);
1975
1976 __ PrepareCallCFunction(0, 2, scratch0());
1977 __ MovToFloatParameters(left, right);
1978 __ CallCFunction(
1979 ExternalReference::mod_two_doubles_operation(isolate()),
1980 0, 2);
1981       // Move the result into the double result register.
1982 __ MovFromFloatResult(result);
1983
1984       // Restore saved registers.
1985 __ MultiPop(saved_regs);
1986 break;
1987 }
1988 default:
1989 UNREACHABLE();
1990 break;
1991 }
1992 }
1993
1994
1995 void LCodeGen::DoArithmeticT(LArithmeticT* instr) {
1996 DCHECK(ToRegister(instr->context()).is(cp));
1997 DCHECK(ToRegister(instr->left()).is(a1));
1998 DCHECK(ToRegister(instr->right()).is(a0));
1999 DCHECK(ToRegister(instr->result()).is(v0));
2000
2001 Handle<Code> code =
2002 CodeFactory::BinaryOpIC(isolate(), instr->op(), instr->strength()).code();
2003 CallCode(code, RelocInfo::CODE_TARGET, instr);
2004   // Other architectures use a nop here to signal that there is no inlined
2005   // patchable code. MIPS does not need the nop, since our marker
2006   // instruction (andi zero_reg) will never be used in normal code.
2007 }
2008
2009
2010 template<class InstrType>
2011 void LCodeGen::EmitBranch(InstrType instr,
2012 Condition condition,
2013 Register src1,
2014 const Operand& src2) {
2015 int left_block = instr->TrueDestination(chunk_);
2016 int right_block = instr->FalseDestination(chunk_);
2017
2018 int next_block = GetNextEmittedBlock();
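  // Emit the minimal number of branches: fall through when one of the
  // targets is the next block in emission order, otherwise branch to the
  // true block and unconditionally jump to the false block.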
2019 if (right_block == left_block || condition == al) {
2020 EmitGoto(left_block);
2021 } else if (left_block == next_block) {
2022 __ Branch(chunk_->GetAssemblyLabel(right_block),
2023 NegateCondition(condition), src1, src2);
2024 } else if (right_block == next_block) {
2025 __ Branch(chunk_->GetAssemblyLabel(left_block), condition, src1, src2);
2026 } else {
2027 __ Branch(chunk_->GetAssemblyLabel(left_block), condition, src1, src2);
2028 __ Branch(chunk_->GetAssemblyLabel(right_block));
2029 }
2030 }
2031
2032
2033 template<class InstrType>
2034 void LCodeGen::EmitBranchF(InstrType instr,
2035 Condition condition,
2036 FPURegister src1,
2037 FPURegister src2) {
2038 int right_block = instr->FalseDestination(chunk_);
2039 int left_block = instr->TrueDestination(chunk_);
2040
2041 int next_block = GetNextEmittedBlock();
2042 if (right_block == left_block) {
2043 EmitGoto(left_block);
2044 } else if (left_block == next_block) {
2045 __ BranchF(chunk_->GetAssemblyLabel(right_block), NULL,
2046 NegateFpuCondition(condition), src1, src2);
2047 } else if (right_block == next_block) {
2048 __ BranchF(chunk_->GetAssemblyLabel(left_block), NULL,
2049 condition, src1, src2);
2050 } else {
2051 __ BranchF(chunk_->GetAssemblyLabel(left_block), NULL,
2052 condition, src1, src2);
2053 __ Branch(chunk_->GetAssemblyLabel(right_block));
2054 }
2055 }
2056
2057
2058 template <class InstrType>
2059 void LCodeGen::EmitTrueBranch(InstrType instr, Condition condition,
2060 Register src1, const Operand& src2) {
2061 int true_block = instr->TrueDestination(chunk_);
2062 __ Branch(chunk_->GetAssemblyLabel(true_block), condition, src1, src2);
2063 }
2064
2065
2066 template <class InstrType>
2067 void LCodeGen::EmitFalseBranch(InstrType instr, Condition condition,
2068 Register src1, const Operand& src2) {
2069 int false_block = instr->FalseDestination(chunk_);
2070 __ Branch(chunk_->GetAssemblyLabel(false_block), condition, src1, src2);
2071 }
2072
2073
2074 template<class InstrType>
2075 void LCodeGen::EmitFalseBranchF(InstrType instr,
2076 Condition condition,
2077 FPURegister src1,
2078 FPURegister src2) {
2079 int false_block = instr->FalseDestination(chunk_);
2080 __ BranchF(chunk_->GetAssemblyLabel(false_block), NULL,
2081 condition, src1, src2);
2082 }
2083
2084
2085 void LCodeGen::DoDebugBreak(LDebugBreak* instr) {
2086 __ stop("LDebugBreak");
2087 }
2088
2089
2090 void LCodeGen::DoBranch(LBranch* instr) {
2091 Representation r = instr->hydrogen()->value()->representation();
2092 if (r.IsInteger32() || r.IsSmi()) {
2093 DCHECK(!info()->IsStub());
2094 Register reg = ToRegister(instr->value());
2095 EmitBranch(instr, ne, reg, Operand(zero_reg));
2096 } else if (r.IsDouble()) {
2097 DCHECK(!info()->IsStub());
2098 DoubleRegister reg = ToDoubleRegister(instr->value());
2099 // Test the double value. Zero and NaN are false.
2100 EmitBranchF(instr, ogl, reg, kDoubleRegZero);
2101 } else {
2102 DCHECK(r.IsTagged());
2103 Register reg = ToRegister(instr->value());
2104 HType type = instr->hydrogen()->value()->type();
2105 if (type.IsBoolean()) {
2106 DCHECK(!info()->IsStub());
2107 __ LoadRoot(at, Heap::kTrueValueRootIndex);
2108 EmitBranch(instr, eq, reg, Operand(at));
2109 } else if (type.IsSmi()) {
2110 DCHECK(!info()->IsStub());
2111 EmitBranch(instr, ne, reg, Operand(zero_reg));
2112 } else if (type.IsJSArray()) {
2113 DCHECK(!info()->IsStub());
2114 EmitBranch(instr, al, zero_reg, Operand(zero_reg));
2115 } else if (type.IsHeapNumber()) {
2116 DCHECK(!info()->IsStub());
2117 DoubleRegister dbl_scratch = double_scratch0();
2118 __ ldc1(dbl_scratch, FieldMemOperand(reg, HeapNumber::kValueOffset));
2119 // Test the double value. Zero and NaN are false.
2120 EmitBranchF(instr, ogl, dbl_scratch, kDoubleRegZero);
2121 } else if (type.IsString()) {
2122 DCHECK(!info()->IsStub());
2123 __ lw(at, FieldMemOperand(reg, String::kLengthOffset));
2124 EmitBranch(instr, ne, at, Operand(zero_reg));
2125 } else {
2126 ToBooleanStub::Types expected = instr->hydrogen()->expected_input_types();
2127 // Avoid deopts in the case where we've never executed this path before.
2128 if (expected.IsEmpty()) expected = ToBooleanStub::Types::Generic();
2129
2130 if (expected.Contains(ToBooleanStub::UNDEFINED)) {
2131 // undefined -> false.
2132 __ LoadRoot(at, Heap::kUndefinedValueRootIndex);
2133 __ Branch(instr->FalseLabel(chunk_), eq, reg, Operand(at));
2134 }
2135 if (expected.Contains(ToBooleanStub::BOOLEAN)) {
2136 // Boolean -> its value.
2137 __ LoadRoot(at, Heap::kTrueValueRootIndex);
2138 __ Branch(instr->TrueLabel(chunk_), eq, reg, Operand(at));
2139 __ LoadRoot(at, Heap::kFalseValueRootIndex);
2140 __ Branch(instr->FalseLabel(chunk_), eq, reg, Operand(at));
2141 }
2142 if (expected.Contains(ToBooleanStub::NULL_TYPE)) {
2143 // 'null' -> false.
2144 __ LoadRoot(at, Heap::kNullValueRootIndex);
2145 __ Branch(instr->FalseLabel(chunk_), eq, reg, Operand(at));
2146 }
2147
2148 if (expected.Contains(ToBooleanStub::SMI)) {
2149         // Smis: 0 -> false, all others -> true.
2150 __ Branch(instr->FalseLabel(chunk_), eq, reg, Operand(zero_reg));
2151 __ JumpIfSmi(reg, instr->TrueLabel(chunk_));
2152 } else if (expected.NeedsMap()) {
2153 // If we need a map later and have a Smi -> deopt.
2154 __ SmiTst(reg, at);
2155 DeoptimizeIf(eq, instr, Deoptimizer::kSmi, at, Operand(zero_reg));
2156 }
2157
2158 const Register map = scratch0();
2159 if (expected.NeedsMap()) {
2160 __ lw(map, FieldMemOperand(reg, HeapObject::kMapOffset));
2161 if (expected.CanBeUndetectable()) {
2162 // Undetectable -> false.
2163 __ lbu(at, FieldMemOperand(map, Map::kBitFieldOffset));
2164 __ And(at, at, Operand(1 << Map::kIsUndetectable));
2165 __ Branch(instr->FalseLabel(chunk_), ne, at, Operand(zero_reg));
2166 }
2167 }
2168
2169 if (expected.Contains(ToBooleanStub::SPEC_OBJECT)) {
2170 // spec object -> true.
2171 __ lbu(at, FieldMemOperand(map, Map::kInstanceTypeOffset));
2172 __ Branch(instr->TrueLabel(chunk_),
2173 ge, at, Operand(FIRST_SPEC_OBJECT_TYPE));
2174 }
2175
2176 if (expected.Contains(ToBooleanStub::STRING)) {
2177 // String value -> false iff empty.
2178 Label not_string;
2179 __ lbu(at, FieldMemOperand(map, Map::kInstanceTypeOffset));
2180         __ Branch(&not_string, ge, at, Operand(FIRST_NONSTRING_TYPE));
2181 __ lw(at, FieldMemOperand(reg, String::kLengthOffset));
2182 __ Branch(instr->TrueLabel(chunk_), ne, at, Operand(zero_reg));
2183 __ Branch(instr->FalseLabel(chunk_));
2184 __ bind(&not_string);
2185 }
2186
2187 if (expected.Contains(ToBooleanStub::SYMBOL)) {
2188 // Symbol value -> true.
2189 const Register scratch = scratch1();
2190 __ lbu(scratch, FieldMemOperand(map, Map::kInstanceTypeOffset));
2191 __ Branch(instr->TrueLabel(chunk_), eq, scratch, Operand(SYMBOL_TYPE));
2192 }
2193
2194 if (expected.Contains(ToBooleanStub::SIMD_VALUE)) {
2195 // SIMD value -> true.
2196 const Register scratch = scratch1();
2197 __ lbu(scratch, FieldMemOperand(map, Map::kInstanceTypeOffset));
2198 __ Branch(instr->TrueLabel(chunk_), eq, scratch,
2199 Operand(SIMD128_VALUE_TYPE));
2200 }
2201
2202 if (expected.Contains(ToBooleanStub::HEAP_NUMBER)) {
2203 // heap number -> false iff +0, -0, or NaN.
2204 DoubleRegister dbl_scratch = double_scratch0();
2205 Label not_heap_number;
2206 __ LoadRoot(at, Heap::kHeapNumberMapRootIndex);
2207 __ Branch(&not_heap_number, ne, map, Operand(at));
2208 __ ldc1(dbl_scratch, FieldMemOperand(reg, HeapNumber::kValueOffset));
2209 __ BranchF(instr->TrueLabel(chunk_), instr->FalseLabel(chunk_),
2210 ne, dbl_scratch, kDoubleRegZero);
2211 // Falls through if dbl_scratch == 0.
2212 __ Branch(instr->FalseLabel(chunk_));
2213 __ bind(&not_heap_number);
2214 }
2215
2216 if (!expected.IsGeneric()) {
2217 // We've seen something for the first time -> deopt.
2218 // This can only happen if we are not generic already.
2219 DeoptimizeIf(al, instr, Deoptimizer::kUnexpectedObject, zero_reg,
2220 Operand(zero_reg));
2221 }
2222 }
2223 }
2224 }
2225
2226
2227 void LCodeGen::EmitGoto(int block) {
2228 if (!IsNextEmittedBlock(block)) {
2229 __ jmp(chunk_->GetAssemblyLabel(LookupDestination(block)));
2230 }
2231 }
2232
2233
2234 void LCodeGen::DoGoto(LGoto* instr) {
2235 EmitGoto(instr->block_id());
2236 }
2237
2238
2239 Condition LCodeGen::TokenToCondition(Token::Value op, bool is_unsigned) {
2240 Condition cond = kNoCondition;
2241 switch (op) {
2242 case Token::EQ:
2243 case Token::EQ_STRICT:
2244 cond = eq;
2245 break;
2246 case Token::NE:
2247 case Token::NE_STRICT:
2248 cond = ne;
2249 break;
2250 case Token::LT:
2251 cond = is_unsigned ? lo : lt;
2252 break;
2253 case Token::GT:
2254 cond = is_unsigned ? hi : gt;
2255 break;
2256 case Token::LTE:
2257 cond = is_unsigned ? ls : le;
2258 break;
2259 case Token::GTE:
2260 cond = is_unsigned ? hs : ge;
2261 break;
2262 case Token::IN:
2263 case Token::INSTANCEOF:
2264 default:
2265 UNREACHABLE();
2266 }
2267 return cond;
2268 }
2269
2270
2271 void LCodeGen::DoCompareNumericAndBranch(LCompareNumericAndBranch* instr) {
2272 LOperand* left = instr->left();
2273 LOperand* right = instr->right();
2274 bool is_unsigned =
2275 instr->hydrogen()->left()->CheckFlag(HInstruction::kUint32) ||
2276 instr->hydrogen()->right()->CheckFlag(HInstruction::kUint32);
2277 Condition cond = TokenToCondition(instr->op(), is_unsigned);
2278
2279 if (left->IsConstantOperand() && right->IsConstantOperand()) {
2280 // We can statically evaluate the comparison.
2281 double left_val = ToDouble(LConstantOperand::cast(left));
2282 double right_val = ToDouble(LConstantOperand::cast(right));
2283 int next_block = EvalComparison(instr->op(), left_val, right_val) ?
2284 instr->TrueDestination(chunk_) : instr->FalseDestination(chunk_);
2285 EmitGoto(next_block);
2286 } else {
2287 if (instr->is_double()) {
2288       // Compare left and right as doubles directly; MIPS has no flags
2289       // register, so the branch macros take the operands themselves.
2290 FPURegister left_reg = ToDoubleRegister(left);
2291 FPURegister right_reg = ToDoubleRegister(right);
2292
2293 // If a NaN is involved, i.e. the result is unordered,
2294       // jump to the false block label.
2295 __ BranchF(NULL, instr->FalseLabel(chunk_), eq,
2296 left_reg, right_reg);
2297
2298 EmitBranchF(instr, cond, left_reg, right_reg);
2299 } else {
2300 Register cmp_left;
2301 Operand cmp_right = Operand(0);
2302
2303 if (right->IsConstantOperand()) {
2304 int32_t value = ToInteger32(LConstantOperand::cast(right));
2305 if (instr->hydrogen_value()->representation().IsSmi()) {
2306 cmp_left = ToRegister(left);
2307 cmp_right = Operand(Smi::FromInt(value));
2308 } else {
2309 cmp_left = ToRegister(left);
2310 cmp_right = Operand(value);
2311 }
2312 } else if (left->IsConstantOperand()) {
2313 int32_t value = ToInteger32(LConstantOperand::cast(left));
2314 if (instr->hydrogen_value()->representation().IsSmi()) {
2315 cmp_left = ToRegister(right);
2316 cmp_right = Operand(Smi::FromInt(value));
2317 } else {
2318 cmp_left = ToRegister(right);
2319 cmp_right = Operand(value);
2320 }
2321 // We commuted the operands, so commute the condition.
2322 cond = CommuteCondition(cond);
2323 } else {
2324 cmp_left = ToRegister(left);
2325 cmp_right = Operand(ToRegister(right));
2326 }
2327
2328 EmitBranch(instr, cond, cmp_left, cmp_right);
2329 }
2330 }
2331 }
2332
2333
2334 void LCodeGen::DoCmpObjectEqAndBranch(LCmpObjectEqAndBranch* instr) {
2335 Register left = ToRegister(instr->left());
2336 Register right = ToRegister(instr->right());
2337
2338 EmitBranch(instr, eq, left, Operand(right));
2339 }
2340
2341
2342 void LCodeGen::DoCmpHoleAndBranch(LCmpHoleAndBranch* instr) {
2343 if (instr->hydrogen()->representation().IsTagged()) {
2344 Register input_reg = ToRegister(instr->object());
2345 __ li(at, Operand(factory()->the_hole_value()));
2346 EmitBranch(instr, eq, input_reg, Operand(at));
2347 return;
2348 }
2349
2350 DoubleRegister input_reg = ToDoubleRegister(instr->object());
2351 EmitFalseBranchF(instr, eq, input_reg, input_reg);
2352
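  // The hole is encoded as a NaN with a distinguished upper word. The check
  // above sends ordinary (non-NaN) doubles to the false branch; the upper
  // word comparison below tells the hole apart from any other NaN.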
2353 Register scratch = scratch0();
2354 __ FmoveHigh(scratch, input_reg);
2355 EmitBranch(instr, eq, scratch, Operand(kHoleNanUpper32));
2356 }
2357
2358
2359 void LCodeGen::DoCompareMinusZeroAndBranch(LCompareMinusZeroAndBranch* instr) {
2360 Representation rep = instr->hydrogen()->value()->representation();
2361 DCHECK(!rep.IsInteger32());
2362 Register scratch = ToRegister(instr->temp());
2363
2364 if (rep.IsDouble()) {
2365 DoubleRegister value = ToDoubleRegister(instr->value());
2366 EmitFalseBranchF(instr, ne, value, kDoubleRegZero);
2367 __ FmoveHigh(scratch, value);
2368 __ li(at, 0x80000000);
2369 } else {
2370 Register value = ToRegister(instr->value());
2371 __ CheckMap(value,
2372 scratch,
2373 Heap::kHeapNumberMapRootIndex,
2374 instr->FalseLabel(chunk()),
2375 DO_SMI_CHECK);
2376 __ lw(scratch, FieldMemOperand(value, HeapNumber::kExponentOffset));
2377 EmitFalseBranch(instr, ne, scratch, Operand(0x80000000));
2378 __ lw(scratch, FieldMemOperand(value, HeapNumber::kMantissaOffset));
2379 __ mov(at, zero_reg);
2380 }
2381 EmitBranch(instr, eq, scratch, Operand(at));
2382 }
2383
2384
2385 Condition LCodeGen::EmitIsString(Register input,
2386 Register temp1,
2387 Label* is_not_string,
2388 SmiCheck check_needed = INLINE_SMI_CHECK) {
2389 if (check_needed == INLINE_SMI_CHECK) {
2390 __ JumpIfSmi(input, is_not_string);
2391 }
2392 __ GetObjectType(input, temp1, temp1);
2393
2394 return lt;
2395 }
2396
2397
2398 void LCodeGen::DoIsStringAndBranch(LIsStringAndBranch* instr) {
2399 Register reg = ToRegister(instr->value());
2400 Register temp1 = ToRegister(instr->temp());
2401
2402 SmiCheck check_needed =
2403 instr->hydrogen()->value()->type().IsHeapObject()
2404 ? OMIT_SMI_CHECK : INLINE_SMI_CHECK;
2405 Condition true_cond =
2406 EmitIsString(reg, temp1, instr->FalseLabel(chunk_), check_needed);
2407
2408 EmitBranch(instr, true_cond, temp1,
2409 Operand(FIRST_NONSTRING_TYPE));
2410 }
2411
2412
2413 void LCodeGen::DoIsSmiAndBranch(LIsSmiAndBranch* instr) {
2414 Register input_reg = EmitLoadRegister(instr->value(), at);
2415 __ And(at, input_reg, kSmiTagMask);
2416 EmitBranch(instr, eq, at, Operand(zero_reg));
2417 }
2418
2419
2420 void LCodeGen::DoIsUndetectableAndBranch(LIsUndetectableAndBranch* instr) {
2421 Register input = ToRegister(instr->value());
2422 Register temp = ToRegister(instr->temp());
2423
2424 if (!instr->hydrogen()->value()->type().IsHeapObject()) {
2425 __ JumpIfSmi(input, instr->FalseLabel(chunk_));
2426 }
2427 __ lw(temp, FieldMemOperand(input, HeapObject::kMapOffset));
2428 __ lbu(temp, FieldMemOperand(temp, Map::kBitFieldOffset));
2429 __ And(at, temp, Operand(1 << Map::kIsUndetectable));
2430 EmitBranch(instr, ne, at, Operand(zero_reg));
2431 }
2432
2433
2434 static Condition ComputeCompareCondition(Token::Value op) {
2435 switch (op) {
2436 case Token::EQ_STRICT:
2437 case Token::EQ:
2438 return eq;
2439 case Token::LT:
2440 return lt;
2441 case Token::GT:
2442 return gt;
2443 case Token::LTE:
2444 return le;
2445 case Token::GTE:
2446 return ge;
2447 default:
2448 UNREACHABLE();
2449 return kNoCondition;
2450 }
2451 }
2452
2453
2454 void LCodeGen::DoStringCompareAndBranch(LStringCompareAndBranch* instr) {
2455 DCHECK(ToRegister(instr->context()).is(cp));
2456 DCHECK(ToRegister(instr->left()).is(a1));
2457 DCHECK(ToRegister(instr->right()).is(a0));
2458
2459 Handle<Code> code = CodeFactory::StringCompare(isolate()).code();
2460 CallCode(code, RelocInfo::CODE_TARGET, instr);
2461
2462 EmitBranch(instr, ComputeCompareCondition(instr->op()), v0,
2463 Operand(zero_reg));
2464 }
2465
2466
2467 static InstanceType TestType(HHasInstanceTypeAndBranch* instr) {
2468 InstanceType from = instr->from();
2469 InstanceType to = instr->to();
2470 if (from == FIRST_TYPE) return to;
2471 DCHECK(from == to || to == LAST_TYPE);
2472 return from;
2473 }
2474
2475
2476 static Condition BranchCondition(HHasInstanceTypeAndBranch* instr) {
2477 InstanceType from = instr->from();
2478 InstanceType to = instr->to();
2479 if (from == to) return eq;
2480 if (to == LAST_TYPE) return hs;
2481 if (from == FIRST_TYPE) return ls;
2482 UNREACHABLE();
2483 return eq;
2484 }
2485
2486
2487 void LCodeGen::DoHasInstanceTypeAndBranch(LHasInstanceTypeAndBranch* instr) {
2488 Register scratch = scratch0();
2489 Register input = ToRegister(instr->value());
2490
2491 if (!instr->hydrogen()->value()->type().IsHeapObject()) {
2492 __ JumpIfSmi(input, instr->FalseLabel(chunk_));
2493 }
2494
2495 __ GetObjectType(input, scratch, scratch);
2496 EmitBranch(instr,
2497 BranchCondition(instr->hydrogen()),
2498 scratch,
2499 Operand(TestType(instr->hydrogen())));
2500 }
2501
2502
2503 void LCodeGen::DoGetCachedArrayIndex(LGetCachedArrayIndex* instr) {
2504 Register input = ToRegister(instr->value());
2505 Register result = ToRegister(instr->result());
2506
2507 __ AssertString(input);
2508
2509 __ lw(result, FieldMemOperand(input, String::kHashFieldOffset));
2510 __ IndexFromHash(result, result);
2511 }
2512
2513
2514 void LCodeGen::DoHasCachedArrayIndexAndBranch(
2515 LHasCachedArrayIndexAndBranch* instr) {
2516 Register input = ToRegister(instr->value());
2517 Register scratch = scratch0();
2518
2519 __ lw(scratch,
2520 FieldMemOperand(input, String::kHashFieldOffset));
2521 __ And(at, scratch, Operand(String::kContainsCachedArrayIndexMask));
2522 EmitBranch(instr, eq, at, Operand(zero_reg));
2523 }
2524
2525
2526 // Branches to a label or falls through with the instance class name in the
2527 // temp register for the caller to compare. Trashes the temps, not the input.
2528 void LCodeGen::EmitClassOfTest(Label* is_true,
2529 Label* is_false,
2530                                Handle<String> class_name,
2531 Register input,
2532 Register temp,
2533 Register temp2) {
2534 DCHECK(!input.is(temp));
2535 DCHECK(!input.is(temp2));
2536 DCHECK(!temp.is(temp2));
2537
2538 __ JumpIfSmi(input, is_false);
2539
2540 if (String::Equals(isolate()->factory()->Function_string(), class_name)) {
2541 // Assuming the following assertions, we can use the same compares to test
2542 // for both being a function type and being in the object type range.
2543 STATIC_ASSERT(NUM_OF_CALLABLE_SPEC_OBJECT_TYPES == 2);
2544 STATIC_ASSERT(FIRST_NONCALLABLE_SPEC_OBJECT_TYPE ==
2545 FIRST_SPEC_OBJECT_TYPE + 1);
2546 STATIC_ASSERT(LAST_NONCALLABLE_SPEC_OBJECT_TYPE ==
2547 LAST_SPEC_OBJECT_TYPE - 1);
2548 STATIC_ASSERT(LAST_SPEC_OBJECT_TYPE == LAST_TYPE);
2549
2550 __ GetObjectType(input, temp, temp2);
2551 __ Branch(is_false, lt, temp2, Operand(FIRST_SPEC_OBJECT_TYPE));
2552 __ Branch(is_true, eq, temp2, Operand(FIRST_SPEC_OBJECT_TYPE));
2553 __ Branch(is_true, eq, temp2, Operand(LAST_SPEC_OBJECT_TYPE));
2554 } else {
2555 // Faster code path to avoid two compares: subtract lower bound from the
2556 // actual type and do a signed compare with the width of the type range.
2557 __ GetObjectType(input, temp, temp2);
2558 __ Subu(temp2, temp2, Operand(FIRST_NONCALLABLE_SPEC_OBJECT_TYPE));
2559 __ Branch(is_false, gt, temp2, Operand(LAST_NONCALLABLE_SPEC_OBJECT_TYPE -
2560 FIRST_NONCALLABLE_SPEC_OBJECT_TYPE));
2561 }
2562
2563 // Now we are in the FIRST-LAST_NONCALLABLE_SPEC_OBJECT_TYPE range.
2564 // Check if the constructor in the map is a function.
2565 Register instance_type = scratch1();
2566 DCHECK(!instance_type.is(temp));
2567 __ GetMapConstructor(temp, temp, temp2, instance_type);
2568
2569 // Objects with a non-function constructor have class 'Object'.
2570 if (String::Equals(class_name, isolate()->factory()->Object_string())) {
2571 __ Branch(is_true, ne, instance_type, Operand(JS_FUNCTION_TYPE));
2572 } else {
2573 __ Branch(is_false, ne, instance_type, Operand(JS_FUNCTION_TYPE));
2574 }
2575
2576 // temp now contains the constructor function. Grab the
2577 // instance class name from there.
2578 __ lw(temp, FieldMemOperand(temp, JSFunction::kSharedFunctionInfoOffset));
2579 __ lw(temp, FieldMemOperand(temp,
2580 SharedFunctionInfo::kInstanceClassNameOffset));
2581 // The class name we are testing against is internalized since it's a literal.
2582 // The name in the constructor is internalized because of the way the context
2583 // is booted. This routine isn't expected to work for random API-created
2584 // classes and it doesn't have to because you can't access it with natives
2585 // syntax. Since both sides are internalized it is sufficient to use an
2586 // identity comparison.
2587
2588 // End with the address of this class_name instance in temp register.
2589   // On MIPS, the caller must do the comparison with Handle<String> class_name.
2590 }
2591
2592
2593 void LCodeGen::DoClassOfTestAndBranch(LClassOfTestAndBranch* instr) {
2594 Register input = ToRegister(instr->value());
2595 Register temp = scratch0();
2596 Register temp2 = ToRegister(instr->temp());
2597 Handle<String> class_name = instr->hydrogen()->class_name();
2598
2599 EmitClassOfTest(instr->TrueLabel(chunk_), instr->FalseLabel(chunk_),
2600 class_name, input, temp, temp2);
2601
2602 EmitBranch(instr, eq, temp, Operand(class_name));
2603 }
2604
2605
2606 void LCodeGen::DoCmpMapAndBranch(LCmpMapAndBranch* instr) {
2607 Register reg = ToRegister(instr->value());
2608 Register temp = ToRegister(instr->temp());
2609
2610 __ lw(temp, FieldMemOperand(reg, HeapObject::kMapOffset));
2611 EmitBranch(instr, eq, temp, Operand(instr->map()));
2612 }
2613
2614
2615 void LCodeGen::DoInstanceOf(LInstanceOf* instr) {
2616 DCHECK(ToRegister(instr->context()).is(cp));
2617 DCHECK(ToRegister(instr->left()).is(InstanceOfDescriptor::LeftRegister()));
2618 DCHECK(ToRegister(instr->right()).is(InstanceOfDescriptor::RightRegister()));
2619 DCHECK(ToRegister(instr->result()).is(v0));
2620 InstanceOfStub stub(isolate());
2621 CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
2622 }
2623
2624
2625 void LCodeGen::DoHasInPrototypeChainAndBranch(
2626 LHasInPrototypeChainAndBranch* instr) {
2627 Register const object = ToRegister(instr->object());
2628 Register const object_map = scratch0();
2629 Register const object_prototype = object_map;
2630 Register const prototype = ToRegister(instr->prototype());
2631
2632 // The {object} must be a spec object. It's sufficient to know that {object}
2633 // is not a smi, since all other non-spec objects have {null} prototypes and
2634 // will be ruled out below.
2635 if (instr->hydrogen()->ObjectNeedsSmiCheck()) {
2636 __ SmiTst(object, at);
2637 EmitFalseBranch(instr, eq, at, Operand(zero_reg));
2638 }
2639 // Loop through the {object}s prototype chain looking for the {prototype}.
2640 __ lw(object_map, FieldMemOperand(object, HeapObject::kMapOffset));
2641 Label loop;
2642 __ bind(&loop);
2643 __ lw(object_prototype, FieldMemOperand(object_map, Map::kPrototypeOffset));
2644 EmitTrueBranch(instr, eq, object_prototype, Operand(prototype));
2645 __ LoadRoot(at, Heap::kNullValueRootIndex);
2646 EmitFalseBranch(instr, eq, object_prototype, Operand(at));
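  // The back branch uses its delay slot to load the map of the next
  // prototype in the chain.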
2647 __ Branch(USE_DELAY_SLOT, &loop);
2648 __ lw(object_map, FieldMemOperand(object_prototype, HeapObject::kMapOffset));
2649 }
2650
2651
2652 void LCodeGen::DoCmpT(LCmpT* instr) {
2653 DCHECK(ToRegister(instr->context()).is(cp));
2654 Token::Value op = instr->op();
2655
2656 Handle<Code> ic =
2657 CodeFactory::CompareIC(isolate(), op, instr->strength()).code();
2658 CallCode(ic, RelocInfo::CODE_TARGET, instr);
2659 // On MIPS there is no need for a "no inlined smi code" marker (nop).
2660
2661 Condition condition = ComputeCompareCondition(op);
2662 // A minor optimization that relies on LoadRoot always emitting one
2663 // instruction.
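  // The LoadRoot of the true value sits in the branch delay slot and thus
  // always executes; when the branch is not taken, the LoadRoot of the
  // false value simply overwrites it.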
2664 Assembler::BlockTrampolinePoolScope block_trampoline_pool(masm());
2665 Label done, check;
2666 __ Branch(USE_DELAY_SLOT, &done, condition, v0, Operand(zero_reg));
2667 __ bind(&check);
2668 __ LoadRoot(ToRegister(instr->result()), Heap::kTrueValueRootIndex);
2669 DCHECK_EQ(1, masm()->InstructionsGeneratedSince(&check));
2670 __ LoadRoot(ToRegister(instr->result()), Heap::kFalseValueRootIndex);
2671 __ bind(&done);
2672 }
2673
2674
2675 void LCodeGen::DoReturn(LReturn* instr) {
2676 if (FLAG_trace && info()->IsOptimizing()) {
2677 // Push the return value on the stack as the parameter.
2678 // Runtime::TraceExit returns its parameter in v0. We're leaving the code
2679     // managed by the register allocator and tearing down the frame, so it's
2680 // safe to write to the context register.
2681 __ push(v0);
2682 __ lw(cp, MemOperand(fp, StandardFrameConstants::kContextOffset));
2683 __ CallRuntime(Runtime::kTraceExit, 1);
2684 }
2685 if (info()->saves_caller_doubles()) {
2686 RestoreCallerDoubles();
2687 }
2688 if (NeedsEagerFrame()) {
2689 __ mov(sp, fp);
2690 __ Pop(ra, fp);
2691 }
2692 if (instr->has_constant_parameter_count()) {
2693 int parameter_count = ToInteger32(instr->constant_parameter_count());
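    // The "+ 1" drops the receiver slot in addition to the arguments.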
2694 int32_t sp_delta = (parameter_count + 1) * kPointerSize;
2695 if (sp_delta != 0) {
2696 __ Addu(sp, sp, Operand(sp_delta));
2697 }
2698 } else {
2699 DCHECK(info()->IsStub()); // Functions would need to drop one more value.
2700 Register reg = ToRegister(instr->parameter_count());
2701     // The argument count parameter is a smi.
2702 __ SmiUntag(reg);
2703 __ sll(at, reg, kPointerSizeLog2);
2704 __ Addu(sp, sp, at);
2705 }
2706
2707 __ Jump(ra);
2708 }
2709
2710
2711 template <class T>
2712 void LCodeGen::EmitVectorLoadICRegisters(T* instr) {
2713 Register vector_register = ToRegister(instr->temp_vector());
2714 Register slot_register = LoadWithVectorDescriptor::SlotRegister();
2715 DCHECK(vector_register.is(LoadWithVectorDescriptor::VectorRegister()));
2716 DCHECK(slot_register.is(a0));
2717
2718 AllowDeferredHandleDereference vector_structure_check;
2719 Handle<TypeFeedbackVector> vector = instr->hydrogen()->feedback_vector();
2720 __ li(vector_register, vector);
2721 // No need to allocate this register.
2722 FeedbackVectorSlot slot = instr->hydrogen()->slot();
2723 int index = vector->GetIndex(slot);
2724 __ li(slot_register, Operand(Smi::FromInt(index)));
2725 }
2726
2727
2728 template <class T>
2729 void LCodeGen::EmitVectorStoreICRegisters(T* instr) {
2730 Register vector_register = ToRegister(instr->temp_vector());
2731 Register slot_register = ToRegister(instr->temp_slot());
2732
2733 AllowDeferredHandleDereference vector_structure_check;
2734 Handle<TypeFeedbackVector> vector = instr->hydrogen()->feedback_vector();
2735 __ li(vector_register, vector);
2736 FeedbackVectorSlot slot = instr->hydrogen()->slot();
2737 int index = vector->GetIndex(slot);
2738 __ li(slot_register, Operand(Smi::FromInt(index)));
2739 }
2740
2741
2742 void LCodeGen::DoLoadGlobalGeneric(LLoadGlobalGeneric* instr) {
2743 DCHECK(ToRegister(instr->context()).is(cp));
2744 DCHECK(ToRegister(instr->global_object())
2745 .is(LoadDescriptor::ReceiverRegister()));
2746 DCHECK(ToRegister(instr->result()).is(v0));
2747
2748 __ li(LoadDescriptor::NameRegister(), Operand(instr->name()));
2749 EmitVectorLoadICRegisters<LLoadGlobalGeneric>(instr);
2750 Handle<Code> ic =
2751 CodeFactory::LoadICInOptimizedCode(isolate(), instr->typeof_mode(),
2752 SLOPPY, PREMONOMORPHIC).code();
2753 CallCode(ic, RelocInfo::CODE_TARGET, instr);
2754 }
2755
2756
2757 void LCodeGen::DoLoadGlobalViaContext(LLoadGlobalViaContext* instr) {
2758 DCHECK(ToRegister(instr->context()).is(cp));
2759 DCHECK(ToRegister(instr->result()).is(v0));
2760
2761 int const slot = instr->slot_index();
2762 int const depth = instr->depth();
2763 if (depth <= LoadGlobalViaContextStub::kMaximumDepth) {
2764 __ li(LoadGlobalViaContextDescriptor::SlotRegister(), Operand(slot));
2765 Handle<Code> stub =
2766 CodeFactory::LoadGlobalViaContext(isolate(), depth).code();
2767 CallCode(stub, RelocInfo::CODE_TARGET, instr);
2768 } else {
2769 __ Push(Smi::FromInt(slot));
2770 __ CallRuntime(Runtime::kLoadGlobalViaContext, 1);
2771 }
2772 }
2773
2774
2775 void LCodeGen::DoLoadContextSlot(LLoadContextSlot* instr) {
2776 Register context = ToRegister(instr->context());
2777 Register result = ToRegister(instr->result());
2778
2779 __ lw(result, ContextOperand(context, instr->slot_index()));
2780 if (instr->hydrogen()->RequiresHoleCheck()) {
2781 __ LoadRoot(at, Heap::kTheHoleValueRootIndex);
2782
2783 if (instr->hydrogen()->DeoptimizesOnHole()) {
2784 DeoptimizeIf(eq, instr, Deoptimizer::kHole, result, Operand(at));
2785 } else {
2786 Label is_not_hole;
2787 __ Branch(&is_not_hole, ne, result, Operand(at));
2788 __ LoadRoot(result, Heap::kUndefinedValueRootIndex);
2789 __ bind(&is_not_hole);
2790 }
2791 }
2792 }
2793
2794
2795 void LCodeGen::DoStoreContextSlot(LStoreContextSlot* instr) {
2796 Register context = ToRegister(instr->context());
2797 Register value = ToRegister(instr->value());
2798 Register scratch = scratch0();
2799 MemOperand target = ContextOperand(context, instr->slot_index());
2800
2801 Label skip_assignment;
2802
2803 if (instr->hydrogen()->RequiresHoleCheck()) {
2804 __ lw(scratch, target);
2805 __ LoadRoot(at, Heap::kTheHoleValueRootIndex);
2806
2807 if (instr->hydrogen()->DeoptimizesOnHole()) {
2808 DeoptimizeIf(eq, instr, Deoptimizer::kHole, scratch, Operand(at));
2809 } else {
2810 __ Branch(&skip_assignment, ne, scratch, Operand(at));
2811 }
2812 }
2813
2814 __ sw(value, target);
2815 if (instr->hydrogen()->NeedsWriteBarrier()) {
2816 SmiCheck check_needed =
2817 instr->hydrogen()->value()->type().IsHeapObject()
2818 ? OMIT_SMI_CHECK : INLINE_SMI_CHECK;
2819 __ RecordWriteContextSlot(context,
2820 target.offset(),
2821 value,
2822 scratch0(),
2823 GetRAState(),
2824 kSaveFPRegs,
2825 EMIT_REMEMBERED_SET,
2826 check_needed);
2827 }
2828
2829 __ bind(&skip_assignment);
2830 }
2831
2832
2833 void LCodeGen::DoLoadNamedField(LLoadNamedField* instr) {
2834 HObjectAccess access = instr->hydrogen()->access();
2835 int offset = access.offset();
2836 Register object = ToRegister(instr->object());
2837
2838 if (access.IsExternalMemory()) {
2839 Register result = ToRegister(instr->result());
2840 MemOperand operand = MemOperand(object, offset);
2841 __ Load(result, operand, access.representation());
2842 return;
2843 }
2844
2845 if (instr->hydrogen()->representation().IsDouble()) {
2846 DoubleRegister result = ToDoubleRegister(instr->result());
2847 __ ldc1(result, FieldMemOperand(object, offset));
2848 return;
2849 }
2850
2851 Register result = ToRegister(instr->result());
2852 if (!access.IsInobject()) {
2853 __ lw(result, FieldMemOperand(object, JSObject::kPropertiesOffset));
2854 object = result;
2855 }
2856 MemOperand operand = FieldMemOperand(object, offset);
2857 __ Load(result, operand, access.representation());
2858 }
2859
2860
2861 void LCodeGen::DoLoadNamedGeneric(LLoadNamedGeneric* instr) {
2862 DCHECK(ToRegister(instr->context()).is(cp));
2863 DCHECK(ToRegister(instr->object()).is(LoadDescriptor::ReceiverRegister()));
2864 DCHECK(ToRegister(instr->result()).is(v0));
2865
2866 // Name is always in a2.
2867 __ li(LoadDescriptor::NameRegister(), Operand(instr->name()));
2868 EmitVectorLoadICRegisters<LLoadNamedGeneric>(instr);
2869 Handle<Code> ic =
2870 CodeFactory::LoadICInOptimizedCode(
2871 isolate(), NOT_INSIDE_TYPEOF, instr->hydrogen()->language_mode(),
2872 instr->hydrogen()->initialization_state()).code();
2873 CallCode(ic, RelocInfo::CODE_TARGET, instr);
2874 }
2875
2876
2877 void LCodeGen::DoLoadFunctionPrototype(LLoadFunctionPrototype* instr) {
2878 Register scratch = scratch0();
2879 Register function = ToRegister(instr->function());
2880 Register result = ToRegister(instr->result());
2881
2882 // Get the prototype or initial map from the function.
2883 __ lw(result,
2884 FieldMemOperand(function, JSFunction::kPrototypeOrInitialMapOffset));
2885
2886 // Check that the function has a prototype or an initial map.
2887 __ LoadRoot(at, Heap::kTheHoleValueRootIndex);
2888 DeoptimizeIf(eq, instr, Deoptimizer::kHole, result, Operand(at));
2889
2890 // If the function does not have an initial map, we're done.
2891 Label done;
2892 __ GetObjectType(result, scratch, scratch);
2893 __ Branch(&done, ne, scratch, Operand(MAP_TYPE));
2894
2895 // Get the prototype from the initial map.
2896 __ lw(result, FieldMemOperand(result, Map::kPrototypeOffset));
2897
2898 // All done.
2899 __ bind(&done);
2900 }
2901
2902
2903 void LCodeGen::DoLoadRoot(LLoadRoot* instr) {
2904 Register result = ToRegister(instr->result());
2905 __ LoadRoot(result, instr->index());
2906 }
2907
2908
2909 void LCodeGen::DoAccessArgumentsAt(LAccessArgumentsAt* instr) {
2910 Register arguments = ToRegister(instr->arguments());
2911 Register result = ToRegister(instr->result());
2912 // There are two words between the frame pointer and the last argument.
2913   // Subtracting from length accounts for one of them; add one more.
2914 if (instr->length()->IsConstantOperand()) {
2915 int const_length = ToInteger32(LConstantOperand::cast(instr->length()));
2916 if (instr->index()->IsConstantOperand()) {
2917 int const_index = ToInteger32(LConstantOperand::cast(instr->index()));
2918 int index = (const_length - const_index) + 1;
2919 __ lw(result, MemOperand(arguments, index * kPointerSize));
2920 } else {
2921 Register index = ToRegister(instr->index());
2922 __ li(at, Operand(const_length + 1));
2923 __ Subu(result, at, index);
2924 __ sll(at, result, kPointerSizeLog2);
2925 __ Addu(at, arguments, at);
2926 __ lw(result, MemOperand(at));
2927 }
2928 } else if (instr->index()->IsConstantOperand()) {
2929 Register length = ToRegister(instr->length());
2930 int const_index = ToInteger32(LConstantOperand::cast(instr->index()));
2931 int loc = const_index - 1;
2932 if (loc != 0) {
2933 __ Subu(result, length, Operand(loc));
2934 __ sll(at, result, kPointerSizeLog2);
2935 __ Addu(at, arguments, at);
2936 __ lw(result, MemOperand(at));
2937 } else {
2938 __ sll(at, length, kPointerSizeLog2);
2939 __ Addu(at, arguments, at);
2940 __ lw(result, MemOperand(at));
2941 }
2942 } else {
2943 Register length = ToRegister(instr->length());
2944 Register index = ToRegister(instr->index());
2945 __ Subu(result, length, index);
2946 __ Addu(result, result, 1);
2947 __ sll(at, result, kPointerSizeLog2);
2948 __ Addu(at, arguments, at);
2949 __ lw(result, MemOperand(at));
2950 }
2951 }
2952
2953
2954 void LCodeGen::DoLoadKeyedExternalArray(LLoadKeyed* instr) {
2955 Register external_pointer = ToRegister(instr->elements());
2956 Register key = no_reg;
2957 ElementsKind elements_kind = instr->elements_kind();
2958 bool key_is_constant = instr->key()->IsConstantOperand();
2959 int constant_key = 0;
2960 if (key_is_constant) {
2961 constant_key = ToInteger32(LConstantOperand::cast(instr->key()));
2962 if (constant_key & 0xF0000000) {
2963 Abort(kArrayIndexConstantValueTooBig);
2964 }
2965 } else {
2966 key = ToRegister(instr->key());
2967 }
2968 int element_size_shift = ElementsKindToShiftSize(elements_kind);
2969 int shift_size = (instr->hydrogen()->key()->representation().IsSmi())
2970 ? (element_size_shift - kSmiTagSize) : element_size_shift;
2971 int base_offset = instr->base_offset();
2972
2973 if (elements_kind == FLOAT32_ELEMENTS || elements_kind == FLOAT64_ELEMENTS) {
2974 FPURegister result = ToDoubleRegister(instr->result());
2975 if (key_is_constant) {
2976 __ Addu(scratch0(), external_pointer, constant_key << element_size_shift);
2977 } else {
2978 __ sll(scratch0(), key, shift_size);
2979 __ Addu(scratch0(), scratch0(), external_pointer);
2980 }
2981 if (elements_kind == FLOAT32_ELEMENTS) {
2982 __ lwc1(result, MemOperand(scratch0(), base_offset));
2983 __ cvt_d_s(result, result);
2984     } else {  // i.e. elements_kind == FLOAT64_ELEMENTS
2985 __ ldc1(result, MemOperand(scratch0(), base_offset));
2986 }
2987 } else {
2988 Register result = ToRegister(instr->result());
2989 MemOperand mem_operand = PrepareKeyedOperand(
2990 key, external_pointer, key_is_constant, constant_key,
2991 element_size_shift, shift_size, base_offset);
2992 switch (elements_kind) {
2993 case INT8_ELEMENTS:
2994 __ lb(result, mem_operand);
2995 break;
2996 case UINT8_ELEMENTS:
2997 case UINT8_CLAMPED_ELEMENTS:
2998 __ lbu(result, mem_operand);
2999 break;
3000 case INT16_ELEMENTS:
3001 __ lh(result, mem_operand);
3002 break;
3003 case UINT16_ELEMENTS:
3004 __ lhu(result, mem_operand);
3005 break;
3006 case INT32_ELEMENTS:
3007 __ lw(result, mem_operand);
3008 break;
3009 case UINT32_ELEMENTS:
3010 __ lw(result, mem_operand);
3011 if (!instr->hydrogen()->CheckFlag(HInstruction::kUint32)) {
3012 DeoptimizeIf(Ugreater_equal, instr, Deoptimizer::kNegativeValue,
3013 result, Operand(0x80000000));
3014 }
3015 break;
3016 case FLOAT32_ELEMENTS:
3017 case FLOAT64_ELEMENTS:
3018 case FAST_DOUBLE_ELEMENTS:
3019 case FAST_ELEMENTS:
3020 case FAST_SMI_ELEMENTS:
3021 case FAST_HOLEY_DOUBLE_ELEMENTS:
3022 case FAST_HOLEY_ELEMENTS:
3023 case FAST_HOLEY_SMI_ELEMENTS:
3024 case DICTIONARY_ELEMENTS:
3025 case FAST_SLOPPY_ARGUMENTS_ELEMENTS:
3026 case SLOW_SLOPPY_ARGUMENTS_ELEMENTS:
3027 UNREACHABLE();
3028 break;
3029 }
3030 }
3031 }
3032
3033
3034 void LCodeGen::DoLoadKeyedFixedDoubleArray(LLoadKeyed* instr) {
3035 Register elements = ToRegister(instr->elements());
3036 bool key_is_constant = instr->key()->IsConstantOperand();
3037 Register key = no_reg;
3038 DoubleRegister result = ToDoubleRegister(instr->result());
3039 Register scratch = scratch0();
3040
3041 int element_size_shift = ElementsKindToShiftSize(FAST_DOUBLE_ELEMENTS);
3042
3043 int base_offset = instr->base_offset();
3044 if (key_is_constant) {
3045 int constant_key = ToInteger32(LConstantOperand::cast(instr->key()));
3046 if (constant_key & 0xF0000000) {
3047 Abort(kArrayIndexConstantValueTooBig);
3048 }
3049 base_offset += constant_key * kDoubleSize;
3050 }
3051 __ Addu(scratch, elements, Operand(base_offset));
3052
3053 if (!key_is_constant) {
3054 key = ToRegister(instr->key());
3055 int shift_size = (instr->hydrogen()->key()->representation().IsSmi())
3056 ? (element_size_shift - kSmiTagSize) : element_size_shift;
3057 __ sll(at, key, shift_size);
3058 __ Addu(scratch, scratch, at);
3059 }
3060
3061 __ ldc1(result, MemOperand(scratch));
3062
3063 if (instr->hydrogen()->RequiresHoleCheck()) {
3064 __ lw(scratch, MemOperand(scratch, kHoleNanUpper32Offset));
3065 DeoptimizeIf(eq, instr, Deoptimizer::kHole, scratch,
3066 Operand(kHoleNanUpper32));
3067 }
3068 }
3069
3070
3071 void LCodeGen::DoLoadKeyedFixedArray(LLoadKeyed* instr) {
3072 Register elements = ToRegister(instr->elements());
3073 Register result = ToRegister(instr->result());
3074 Register scratch = scratch0();
3075 Register store_base = scratch;
3076 int offset = instr->base_offset();
3077
3078 if (instr->key()->IsConstantOperand()) {
3079 LConstantOperand* const_operand = LConstantOperand::cast(instr->key());
3080 offset += ToInteger32(const_operand) * kPointerSize;
3081 store_base = elements;
3082 } else {
3083 Register key = ToRegister(instr->key());
3084 // Even though the HLoadKeyed instruction forces the input
3085 // representation for the key to be an integer, the input gets replaced
3086     // during bounds check elimination with the index argument to the bounds
3087 // check, which can be tagged, so that case must be handled here, too.
3088 if (instr->hydrogen()->key()->representation().IsSmi()) {
3089 __ sll(scratch, key, kPointerSizeLog2 - kSmiTagSize);
3090 __ addu(scratch, elements, scratch);
3091 } else {
3092 __ sll(scratch, key, kPointerSizeLog2);
3093 __ addu(scratch, elements, scratch);
3094 }
3095 }
3096 __ lw(result, MemOperand(store_base, offset));
3097
3098 // Check for the hole value.
3099 if (instr->hydrogen()->RequiresHoleCheck()) {
3100 if (IsFastSmiElementsKind(instr->hydrogen()->elements_kind())) {
3101 __ SmiTst(result, scratch);
3102 DeoptimizeIf(ne, instr, Deoptimizer::kNotASmi, scratch,
3103 Operand(zero_reg));
3104 } else {
3105 __ LoadRoot(scratch, Heap::kTheHoleValueRootIndex);
3106 DeoptimizeIf(eq, instr, Deoptimizer::kHole, result, Operand(scratch));
3107 }
3108 } else if (instr->hydrogen()->hole_mode() == CONVERT_HOLE_TO_UNDEFINED) {
3109 DCHECK(instr->hydrogen()->elements_kind() == FAST_HOLEY_ELEMENTS);
3110 Label done;
3111 __ LoadRoot(scratch, Heap::kTheHoleValueRootIndex);
3112 __ Branch(&done, ne, result, Operand(scratch));
3113 if (info()->IsStub()) {
3114 // A stub can safely convert the hole to undefined only if the array
3115 // protector cell contains (Smi) Isolate::kArrayProtectorValid. Otherwise
3116 // it needs to bail out.
3117 __ LoadRoot(result, Heap::kArrayProtectorRootIndex);
3118 __ lw(result, FieldMemOperand(result, Cell::kValueOffset));
3119 DeoptimizeIf(ne, instr, Deoptimizer::kHole, result,
3120 Operand(Smi::FromInt(Isolate::kArrayProtectorValid)));
3121 }
3122 __ LoadRoot(result, Heap::kUndefinedValueRootIndex);
3123 __ bind(&done);
3124 }
3125 }
3126
3127
3128 void LCodeGen::DoLoadKeyed(LLoadKeyed* instr) {
3129 if (instr->is_fixed_typed_array()) {
3130 DoLoadKeyedExternalArray(instr);
3131 } else if (instr->hydrogen()->representation().IsDouble()) {
3132 DoLoadKeyedFixedDoubleArray(instr);
3133 } else {
3134 DoLoadKeyedFixedArray(instr);
3135 }
3136 }
3137
3138
3139 MemOperand LCodeGen::PrepareKeyedOperand(Register key,
3140 Register base,
3141 bool key_is_constant,
3142 int constant_key,
3143 int element_size,
3144 int shift_size,
3145 int base_offset) {
3146 if (key_is_constant) {
3147 return MemOperand(base, (constant_key << element_size) + base_offset);
3148 }
3149
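  // A shift_size of -1 means the key is a smi and the elements are byte
  // sized, so shifting the key right by one untags it.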
3150 if (base_offset == 0) {
3151 if (shift_size >= 0) {
3152 __ sll(scratch0(), key, shift_size);
3153 __ Addu(scratch0(), base, scratch0());
3154 return MemOperand(scratch0());
3155 } else {
3156 DCHECK_EQ(-1, shift_size);
3157 __ srl(scratch0(), key, 1);
3158 __ Addu(scratch0(), base, scratch0());
3159 return MemOperand(scratch0());
3160 }
3161 }
3162
3163 if (shift_size >= 0) {
3164 __ sll(scratch0(), key, shift_size);
3165 __ Addu(scratch0(), base, scratch0());
3166 return MemOperand(scratch0(), base_offset);
3167 } else {
3168 DCHECK_EQ(-1, shift_size);
3169 __ sra(scratch0(), key, 1);
3170 __ Addu(scratch0(), base, scratch0());
3171 return MemOperand(scratch0(), base_offset);
3172 }
3173 }
3174
3175
3176 void LCodeGen::DoLoadKeyedGeneric(LLoadKeyedGeneric* instr) {
3177 DCHECK(ToRegister(instr->context()).is(cp));
3178 DCHECK(ToRegister(instr->object()).is(LoadDescriptor::ReceiverRegister()));
3179 DCHECK(ToRegister(instr->key()).is(LoadDescriptor::NameRegister()));
3180
3181 if (instr->hydrogen()->HasVectorAndSlot()) {
3182 EmitVectorLoadICRegisters<LLoadKeyedGeneric>(instr);
3183 }
3184
3185 Handle<Code> ic = CodeFactory::KeyedLoadICInOptimizedCode(
3186 isolate(), instr->hydrogen()->language_mode(),
3187 instr->hydrogen()->initialization_state()).code();
3188 CallCode(ic, RelocInfo::CODE_TARGET, instr);
3189 }
3190
3191
3192 void LCodeGen::DoArgumentsElements(LArgumentsElements* instr) {
3193 Register scratch = scratch0();
3194 Register temp = scratch1();
3195 Register result = ToRegister(instr->result());
3196
3197 if (instr->hydrogen()->from_inlined()) {
3198 __ Subu(result, sp, 2 * kPointerSize);
3199 } else {
3200 // Check if the calling frame is an arguments adaptor frame.
3201 Label done, adapted;
3202 __ lw(scratch, MemOperand(fp, StandardFrameConstants::kCallerFPOffset));
3203 __ lw(result, MemOperand(scratch, StandardFrameConstants::kContextOffset));
3204 __ Xor(temp, result, Operand(Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR)));
3205
3206 // Result is the frame pointer for the frame if not adapted and for the real
3207 // frame below the adaptor frame if adapted.
3208 __ Movn(result, fp, temp); // Move only if temp is not equal to zero (ne).
3209 __ Movz(result, scratch, temp); // Move only if temp is equal to zero (eq).
3210 }
3211 }
3212
3213
3214 void LCodeGen::DoArgumentsLength(LArgumentsLength* instr) {
3215 Register elem = ToRegister(instr->elements());
3216 Register result = ToRegister(instr->result());
3217
3218 Label done;
3219
3220   // If there is no arguments adaptor frame, the number of arguments is fixed.
3221 __ Addu(result, zero_reg, Operand(scope()->num_parameters()));
3222 __ Branch(&done, eq, fp, Operand(elem));
3223
3224 // Arguments adaptor frame present. Get argument length from there.
3225 __ lw(result, MemOperand(fp, StandardFrameConstants::kCallerFPOffset));
3226 __ lw(result,
3227 MemOperand(result, ArgumentsAdaptorFrameConstants::kLengthOffset));
3228 __ SmiUntag(result);
3229
3230 // Argument length is in result register.
3231 __ bind(&done);
3232 }
3233
3234
3235 void LCodeGen::DoWrapReceiver(LWrapReceiver* instr) {
3236 Register receiver = ToRegister(instr->receiver());
3237 Register function = ToRegister(instr->function());
3238 Register result = ToRegister(instr->result());
3239 Register scratch = scratch0();
3240
3241 // If the receiver is null or undefined, we have to pass the global
3242 // object as a receiver to normal functions. Values have to be
3243 // passed unchanged to builtins and strict-mode functions.
3244 Label global_object, result_in_receiver;
3245
3246 if (!instr->hydrogen()->known_function()) {
3247 // Do not transform the receiver to object for strict mode
3248 // functions.
3249 __ lw(scratch,
3250 FieldMemOperand(function, JSFunction::kSharedFunctionInfoOffset));
3251 __ lw(scratch,
3252 FieldMemOperand(scratch, SharedFunctionInfo::kCompilerHintsOffset));
3253
3254 // Do not transform the receiver to object for builtins.
3255 int32_t strict_mode_function_mask =
3256 1 << (SharedFunctionInfo::kStrictModeFunction + kSmiTagSize);
3257 int32_t native_mask = 1 << (SharedFunctionInfo::kNative + kSmiTagSize);
3258 __ And(scratch, scratch, Operand(strict_mode_function_mask | native_mask));
3259 __ Branch(&result_in_receiver, ne, scratch, Operand(zero_reg));
3260 }
3261
3262 // Normal function. Replace undefined or null with global receiver.
3263 __ LoadRoot(scratch, Heap::kNullValueRootIndex);
3264 __ Branch(&global_object, eq, receiver, Operand(scratch));
3265 __ LoadRoot(scratch, Heap::kUndefinedValueRootIndex);
3266 __ Branch(&global_object, eq, receiver, Operand(scratch));
3267
3268 // Deoptimize if the receiver is not a JS object.
3269 __ SmiTst(receiver, scratch);
3270 DeoptimizeIf(eq, instr, Deoptimizer::kSmi, scratch, Operand(zero_reg));
3271
3272 __ GetObjectType(receiver, scratch, scratch);
3273 DeoptimizeIf(lt, instr, Deoptimizer::kNotAJavaScriptObject, scratch,
3274 Operand(FIRST_SPEC_OBJECT_TYPE));
3275
3276 __ Branch(&result_in_receiver);
3277 __ bind(&global_object);
3278 __ lw(result, FieldMemOperand(function, JSFunction::kContextOffset));
3279 __ lw(result,
3280 ContextOperand(result, Context::GLOBAL_OBJECT_INDEX));
3281 __ lw(result,
3282 FieldMemOperand(result, GlobalObject::kGlobalProxyOffset));
3283
3284 if (result.is(receiver)) {
3285 __ bind(&result_in_receiver);
3286 } else {
3287 Label result_ok;
3288 __ Branch(&result_ok);
3289 __ bind(&result_in_receiver);
3290 __ mov(result, receiver);
3291 __ bind(&result_ok);
3292 }
3293 }
3294
3295
3296 void LCodeGen::DoApplyArguments(LApplyArguments* instr) {
3297 Register receiver = ToRegister(instr->receiver());
3298 Register function = ToRegister(instr->function());
3299 Register length = ToRegister(instr->length());
3300 Register elements = ToRegister(instr->elements());
3301 Register scratch = scratch0();
3302 DCHECK(receiver.is(a0)); // Used for parameter count.
3303 DCHECK(function.is(a1)); // Required by InvokeFunction.
3304 DCHECK(ToRegister(instr->result()).is(v0));
3305
3306 // Copy the arguments to this function possibly from the
3307 // adaptor frame below it.
3308 const uint32_t kArgumentsLimit = 1 * KB;
3309 DeoptimizeIf(hi, instr, Deoptimizer::kTooManyArguments, length,
3310 Operand(kArgumentsLimit));
3311
3312 // Push the receiver and use the register to keep the original
3313 // number of arguments.
3314 __ push(receiver);
3315 __ Move(receiver, length);
3316 // The arguments are at a one pointer size offset from elements.
3317 __ Addu(elements, elements, Operand(1 * kPointerSize));
3318
3319 // Loop through the arguments pushing them onto the execution
3320 // stack.
3321 Label invoke, loop;
3322 // length is a small non-negative integer, due to the test above.
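// The sll instructions below execute in the branch delay slots and compute
// the byte offset of the next argument to copy.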
3323 __ Branch(USE_DELAY_SLOT, &invoke, eq, length, Operand(zero_reg));
3324 __ sll(scratch, length, 2);
3325 __ bind(&loop);
3326 __ Addu(scratch, elements, scratch);
3327 __ lw(scratch, MemOperand(scratch));
3328 __ push(scratch);
3329 __ Subu(length, length, Operand(1));
3330 __ Branch(USE_DELAY_SLOT, &loop, ne, length, Operand(zero_reg));
3331 __ sll(scratch, length, 2);
3332
3333 __ bind(&invoke);
3334 DCHECK(instr->HasPointerMap());
3335 LPointerMap* pointers = instr->pointer_map();
3336 SafepointGenerator safepoint_generator(
3337 this, pointers, Safepoint::kLazyDeopt);
3338 // The number of arguments is stored in receiver, which is a0, as expected
3339 // by InvokeFunction.
3340 ParameterCount actual(receiver);
3341 __ InvokeFunction(function, actual, CALL_FUNCTION, safepoint_generator);
3342 }
3343
3344
3345 void LCodeGen::DoPushArgument(LPushArgument* instr) {
3346 LOperand* argument = instr->value();
3347 if (argument->IsDoubleRegister() || argument->IsDoubleStackSlot()) {
3348 Abort(kDoPushArgumentNotImplementedForDoubleType);
3349 } else {
3350 Register argument_reg = EmitLoadRegister(argument, at);
3351 __ push(argument_reg);
3352 }
3353 }
3354
3355
3356 void LCodeGen::DoDrop(LDrop* instr) {
3357 __ Drop(instr->count());
3358 }
3359
3360
3361 void LCodeGen::DoThisFunction(LThisFunction* instr) {
3362 Register result = ToRegister(instr->result());
3363 __ lw(result, MemOperand(fp, JavaScriptFrameConstants::kFunctionOffset));
3364 }
3365
3366
3367 void LCodeGen::DoContext(LContext* instr) {
3368 // If there is a non-return use, the context must be moved to a register.
3369 Register result = ToRegister(instr->result());
3370 if (info()->IsOptimizing()) {
3371 __ lw(result, MemOperand(fp, StandardFrameConstants::kContextOffset));
3372 } else {
3373 // If there is no frame, the context must be in cp.
3374 DCHECK(result.is(cp));
3375 }
3376 }
3377
3378
3379 void LCodeGen::DoDeclareGlobals(LDeclareGlobals* instr) {
3380 DCHECK(ToRegister(instr->context()).is(cp));
3381 __ li(scratch0(), instr->hydrogen()->pairs());
3382 __ li(scratch1(), Operand(Smi::FromInt(instr->hydrogen()->flags())));
3383 __ Push(scratch0(), scratch1());
3384 CallRuntime(Runtime::kDeclareGlobals, 2, instr);
3385 }
3386
3387
3388 void LCodeGen::CallKnownFunction(Handle<JSFunction> function,
3389 int formal_parameter_count, int arity,
3390 LInstruction* instr) {
3391 bool dont_adapt_arguments =
3392 formal_parameter_count == SharedFunctionInfo::kDontAdaptArgumentsSentinel;
3393 bool can_invoke_directly =
3394 dont_adapt_arguments || formal_parameter_count == arity;
3395
3396 Register function_reg = a1;
3397 LPointerMap* pointers = instr->pointer_map();
3398
3399 if (can_invoke_directly) {
3400 // Change context.
3401 __ lw(cp, FieldMemOperand(function_reg, JSFunction::kContextOffset));
3402
3403 // Always initialize a0 to the number of actual arguments.
3404 __ li(a0, Operand(arity));
3405
3406 // Invoke function.
3407 __ lw(at, FieldMemOperand(function_reg, JSFunction::kCodeEntryOffset));
3408 __ Call(at);
3409
3410 // Set up deoptimization.
3411 RecordSafepointWithLazyDeopt(instr, RECORD_SIMPLE_SAFEPOINT);
3412 } else {
3413 SafepointGenerator generator(this, pointers, Safepoint::kLazyDeopt);
3414 ParameterCount count(arity);
3415 ParameterCount expected(formal_parameter_count);
3416 __ InvokeFunction(function_reg, expected, count, CALL_FUNCTION, generator);
3417 }
3418 }
3419
3420
3421 void LCodeGen::DoDeferredMathAbsTaggedHeapNumber(LMathAbs* instr) {
3422 DCHECK(instr->context() != NULL);
3423 DCHECK(ToRegister(instr->context()).is(cp));
3424 Register input = ToRegister(instr->value());
3425 Register result = ToRegister(instr->result());
3426 Register scratch = scratch0();
3427
3428 // Deoptimize if not a heap number.
3429 __ lw(scratch, FieldMemOperand(input, HeapObject::kMapOffset));
3430 __ LoadRoot(at, Heap::kHeapNumberMapRootIndex);
3431 DeoptimizeIf(ne, instr, Deoptimizer::kNotAHeapNumber, scratch, Operand(at));
3432
3433 Label done;
3434 Register exponent = scratch0();
3435 scratch = no_reg;
3436 __ lw(exponent, FieldMemOperand(input, HeapNumber::kExponentOffset));
3437 // Check the sign of the argument. If the argument is positive, just
3438 // return it.
3439 __ Move(result, input);
3440 __ And(at, exponent, Operand(HeapNumber::kSignMask));
3441 __ Branch(&done, eq, at, Operand(zero_reg));
3442
3443 // Input is negative. Reverse its sign.
3444 // Preserve the value of all registers.
3445 {
3446 PushSafepointRegistersScope scope(this);
3447
3448 // Registers were saved at the safepoint, so we can use
3449 // many scratch registers.
3450 Register tmp1 = input.is(a1) ? a0 : a1;
3451 Register tmp2 = input.is(a2) ? a0 : a2;
3452 Register tmp3 = input.is(a3) ? a0 : a3;
3453 Register tmp4 = input.is(t0) ? a0 : t0;
3454
3455 // exponent: floating point exponent value.
3456
3457 Label allocated, slow;
3458 __ LoadRoot(tmp4, Heap::kHeapNumberMapRootIndex);
3459 __ AllocateHeapNumber(tmp1, tmp2, tmp3, tmp4, &slow);
3460 __ Branch(&allocated);
3461
3462 // Slow case: Call the runtime system to do the number allocation.
3463 __ bind(&slow);
3464
3465 CallRuntimeFromDeferred(Runtime::kAllocateHeapNumber, 0, instr,
3466 instr->context());
3467 // Set the pointer to the new heap number in tmp1.
3468 if (!tmp1.is(v0))
3469 __ mov(tmp1, v0);
3470 // Restore input_reg after call to runtime.
3471 __ LoadFromSafepointRegisterSlot(input, input);
3472 __ lw(exponent, FieldMemOperand(input, HeapNumber::kExponentOffset));
3473
3474 __ bind(&allocated);
3475 // exponent: floating point exponent value.
3476 // tmp1: allocated heap number.
3477 __ And(exponent, exponent, Operand(~HeapNumber::kSignMask));
3478 __ sw(exponent, FieldMemOperand(tmp1, HeapNumber::kExponentOffset));
3479 __ lw(tmp2, FieldMemOperand(input, HeapNumber::kMantissaOffset));
3480 __ sw(tmp2, FieldMemOperand(tmp1, HeapNumber::kMantissaOffset));
3481
3482 __ StoreToSafepointRegisterSlot(tmp1, result);
3483 }
3484
3485 __ bind(&done);
3486 }
3487
3488
3489 void LCodeGen::EmitIntegerMathAbs(LMathAbs* instr) {
3490 Register input = ToRegister(instr->value());
3491 Register result = ToRegister(instr->result());
3492 Assembler::BlockTrampolinePoolScope block_trampoline_pool(masm_);
3493 Label done;
3494 __ Branch(USE_DELAY_SLOT, &done, ge, input, Operand(zero_reg));
3495 __ mov(result, input);
3496 __ subu(result, zero_reg, input);
3497 // Overflow if result is still negative, i.e. 0x80000000.
3498 DeoptimizeIf(lt, instr, Deoptimizer::kOverflow, result, Operand(zero_reg));
3499 __ bind(&done);
3500 }
3501
3502
3503 void LCodeGen::DoMathAbs(LMathAbs* instr) {
3504 // Class for deferred case.
3505 class DeferredMathAbsTaggedHeapNumber final : public LDeferredCode {
3506 public:
3507 DeferredMathAbsTaggedHeapNumber(LCodeGen* codegen, LMathAbs* instr)
3508 : LDeferredCode(codegen), instr_(instr) { }
3509 void Generate() override {
3510 codegen()->DoDeferredMathAbsTaggedHeapNumber(instr_);
3511 }
3512 LInstruction* instr() override { return instr_; }
3513
3514 private:
3515 LMathAbs* instr_;
3516 };
3517
3518 Representation r = instr->hydrogen()->value()->representation();
3519 if (r.IsDouble()) {
3520 FPURegister input = ToDoubleRegister(instr->value());
3521 FPURegister result = ToDoubleRegister(instr->result());
3522 __ abs_d(result, input);
3523 } else if (r.IsSmiOrInteger32()) {
3524 EmitIntegerMathAbs(instr);
3525 } else {
3526 // Representation is tagged.
3527 DeferredMathAbsTaggedHeapNumber* deferred =
3528 new(zone()) DeferredMathAbsTaggedHeapNumber(this, instr);
3529 Register input = ToRegister(instr->value());
3530 // Smi check.
3531 __ JumpIfNotSmi(input, deferred->entry());
3532 // If smi, handle it directly.
3533 EmitIntegerMathAbs(instr);
3534 __ bind(deferred->exit());
3535 }
3536 }
3537
3538
3539 void LCodeGen::DoMathFloor(LMathFloor* instr) {
3540 DoubleRegister input = ToDoubleRegister(instr->value());
3541 Register result = ToRegister(instr->result());
3542 Register scratch1 = scratch0();
3543 Register except_flag = ToRegister(instr->temp());
3544
3545 __ EmitFPUTruncate(kRoundToMinusInf,
3546 result,
3547 input,
3548 scratch1,
3549 double_scratch0(),
3550 except_flag);
3551
3552 // Deopt if the operation did not succeed.
3553 DeoptimizeIf(ne, instr, Deoptimizer::kLostPrecisionOrNaN, except_flag,
3554 Operand(zero_reg));
3555
3556 if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) {
3557 // Test for -0.
3558 Label done;
3559 __ Branch(&done, ne, result, Operand(zero_reg));
3560 __ Mfhc1(scratch1, input);
3561 __ And(scratch1, scratch1, Operand(HeapNumber::kSignMask));
3562 DeoptimizeIf(ne, instr, Deoptimizer::kMinusZero, scratch1,
3563 Operand(zero_reg));
3564 __ bind(&done);
3565 }
3566 }
3567
3568
3569 void LCodeGen::DoMathRound(LMathRound* instr) {
3570 DoubleRegister input = ToDoubleRegister(instr->value());
3571 Register result = ToRegister(instr->result());
3572 DoubleRegister double_scratch1 = ToDoubleRegister(instr->temp());
3573 Register scratch = scratch0();
3574 Label done, check_sign_on_zero;
3575
3576 // Extract exponent bits.
3577 __ Mfhc1(result, input);
3578 __ Ext(scratch,
3579 result,
3580 HeapNumber::kExponentShift,
3581 HeapNumber::kExponentBits);
3582
3583 // If the number is in ]-0.5, +0.5[, the result is +/- 0.
3584 Label skip1;
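// An exponent of at most (kExponentBias - 2) means |input| < 2^-1 = 0.5.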
3585 __ Branch(&skip1, gt, scratch, Operand(HeapNumber::kExponentBias - 2));
3586 __ mov(result, zero_reg);
3587 if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) {
3588 __ Branch(&check_sign_on_zero);
3589 } else {
3590 __ Branch(&done);
3591 }
3592 __ bind(&skip1);
3593
3594 // The following conversion will not work with numbers
3595 // outside of ]-2^32, 2^32[.
3596 DeoptimizeIf(ge, instr, Deoptimizer::kOverflow, scratch,
3597 Operand(HeapNumber::kExponentBias + 32));
3598
3599 // Save the original sign for later comparison.
3600 __ And(scratch, result, Operand(HeapNumber::kSignMask));
3601
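// Rounding is implemented as floor(input + 0.5): round to nearest, with
// ties going toward +infinity.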
3602 __ Move(double_scratch0(), 0.5);
3603 __ add_d(double_scratch0(), input, double_scratch0());
3604
3605 // Check the sign of the result: if the sign changed, the input
3606 // value was in [-0.5, 0[ and the result should be -0.
3607 __ Mfhc1(result, double_scratch0());
3608 __ Xor(result, result, Operand(scratch));
3609 if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) {
3610 // ARM uses 'mi' here, which is 'lt'
3611 DeoptimizeIf(lt, instr, Deoptimizer::kMinusZero, result, Operand(zero_reg));
3612 } else {
3613 Label skip2;
3614 // ARM uses 'mi' here, which is 'lt'
3615 // Negating it results in 'ge'
3616 __ Branch(&skip2, ge, result, Operand(zero_reg));
3617 __ mov(result, zero_reg);
3618 __ Branch(&done);
3619 __ bind(&skip2);
3620 }
3621
3622 Register except_flag = scratch;
3623 __ EmitFPUTruncate(kRoundToMinusInf,
3624 result,
3625 double_scratch0(),
3626 at,
3627 double_scratch1,
3628 except_flag);
3629
3630 DeoptimizeIf(ne, instr, Deoptimizer::kLostPrecisionOrNaN, except_flag,
3631 Operand(zero_reg));
3632
3633 if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) {
3634 // Test for -0.
3635 __ Branch(&done, ne, result, Operand(zero_reg));
3636 __ bind(&check_sign_on_zero);
3637 __ Mfhc1(scratch, input);
3638 __ And(scratch, scratch, Operand(HeapNumber::kSignMask));
3639 DeoptimizeIf(ne, instr, Deoptimizer::kMinusZero, scratch,
3640 Operand(zero_reg));
3641 }
3642 __ bind(&done);
3643 }
3644
3645
3646 void LCodeGen::DoMathFround(LMathFround* instr) {
3647 DoubleRegister input = ToDoubleRegister(instr->value());
3648 DoubleRegister result = ToDoubleRegister(instr->result());
3649 __ cvt_s_d(result.low(), input);
3650 __ cvt_d_s(result, result.low());
3651 }
3652
3653
3654 void LCodeGen::DoMathSqrt(LMathSqrt* instr) {
3655 DoubleRegister input = ToDoubleRegister(instr->value());
3656 DoubleRegister result = ToDoubleRegister(instr->result());
3657 __ sqrt_d(result, input);
3658 }
3659
3660
3661 void LCodeGen::DoMathPowHalf(LMathPowHalf* instr) {
3662 DoubleRegister input = ToDoubleRegister(instr->value());
3663 DoubleRegister result = ToDoubleRegister(instr->result());
3664 DoubleRegister temp = ToDoubleRegister(instr->temp());
3665
3666 DCHECK(!input.is(result));
3667
3668 // Note that according to ECMA-262 15.8.2.13:
3669 // Math.pow(-Infinity, 0.5) == Infinity
3670 // Math.sqrt(-Infinity) == NaN
3671 Label done;
3672 __ Move(temp, static_cast<double>(-V8_INFINITY));
3673 __ BranchF(USE_DELAY_SLOT, &done, NULL, eq, temp, input);
3674 // Set up Infinity in the delay slot.
3675 // The result is overwritten below if the branch is not taken.
3676 __ neg_d(result, temp);
3677
3678 // Add +0 to convert -0 to +0.
3679 __ add_d(result, input, kDoubleRegZero);
3680 __ sqrt_d(result, result);
3681 __ bind(&done);
3682 }
3683
3684
3685 void LCodeGen::DoPower(LPower* instr) {
3686 Representation exponent_type = instr->hydrogen()->right()->representation();
3687 // Having marked this as a call, we can use any registers.
3688 // Just make sure that the input/output registers are the expected ones.
3689 Register tagged_exponent = MathPowTaggedDescriptor::exponent();
3690 DCHECK(!instr->right()->IsDoubleRegister() ||
3691 ToDoubleRegister(instr->right()).is(f4));
3692 DCHECK(!instr->right()->IsRegister() ||
3693 ToRegister(instr->right()).is(tagged_exponent));
3694 DCHECK(ToDoubleRegister(instr->left()).is(f2));
3695 DCHECK(ToDoubleRegister(instr->result()).is(f0));
3696
3697 if (exponent_type.IsSmi()) {
3698 MathPowStub stub(isolate(), MathPowStub::TAGGED);
3699 __ CallStub(&stub);
3700 } else if (exponent_type.IsTagged()) {
3701 Label no_deopt;
3702 __ JumpIfSmi(tagged_exponent, &no_deopt);
3703 DCHECK(!t3.is(tagged_exponent));
3704 __ lw(t3, FieldMemOperand(tagged_exponent, HeapObject::kMapOffset));
3705 __ LoadRoot(at, Heap::kHeapNumberMapRootIndex);
3706 DeoptimizeIf(ne, instr, Deoptimizer::kNotAHeapNumber, t3, Operand(at));
3707 __ bind(&no_deopt);
3708 MathPowStub stub(isolate(), MathPowStub::TAGGED);
3709 __ CallStub(&stub);
3710 } else if (exponent_type.IsInteger32()) {
3711 MathPowStub stub(isolate(), MathPowStub::INTEGER);
3712 __ CallStub(&stub);
3713 } else {
3714 DCHECK(exponent_type.IsDouble());
3715 MathPowStub stub(isolate(), MathPowStub::DOUBLE);
3716 __ CallStub(&stub);
3717 }
3718 }
3719
3720
3721 void LCodeGen::DoMathExp(LMathExp* instr) {
3722 DoubleRegister input = ToDoubleRegister(instr->value());
3723 DoubleRegister result = ToDoubleRegister(instr->result());
3724 DoubleRegister double_scratch1 = ToDoubleRegister(instr->double_temp());
3725 DoubleRegister double_scratch2 = double_scratch0();
3726 Register temp1 = ToRegister(instr->temp1());
3727 Register temp2 = ToRegister(instr->temp2());
3728
3729 MathExpGenerator::EmitMathExp(
3730 masm(), input, result, double_scratch1, double_scratch2,
3731 temp1, temp2, scratch0());
3732 }
3733
3734
3735 void LCodeGen::DoMathLog(LMathLog* instr) {
3736 __ PrepareCallCFunction(0, 1, scratch0());
3737 __ MovToFloatParameter(ToDoubleRegister(instr->value()));
3738 __ CallCFunction(ExternalReference::math_log_double_function(isolate()),
3739 0, 1);
3740 __ MovFromFloatResult(ToDoubleRegister(instr->result()));
3741 }
3742
3743
3744 void LCodeGen::DoMathClz32(LMathClz32* instr) {
3745 Register input = ToRegister(instr->value());
3746 Register result = ToRegister(instr->result());
3747 __ Clz(result, input);
3748 }
3749
3750
3751 void LCodeGen::DoInvokeFunction(LInvokeFunction* instr) {
3752 DCHECK(ToRegister(instr->context()).is(cp));
3753 DCHECK(ToRegister(instr->function()).is(a1));
3754 DCHECK(instr->HasPointerMap());
3755
3756 Handle<JSFunction> known_function = instr->hydrogen()->known_function();
3757 if (known_function.is_null()) {
3758 LPointerMap* pointers = instr->pointer_map();
3759 SafepointGenerator generator(this, pointers, Safepoint::kLazyDeopt);
3760 ParameterCount count(instr->arity());
3761 __ InvokeFunction(a1, count, CALL_FUNCTION, generator);
3762 } else {
3763 CallKnownFunction(known_function,
3764 instr->hydrogen()->formal_parameter_count(),
3765 instr->arity(), instr);
3766 }
3767 }
3768
3769
3770 void LCodeGen::DoCallWithDescriptor(LCallWithDescriptor* instr) {
3771 DCHECK(ToRegister(instr->result()).is(v0));
3772
3773 if (instr->hydrogen()->IsTailCall()) {
3774 if (NeedsEagerFrame()) __ LeaveFrame(StackFrame::INTERNAL);
3775
3776 if (instr->target()->IsConstantOperand()) {
3777 LConstantOperand* target = LConstantOperand::cast(instr->target());
3778 Handle<Code> code = Handle<Code>::cast(ToHandle(target));
3779 __ Jump(code, RelocInfo::CODE_TARGET);
3780 } else {
3781 DCHECK(instr->target()->IsRegister());
3782 Register target = ToRegister(instr->target());
3783 __ Addu(target, target, Operand(Code::kHeaderSize - kHeapObjectTag));
3784 __ Jump(target);
3785 }
3786 } else {
3787 LPointerMap* pointers = instr->pointer_map();
3788 SafepointGenerator generator(this, pointers, Safepoint::kLazyDeopt);
3789
3790 if (instr->target()->IsConstantOperand()) {
3791 LConstantOperand* target = LConstantOperand::cast(instr->target());
3792 Handle<Code> code = Handle<Code>::cast(ToHandle(target));
3793 generator.BeforeCall(__ CallSize(code, RelocInfo::CODE_TARGET));
3794 __ Call(code, RelocInfo::CODE_TARGET);
3795 } else {
3796 DCHECK(instr->target()->IsRegister());
3797 Register target = ToRegister(instr->target());
3798 generator.BeforeCall(__ CallSize(target));
3799 __ Addu(target, target, Operand(Code::kHeaderSize - kHeapObjectTag));
3800 __ Call(target);
3801 }
3802 generator.AfterCall();
3803 }
3804 }
3805
3806
3807 void LCodeGen::DoCallJSFunction(LCallJSFunction* instr) {
3808 DCHECK(ToRegister(instr->function()).is(a1));
3809 DCHECK(ToRegister(instr->result()).is(v0));
3810
3811 __ li(a0, Operand(instr->arity()));
3812
3813 // Change context.
3814 __ lw(cp, FieldMemOperand(a1, JSFunction::kContextOffset));
3815
3816 // Load the code entry address
3817 __ lw(at, FieldMemOperand(a1, JSFunction::kCodeEntryOffset));
3818 __ Call(at);
3819
3820 RecordSafepointWithLazyDeopt(instr, RECORD_SIMPLE_SAFEPOINT);
3821 }
3822
3823
3824 void LCodeGen::DoCallFunction(LCallFunction* instr) {
3825 DCHECK(ToRegister(instr->context()).is(cp));
3826 DCHECK(ToRegister(instr->function()).is(a1));
3827 DCHECK(ToRegister(instr->result()).is(v0));
3828
3829 int arity = instr->arity();
3830 CallFunctionFlags flags = instr->hydrogen()->function_flags();
3831 if (instr->hydrogen()->HasVectorAndSlot()) {
3832 Register slot_register = ToRegister(instr->temp_slot());
3833 Register vector_register = ToRegister(instr->temp_vector());
3834 DCHECK(slot_register.is(a3));
3835 DCHECK(vector_register.is(a2));
3836
3837 AllowDeferredHandleDereference vector_structure_check;
3838 Handle<TypeFeedbackVector> vector = instr->hydrogen()->feedback_vector();
3839 int index = vector->GetIndex(instr->hydrogen()->slot());
3840
3841 __ li(vector_register, vector);
3842 __ li(slot_register, Operand(Smi::FromInt(index)));
3843
3844 CallICState::CallType call_type =
3845 (flags & CALL_AS_METHOD) ? CallICState::METHOD : CallICState::FUNCTION;
3846
3847 Handle<Code> ic =
3848 CodeFactory::CallICInOptimizedCode(isolate(), arity, call_type).code();
3849 CallCode(ic, RelocInfo::CODE_TARGET, instr);
3850 } else {
3851 CallFunctionStub stub(isolate(), arity, flags);
3852 CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
3853 }
3854 }
3855
3856
3857 void LCodeGen::DoCallNew(LCallNew* instr) {
3858 DCHECK(ToRegister(instr->context()).is(cp));
3859 DCHECK(ToRegister(instr->constructor()).is(a1));
3860 DCHECK(ToRegister(instr->result()).is(v0));
3861
3862 __ li(a0, Operand(instr->arity()));
3863 // No cell in a2 for construct type feedback in optimized code
3864 __ LoadRoot(a2, Heap::kUndefinedValueRootIndex);
3865 CallConstructStub stub(isolate(), NO_CALL_CONSTRUCTOR_FLAGS);
3866 CallCode(stub.GetCode(), RelocInfo::CONSTRUCT_CALL, instr);
3867 }
3868
3869
3870 void LCodeGen::DoCallNewArray(LCallNewArray* instr) {
3871 DCHECK(ToRegister(instr->context()).is(cp));
3872 DCHECK(ToRegister(instr->constructor()).is(a1));
3873 DCHECK(ToRegister(instr->result()).is(v0));
3874
3875 __ li(a0, Operand(instr->arity()));
3876 if (instr->arity() == 1) {
3877 // The allocation site is only needed when there is a single (length) argument:
3878 // that case may bail out to the runtime, which uses the site to determine the
3879 // correct elements kind.
3880 __ li(a2, instr->hydrogen()->site());
3881 } else {
3882 __ LoadRoot(a2, Heap::kUndefinedValueRootIndex);
3883 }
3884 ElementsKind kind = instr->hydrogen()->elements_kind();
3885 AllocationSiteOverrideMode override_mode =
3886 (AllocationSite::GetMode(kind) == TRACK_ALLOCATION_SITE)
3887 ? DISABLE_ALLOCATION_SITES
3888 : DONT_OVERRIDE;
3889
3890 if (instr->arity() == 0) {
3891 ArrayNoArgumentConstructorStub stub(isolate(), kind, override_mode);
3892 CallCode(stub.GetCode(), RelocInfo::CONSTRUCT_CALL, instr);
3893 } else if (instr->arity() == 1) {
3894 Label done;
3895 if (IsFastPackedElementsKind(kind)) {
3896 Label packed_case;
3897 // We might need the holey elements kind;
3898 // check the length argument to decide.
3899 __ lw(t1, MemOperand(sp, 0));
3900 __ Branch(&packed_case, eq, t1, Operand(zero_reg));
3901
3902 ElementsKind holey_kind = GetHoleyElementsKind(kind);
3903 ArraySingleArgumentConstructorStub stub(isolate(),
3904 holey_kind,
3905 override_mode);
3906 CallCode(stub.GetCode(), RelocInfo::CONSTRUCT_CALL, instr);
3907 __ jmp(&done);
3908 __ bind(&packed_case);
3909 }
3910
3911 ArraySingleArgumentConstructorStub stub(isolate(), kind, override_mode);
3912 CallCode(stub.GetCode(), RelocInfo::CONSTRUCT_CALL, instr);
3913 __ bind(&done);
3914 } else {
3915 ArrayNArgumentsConstructorStub stub(isolate(), kind, override_mode);
3916 CallCode(stub.GetCode(), RelocInfo::CONSTRUCT_CALL, instr);
3917 }
3918 }
3919
3920
3921 void LCodeGen::DoCallRuntime(LCallRuntime* instr) {
3922 CallRuntime(instr->function(), instr->arity(), instr);
3923 }
3924
3925
3926 void LCodeGen::DoStoreCodeEntry(LStoreCodeEntry* instr) {
3927 Register function = ToRegister(instr->function());
3928 Register code_object = ToRegister(instr->code_object());
3929 __ Addu(code_object, code_object,
3930 Operand(Code::kHeaderSize - kHeapObjectTag));
3931 __ sw(code_object,
3932 FieldMemOperand(function, JSFunction::kCodeEntryOffset));
3933 }
3934
3935
3936 void LCodeGen::DoInnerAllocatedObject(LInnerAllocatedObject* instr) {
3937 Register result = ToRegister(instr->result());
3938 Register base = ToRegister(instr->base_object());
3939 if (instr->offset()->IsConstantOperand()) {
3940 LConstantOperand* offset = LConstantOperand::cast(instr->offset());
3941 __ Addu(result, base, Operand(ToInteger32(offset)));
3942 } else {
3943 Register offset = ToRegister(instr->offset());
3944 __ Addu(result, base, offset);
3945 }
3946 }
3947
3948
3949 void LCodeGen::DoStoreNamedField(LStoreNamedField* instr) {
3950 Representation representation = instr->representation();
3951
3952 Register object = ToRegister(instr->object());
3953 Register scratch = scratch0();
3954 HObjectAccess access = instr->hydrogen()->access();
3955 int offset = access.offset();
3956
3957 if (access.IsExternalMemory()) {
3958 Register value = ToRegister(instr->value());
3959 MemOperand operand = MemOperand(object, offset);
3960 __ Store(value, operand, representation);
3961 return;
3962 }
3963
3964 __ AssertNotSmi(object);
3965
3966 DCHECK(!representation.IsSmi() ||
3967 !instr->value()->IsConstantOperand() ||
3968 IsSmi(LConstantOperand::cast(instr->value())));
3969 if (representation.IsDouble()) {
3970 DCHECK(access.IsInobject());
3971 DCHECK(!instr->hydrogen()->has_transition());
3972 DCHECK(!instr->hydrogen()->NeedsWriteBarrier());
3973 DoubleRegister value = ToDoubleRegister(instr->value());
3974 __ sdc1(value, FieldMemOperand(object, offset));
3975 return;
3976 }
3977
3978 if (instr->hydrogen()->has_transition()) {
3979 Handle<Map> transition = instr->hydrogen()->transition_map();
3980 AddDeprecationDependency(transition);
3981 __ li(scratch, Operand(transition));
3982 __ sw(scratch, FieldMemOperand(object, HeapObject::kMapOffset));
3983 if (instr->hydrogen()->NeedsWriteBarrierForMap()) {
3984 Register temp = ToRegister(instr->temp());
3985 // Update the write barrier for the map field.
3986 __ RecordWriteForMap(object,
3987 scratch,
3988 temp,
3989 GetRAState(),
3990 kSaveFPRegs);
3991 }
3992 }
3993
3994 // Do the store.
3995 Register value = ToRegister(instr->value());
3996 if (access.IsInobject()) {
3997 MemOperand operand = FieldMemOperand(object, offset);
3998 __ Store(value, operand, representation);
3999 if (instr->hydrogen()->NeedsWriteBarrier()) {
4000 // Update the write barrier for the object for in-object properties.
4001 __ RecordWriteField(object,
4002 offset,
4003 value,
4004 scratch,
4005 GetRAState(),
4006 kSaveFPRegs,
4007 EMIT_REMEMBERED_SET,
4008 instr->hydrogen()->SmiCheckForWriteBarrier(),
4009 instr->hydrogen()->PointersToHereCheckForValue());
4010 }
4011 } else {
4012 __ lw(scratch, FieldMemOperand(object, JSObject::kPropertiesOffset));
4013 MemOperand operand = FieldMemOperand(scratch, offset);
4014 __ Store(value, operand, representation);
4015 if (instr->hydrogen()->NeedsWriteBarrier()) {
4016 // Update the write barrier for the properties array.
4017 // object is used as a scratch register.
4018 __ RecordWriteField(scratch,
4019 offset,
4020 value,
4021 object,
4022 GetRAState(),
4023 kSaveFPRegs,
4024 EMIT_REMEMBERED_SET,
4025 instr->hydrogen()->SmiCheckForWriteBarrier(),
4026 instr->hydrogen()->PointersToHereCheckForValue());
4027 }
4028 }
4029 }
4030
4031
4032 void LCodeGen::DoStoreNamedGeneric(LStoreNamedGeneric* instr) {
4033 DCHECK(ToRegister(instr->context()).is(cp));
4034 DCHECK(ToRegister(instr->object()).is(StoreDescriptor::ReceiverRegister()));
4035 DCHECK(ToRegister(instr->value()).is(StoreDescriptor::ValueRegister()));
4036
4037 if (instr->hydrogen()->HasVectorAndSlot()) {
4038 EmitVectorStoreICRegisters<LStoreNamedGeneric>(instr);
4039 }
4040
4041 __ li(StoreDescriptor::NameRegister(), Operand(instr->name()));
4042 Handle<Code> ic = CodeFactory::StoreICInOptimizedCode(
4043 isolate(), instr->language_mode(),
4044 instr->hydrogen()->initialization_state()).code();
4045 CallCode(ic, RelocInfo::CODE_TARGET, instr);
4046 }
4047
4048
4049 void LCodeGen::DoStoreGlobalViaContext(LStoreGlobalViaContext* instr) {
4050 DCHECK(ToRegister(instr->context()).is(cp));
4051 DCHECK(ToRegister(instr->value())
4052 .is(StoreGlobalViaContextDescriptor::ValueRegister()));
4053
4054 int const slot = instr->slot_index();
4055 int const depth = instr->depth();
4056 if (depth <= StoreGlobalViaContextStub::kMaximumDepth) {
4057 __ li(StoreGlobalViaContextDescriptor::SlotRegister(), Operand(slot));
4058 Handle<Code> stub = CodeFactory::StoreGlobalViaContext(
4059 isolate(), depth, instr->language_mode())
4060 .code();
4061 CallCode(stub, RelocInfo::CODE_TARGET, instr);
4062 } else {
4063 __ Push(Smi::FromInt(slot));
4064 __ Push(StoreGlobalViaContextDescriptor::ValueRegister());
4065 __ CallRuntime(is_strict(language_mode())
4066 ? Runtime::kStoreGlobalViaContext_Strict
4067 : Runtime::kStoreGlobalViaContext_Sloppy,
4068 2);
4069 }
4070 }
4071
4072
4073 void LCodeGen::DoBoundsCheck(LBoundsCheck* instr) {
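// If equality is allowed, deoptimize only when index > length (hi);
// otherwise deoptimize when index >= length (hs).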
4074 Condition cc = instr->hydrogen()->allow_equality() ? hi : hs;
4075 Operand operand(0);
4076 Register reg;
4077 if (instr->index()->IsConstantOperand()) {
4078 operand = ToOperand(instr->index());
4079 reg = ToRegister(instr->length());
4080 cc = CommuteCondition(cc);
4081 } else {
4082 reg = ToRegister(instr->index());
4083 operand = ToOperand(instr->length());
4084 }
4085 if (FLAG_debug_code && instr->hydrogen()->skip_check()) {
4086 Label done;
4087 __ Branch(&done, NegateCondition(cc), reg, operand);
4088 __ stop("eliminated bounds check failed");
4089 __ bind(&done);
4090 } else {
4091 DeoptimizeIf(cc, instr, Deoptimizer::kOutOfBounds, reg, operand);
4092 }
4093 }
4094
4095
4096 void LCodeGen::DoStoreKeyedExternalArray(LStoreKeyed* instr) {
4097 Register external_pointer = ToRegister(instr->elements());
4098 Register key = no_reg;
4099 ElementsKind elements_kind = instr->elements_kind();
4100 bool key_is_constant = instr->key()->IsConstantOperand();
4101 int constant_key = 0;
4102 if (key_is_constant) {
4103 constant_key = ToInteger32(LConstantOperand::cast(instr->key()));
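// A constant key with any of its four most significant bits set could
// overflow the 32-bit offset once shifted by the element size.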
4104 if (constant_key & 0xF0000000) {
4105 Abort(kArrayIndexConstantValueTooBig);
4106 }
4107 } else {
4108 key = ToRegister(instr->key());
4109 }
4110 int element_size_shift = ElementsKindToShiftSize(elements_kind);
4111 int shift_size = (instr->hydrogen()->key()->representation().IsSmi())
4112 ? (element_size_shift - kSmiTagSize) : element_size_shift;
4113 int base_offset = instr->base_offset();
4114
4115 if (elements_kind == FLOAT32_ELEMENTS || elements_kind == FLOAT64_ELEMENTS) {
4116 Register address = scratch0();
4117 FPURegister value(ToDoubleRegister(instr->value()));
4118 if (key_is_constant) {
4119 if (constant_key != 0) {
4120 __ Addu(address, external_pointer,
4121 Operand(constant_key << element_size_shift));
4122 } else {
4123 address = external_pointer;
4124 }
4125 } else {
4126 __ sll(address, key, shift_size);
4127 __ Addu(address, external_pointer, address);
4128 }
4129
4130 if (elements_kind == FLOAT32_ELEMENTS) {
4131 __ cvt_s_d(double_scratch0(), value);
4132 __ swc1(double_scratch0(), MemOperand(address, base_offset));
4133 } else { // Storing doubles, not floats.
4134 __ sdc1(value, MemOperand(address, base_offset));
4135 }
4136 } else {
4137 Register value(ToRegister(instr->value()));
4138 MemOperand mem_operand = PrepareKeyedOperand(
4139 key, external_pointer, key_is_constant, constant_key,
4140 element_size_shift, shift_size,
4141 base_offset);
4142 switch (elements_kind) {
4143 case UINT8_ELEMENTS:
4144 case UINT8_CLAMPED_ELEMENTS:
4145 case INT8_ELEMENTS:
4146 __ sb(value, mem_operand);
4147 break;
4148 case INT16_ELEMENTS:
4149 case UINT16_ELEMENTS:
4150 __ sh(value, mem_operand);
4151 break;
4152 case INT32_ELEMENTS:
4153 case UINT32_ELEMENTS:
4154 __ sw(value, mem_operand);
4155 break;
4156 case FLOAT32_ELEMENTS:
4157 case FLOAT64_ELEMENTS:
4158 case FAST_DOUBLE_ELEMENTS:
4159 case FAST_ELEMENTS:
4160 case FAST_SMI_ELEMENTS:
4161 case FAST_HOLEY_DOUBLE_ELEMENTS:
4162 case FAST_HOLEY_ELEMENTS:
4163 case FAST_HOLEY_SMI_ELEMENTS:
4164 case DICTIONARY_ELEMENTS:
4165 case FAST_SLOPPY_ARGUMENTS_ELEMENTS:
4166 case SLOW_SLOPPY_ARGUMENTS_ELEMENTS:
4167 UNREACHABLE();
4168 break;
4169 }
4170 }
4171 }
4172
4173
4174 void LCodeGen::DoStoreKeyedFixedDoubleArray(LStoreKeyed* instr) {
4175 DoubleRegister value = ToDoubleRegister(instr->value());
4176 Register elements = ToRegister(instr->elements());
4177 Register scratch = scratch0();
4178 Register scratch_1 = scratch1();
4179 DoubleRegister double_scratch = double_scratch0();
4180 bool key_is_constant = instr->key()->IsConstantOperand();
4181 int base_offset = instr->base_offset();
4182 Label not_nan, done;
4183
4184 // Calculate the effective address of the slot in the array to store the
4185 // double value.
4186 int element_size_shift = ElementsKindToShiftSize(FAST_DOUBLE_ELEMENTS);
4187 if (key_is_constant) {
4188 int constant_key = ToInteger32(LConstantOperand::cast(instr->key()));
4189 if (constant_key & 0xF0000000) {
4190 Abort(kArrayIndexConstantValueTooBig);
4191 }
4192 __ Addu(scratch, elements,
4193 Operand((constant_key << element_size_shift) + base_offset));
4194 } else {
4195 int shift_size = (instr->hydrogen()->key()->representation().IsSmi())
4196 ? (element_size_shift - kSmiTagSize) : element_size_shift;
4197 __ Addu(scratch, elements, Operand(base_offset));
4198 __ sll(at, ToRegister(instr->key()), shift_size);
4199 __ Addu(scratch, scratch, at);
4200 }
4201
4202 if (instr->NeedsCanonicalization()) {
4203 Label is_nan;
4204 // Check for NaN. All NaNs must be canonicalized.
4205 __ BranchF(NULL, &is_nan, eq, value, value);
4206 __ Branch(&not_nan);
4207
4208 // Only load the canonical NaN if the comparison above found the value to be NaN.
4209 __ bind(&is_nan);
4210 __ LoadRoot(scratch_1, Heap::kNanValueRootIndex);
4211 __ ldc1(double_scratch,
4212 FieldMemOperand(scratch_1, HeapNumber::kValueOffset));
4213 __ sdc1(double_scratch, MemOperand(scratch, 0));
4214 __ Branch(&done);
4215 }
4216
4217 __ bind(&not_nan);
4218 __ sdc1(value, MemOperand(scratch, 0));
4219 __ bind(&done);
4220 }
4221
4222
4223 void LCodeGen::DoStoreKeyedFixedArray(LStoreKeyed* instr) {
4224 Register value = ToRegister(instr->value());
4225 Register elements = ToRegister(instr->elements());
4226 Register key = instr->key()->IsRegister() ? ToRegister(instr->key())
4227 : no_reg;
4228 Register scratch = scratch0();
4229 Register store_base = scratch;
4230 int offset = instr->base_offset();
4231
4232 // Do the store.
4233 if (instr->key()->IsConstantOperand()) {
4234 DCHECK(!instr->hydrogen()->NeedsWriteBarrier());
4235 LConstantOperand* const_operand = LConstantOperand::cast(instr->key());
4236 offset += ToInteger32(const_operand) * kPointerSize;
4237 store_base = elements;
4238 } else {
4239 // Even though the HStoreKeyed instruction forces the input
4240 // representation for the key to be an integer, the input gets replaced
4241 // during bound check elimination with the index argument to the bounds
4242 // check, which can be tagged, so that case must be handled here, too.
4243 if (instr->hydrogen()->key()->representation().IsSmi()) {
4244 __ sll(scratch, key, kPointerSizeLog2 - kSmiTagSize);
4245 __ addu(scratch, elements, scratch);
4246 } else {
4247 __ sll(scratch, key, kPointerSizeLog2);
4248 __ addu(scratch, elements, scratch);
4249 }
4250 }
4251 __ sw(value, MemOperand(store_base, offset));
4252
4253 if (instr->hydrogen()->NeedsWriteBarrier()) {
4254 SmiCheck check_needed =
4255 instr->hydrogen()->value()->type().IsHeapObject()
4256 ? OMIT_SMI_CHECK : INLINE_SMI_CHECK;
4257 // Compute address of modified element and store it into key register.
4258 __ Addu(key, store_base, Operand(offset));
4259 __ RecordWrite(elements,
4260 key,
4261 value,
4262 GetRAState(),
4263 kSaveFPRegs,
4264 EMIT_REMEMBERED_SET,
4265 check_needed,
4266 instr->hydrogen()->PointersToHereCheckForValue());
4267 }
4268 }
4269
4270
4271 void LCodeGen::DoStoreKeyed(LStoreKeyed* instr) {
4272 // Dispatch by cases: typed (external) array, fast double array, or fast array.
4273 if (instr->is_fixed_typed_array()) {
4274 DoStoreKeyedExternalArray(instr);
4275 } else if (instr->hydrogen()->value()->representation().IsDouble()) {
4276 DoStoreKeyedFixedDoubleArray(instr);
4277 } else {
4278 DoStoreKeyedFixedArray(instr);
4279 }
4280 }
4281
4282
4283 void LCodeGen::DoStoreKeyedGeneric(LStoreKeyedGeneric* instr) {
4284 DCHECK(ToRegister(instr->context()).is(cp));
4285 DCHECK(ToRegister(instr->object()).is(StoreDescriptor::ReceiverRegister()));
4286 DCHECK(ToRegister(instr->key()).is(StoreDescriptor::NameRegister()));
4287 DCHECK(ToRegister(instr->value()).is(StoreDescriptor::ValueRegister()));
4288
4289 if (instr->hydrogen()->HasVectorAndSlot()) {
4290 EmitVectorStoreICRegisters<LStoreKeyedGeneric>(instr);
4291 }
4292
4293 Handle<Code> ic = CodeFactory::KeyedStoreICInOptimizedCode(
4294 isolate(), instr->language_mode(),
4295 instr->hydrogen()->initialization_state()).code();
4296 CallCode(ic, RelocInfo::CODE_TARGET, instr);
4297 }
4298
4299
4300 void LCodeGen::DoMaybeGrowElements(LMaybeGrowElements* instr) {
4301 class DeferredMaybeGrowElements final : public LDeferredCode {
4302 public:
4303 DeferredMaybeGrowElements(LCodeGen* codegen, LMaybeGrowElements* instr)
4304 : LDeferredCode(codegen), instr_(instr) {}
4305 void Generate() override { codegen()->DoDeferredMaybeGrowElements(instr_); }
4306 LInstruction* instr() override { return instr_; }
4307
4308 private:
4309 LMaybeGrowElements* instr_;
4310 };
4311
4312 Register result = v0;
4313 DeferredMaybeGrowElements* deferred =
4314 new (zone()) DeferredMaybeGrowElements(this, instr);
4315 LOperand* key = instr->key();
4316 LOperand* current_capacity = instr->current_capacity();
4317
4318 DCHECK(instr->hydrogen()->key()->representation().IsInteger32());
4319 DCHECK(instr->hydrogen()->current_capacity()->representation().IsInteger32());
4320 DCHECK(key->IsConstantOperand() || key->IsRegister());
4321 DCHECK(current_capacity->IsConstantOperand() ||
4322 current_capacity->IsRegister());
4323
4324 if (key->IsConstantOperand() && current_capacity->IsConstantOperand()) {
4325 int32_t constant_key = ToInteger32(LConstantOperand::cast(key));
4326 int32_t constant_capacity =
4327 ToInteger32(LConstantOperand::cast(current_capacity));
4328 if (constant_key >= constant_capacity) {
4329 // Deferred case.
4330 __ jmp(deferred->entry());
4331 }
4332 } else if (key->IsConstantOperand()) {
4333 int32_t constant_key = ToInteger32(LConstantOperand::cast(key));
4334 __ Branch(deferred->entry(), le, ToRegister(current_capacity),
4335 Operand(constant_key));
4336 } else if (current_capacity->IsConstantOperand()) {
4337 int32_t constant_capacity =
4338 ToInteger32(LConstantOperand::cast(current_capacity));
4339 __ Branch(deferred->entry(), ge, ToRegister(key),
4340 Operand(constant_capacity));
4341 } else {
4342 __ Branch(deferred->entry(), ge, ToRegister(key),
4343 Operand(ToRegister(current_capacity)));
4344 }
4345
4346 if (instr->elements()->IsRegister()) {
4347 __ mov(result, ToRegister(instr->elements()));
4348 } else {
4349 __ lw(result, ToMemOperand(instr->elements()));
4350 }
4351
4352 __ bind(deferred->exit());
4353 }
4354
4355
4356 void LCodeGen::DoDeferredMaybeGrowElements(LMaybeGrowElements* instr) {
4357 // TODO(3095996): Get rid of this. For now, we need to make the
4358 // result register contain a valid pointer because it is already
4359 // contained in the register pointer map.
4360 Register result = v0;
4361 __ mov(result, zero_reg);
4362
4363 // We have to call a stub.
4364 {
4365 PushSafepointRegistersScope scope(this);
4366 if (instr->object()->IsRegister()) {
4367 __ mov(result, ToRegister(instr->object()));
4368 } else {
4369 __ lw(result, ToMemOperand(instr->object()));
4370 }
4371
4372 LOperand* key = instr->key();
4373 if (key->IsConstantOperand()) {
4374 __ li(a3, Operand(ToSmi(LConstantOperand::cast(key))));
4375 } else {
4376 __ mov(a3, ToRegister(key));
4377 __ SmiTag(a3);
4378 }
4379
4380 GrowArrayElementsStub stub(isolate(), instr->hydrogen()->is_js_array(),
4381 instr->hydrogen()->kind());
4382 __ mov(a0, result);
4383 __ CallStub(&stub);
4384 RecordSafepointWithLazyDeopt(
4385 instr, RECORD_SAFEPOINT_WITH_REGISTERS_AND_NO_ARGUMENTS);
4386 __ StoreToSafepointRegisterSlot(result, result);
4387 }
4388
4389 // Deopt on smi, which means the elements array changed to dictionary mode.
4390 __ SmiTst(result, at);
4391 DeoptimizeIf(eq, instr, Deoptimizer::kSmi, at, Operand(zero_reg));
4392 }
4393
4394
4395 void LCodeGen::DoTransitionElementsKind(LTransitionElementsKind* instr) {
4396 Register object_reg = ToRegister(instr->object());
4397 Register scratch = scratch0();
4398
4399 Handle<Map> from_map = instr->original_map();
4400 Handle<Map> to_map = instr->transitioned_map();
4401 ElementsKind from_kind = instr->from_kind();
4402 ElementsKind to_kind = instr->to_kind();
4403
4404 Label not_applicable;
4405 __ lw(scratch, FieldMemOperand(object_reg, HeapObject::kMapOffset));
4406 __ Branch(&not_applicable, ne, scratch, Operand(from_map));
4407
4408 if (IsSimpleMapChangeTransition(from_kind, to_kind)) {
4409 Register new_map_reg = ToRegister(instr->new_map_temp());
4410 __ li(new_map_reg, Operand(to_map));
4411 __ sw(new_map_reg, FieldMemOperand(object_reg, HeapObject::kMapOffset));
4412 // Write barrier.
4413 __ RecordWriteForMap(object_reg,
4414 new_map_reg,
4415 scratch,
4416 GetRAState(),
4417 kDontSaveFPRegs);
4418 } else {
4419 DCHECK(object_reg.is(a0));
4420 DCHECK(ToRegister(instr->context()).is(cp));
4421 PushSafepointRegistersScope scope(this);
4422 __ li(a1, Operand(to_map));
4423 bool is_js_array = from_map->instance_type() == JS_ARRAY_TYPE;
4424 TransitionElementsKindStub stub(isolate(), from_kind, to_kind, is_js_array);
4425 __ CallStub(&stub);
4426 RecordSafepointWithRegisters(
4427 instr->pointer_map(), 0, Safepoint::kLazyDeopt);
4428 }
4429 __ bind(&not_applicable);
4430 }
4431
4432
4433 void LCodeGen::DoTrapAllocationMemento(LTrapAllocationMemento* instr) {
4434 Register object = ToRegister(instr->object());
4435 Register temp = ToRegister(instr->temp());
4436 Label no_memento_found;
4437 __ TestJSArrayForAllocationMemento(object, temp, &no_memento_found,
4438 ne, &no_memento_found);
4439 DeoptimizeIf(al, instr);
4440 __ bind(&no_memento_found);
4441 }
4442
4443
4444 void LCodeGen::DoStringAdd(LStringAdd* instr) {
4445 DCHECK(ToRegister(instr->context()).is(cp));
4446 DCHECK(ToRegister(instr->left()).is(a1));
4447 DCHECK(ToRegister(instr->right()).is(a0));
4448 StringAddStub stub(isolate(),
4449 instr->hydrogen()->flags(),
4450 instr->hydrogen()->pretenure_flag());
4451 CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
4452 }
4453
4454
4455 void LCodeGen::DoStringCharCodeAt(LStringCharCodeAt* instr) {
4456 class DeferredStringCharCodeAt final : public LDeferredCode {
4457 public:
4458 DeferredStringCharCodeAt(LCodeGen* codegen, LStringCharCodeAt* instr)
4459 : LDeferredCode(codegen), instr_(instr) { }
4460 void Generate() override { codegen()->DoDeferredStringCharCodeAt(instr_); }
4461 LInstruction* instr() override { return instr_; }
4462
4463 private:
4464 LStringCharCodeAt* instr_;
4465 };
4466
4467 DeferredStringCharCodeAt* deferred =
4468 new(zone()) DeferredStringCharCodeAt(this, instr);
4469 StringCharLoadGenerator::Generate(masm(),
4470 ToRegister(instr->string()),
4471 ToRegister(instr->index()),
4472 ToRegister(instr->result()),
4473 deferred->entry());
4474 __ bind(deferred->exit());
4475 }
4476
4477
4478 void LCodeGen::DoDeferredStringCharCodeAt(LStringCharCodeAt* instr) {
4479 Register string = ToRegister(instr->string());
4480 Register result = ToRegister(instr->result());
4481 Register scratch = scratch0();
4482
4483 // TODO(3095996): Get rid of this. For now, we need to make the
4484 // result register contain a valid pointer because it is already
4485 // contained in the register pointer map.
4486 __ mov(result, zero_reg);
4487
4488 PushSafepointRegistersScope scope(this);
4489 __ push(string);
4490 // Push the index as a smi. This is safe because of the checks in
4491 // DoStringCharCodeAt above.
4492 if (instr->index()->IsConstantOperand()) {
4493 int const_index = ToInteger32(LConstantOperand::cast(instr->index()));
4494 __ Addu(scratch, zero_reg, Operand(Smi::FromInt(const_index)));
4495 __ push(scratch);
4496 } else {
4497 Register index = ToRegister(instr->index());
4498 __ SmiTag(index);
4499 __ push(index);
4500 }
4501 CallRuntimeFromDeferred(Runtime::kStringCharCodeAtRT, 2, instr,
4502 instr->context());
4503 __ AssertSmi(v0);
4504 __ SmiUntag(v0);
4505 __ StoreToSafepointRegisterSlot(v0, result);
4506 }
4507
4508
4509 void LCodeGen::DoStringCharFromCode(LStringCharFromCode* instr) {
4510 class DeferredStringCharFromCode final : public LDeferredCode {
4511 public:
4512 DeferredStringCharFromCode(LCodeGen* codegen, LStringCharFromCode* instr)
4513 : LDeferredCode(codegen), instr_(instr) { }
4514 void Generate() override {
4515 codegen()->DoDeferredStringCharFromCode(instr_);
4516 }
4517 LInstruction* instr() override { return instr_; }
4518
4519 private:
4520 LStringCharFromCode* instr_;
4521 };
4522
4523 DeferredStringCharFromCode* deferred =
4524 new(zone()) DeferredStringCharFromCode(this, instr);
4525
4526 DCHECK(instr->hydrogen()->value()->representation().IsInteger32());
4527 Register char_code = ToRegister(instr->char_code());
4528 Register result = ToRegister(instr->result());
4529 Register scratch = scratch0();
4530 DCHECK(!char_code.is(result));
4531
4532 __ Branch(deferred->entry(), hi,
4533 char_code, Operand(String::kMaxOneByteCharCode));
4534 __ LoadRoot(result, Heap::kSingleCharacterStringCacheRootIndex);
4535 __ sll(scratch, char_code, kPointerSizeLog2);
4536 __ Addu(result, result, scratch);
4537 __ lw(result, FieldMemOperand(result, FixedArray::kHeaderSize));
4538 __ LoadRoot(scratch, Heap::kUndefinedValueRootIndex);
4539 __ Branch(deferred->entry(), eq, result, Operand(scratch));
4540 __ bind(deferred->exit());
4541 }
4542
4543
4544 void LCodeGen::DoDeferredStringCharFromCode(LStringCharFromCode* instr) {
4545 Register char_code = ToRegister(instr->char_code());
4546 Register result = ToRegister(instr->result());
4547
4548 // TODO(3095996): Get rid of this. For now, we need to make the
4549 // result register contain a valid pointer because it is already
4550 // contained in the register pointer map.
4551 __ mov(result, zero_reg);
4552
4553 PushSafepointRegistersScope scope(this);
4554 __ SmiTag(char_code);
4555 __ push(char_code);
4556 CallRuntimeFromDeferred(Runtime::kCharFromCode, 1, instr, instr->context());
4557 __ StoreToSafepointRegisterSlot(v0, result);
4558 }
4559
4560
4561 void LCodeGen::DoInteger32ToDouble(LInteger32ToDouble* instr) {
4562 LOperand* input = instr->value();
4563 DCHECK(input->IsRegister() || input->IsStackSlot());
4564 LOperand* output = instr->result();
4565 DCHECK(output->IsDoubleRegister());
4566 FPURegister single_scratch = double_scratch0().low();
4567 if (input->IsStackSlot()) {
4568 Register scratch = scratch0();
4569 __ lw(scratch, ToMemOperand(input));
4570 __ mtc1(scratch, single_scratch);
4571 } else {
4572 __ mtc1(ToRegister(input), single_scratch);
4573 }
4574 __ cvt_d_w(ToDoubleRegister(output), single_scratch);
4575 }
4576
4577
4578 void LCodeGen::DoUint32ToDouble(LUint32ToDouble* instr) {
4579 LOperand* input = instr->value();
4580 LOperand* output = instr->result();
4581
4582 FPURegister dbl_scratch = double_scratch0();
4583 __ mtc1(ToRegister(input), dbl_scratch);
4584 __ Cvt_d_uw(ToDoubleRegister(output), dbl_scratch, f22);
4585 }
4586
4587
4588 void LCodeGen::DoNumberTagI(LNumberTagI* instr) {
4589 class DeferredNumberTagI final : public LDeferredCode {
4590 public:
4591 DeferredNumberTagI(LCodeGen* codegen, LNumberTagI* instr)
4592 : LDeferredCode(codegen), instr_(instr) { }
4593 void Generate() override {
4594 codegen()->DoDeferredNumberTagIU(instr_,
4595 instr_->value(),
4596 instr_->temp1(),
4597 instr_->temp2(),
4598 SIGNED_INT32);
4599 }
4600 LInstruction* instr() override { return instr_; }
4601
4602 private:
4603 LNumberTagI* instr_;
4604 };
4605
4606 Register src = ToRegister(instr->value());
4607 Register dst = ToRegister(instr->result());
4608 Register overflow = scratch0();
4609
4610 DeferredNumberTagI* deferred = new(zone()) DeferredNumberTagI(this, instr);
4611 __ SmiTagCheckOverflow(dst, src, overflow);
4612 __ BranchOnOverflow(deferred->entry(), overflow);
4613 __ bind(deferred->exit());
4614 }
4615
4616
4617 void LCodeGen::DoNumberTagU(LNumberTagU* instr) {
4618 class DeferredNumberTagU final : public LDeferredCode {
4619 public:
4620 DeferredNumberTagU(LCodeGen* codegen, LNumberTagU* instr)
4621 : LDeferredCode(codegen), instr_(instr) { }
4622 void Generate() override {
4623 codegen()->DoDeferredNumberTagIU(instr_,
4624 instr_->value(),
4625 instr_->temp1(),
4626 instr_->temp2(),
4627 UNSIGNED_INT32);
4628 }
4629 LInstruction* instr() override { return instr_; }
4630
4631 private:
4632 LNumberTagU* instr_;
4633 };
4634
4635 Register input = ToRegister(instr->value());
4636 Register result = ToRegister(instr->result());
4637
4638 DeferredNumberTagU* deferred = new(zone()) DeferredNumberTagU(this, instr);
4639 __ Branch(deferred->entry(), hi, input, Operand(Smi::kMaxValue));
4640 __ SmiTag(result, input);
4641 __ bind(deferred->exit());
4642 }
4643
4644
4645 void LCodeGen::DoDeferredNumberTagIU(LInstruction* instr,
4646 LOperand* value,
4647 LOperand* temp1,
4648 LOperand* temp2,
4649 IntegerSignedness signedness) {
4650 Label done, slow;
4651 Register src = ToRegister(value);
4652 Register dst = ToRegister(instr->result());
4653 Register tmp1 = scratch0();
4654 Register tmp2 = ToRegister(temp1);
4655 Register tmp3 = ToRegister(temp2);
4656 DoubleRegister dbl_scratch = double_scratch0();
4657
4658 if (signedness == SIGNED_INT32) {
4659 // There was overflow, so bits 30 and 31 of the original integer
4660 // disagree. Try to allocate a heap number in new space and store
4661 // the value in there. If that fails, call the runtime system.
4662 if (dst.is(src)) {
4663 __ SmiUntag(src, dst);
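// The arithmetic shift copied bit 30 into bit 31; since bits 30 and 31 of
// the original value disagree, flipping bit 31 recovers the original value.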
4664 __ Xor(src, src, Operand(0x80000000));
4665 }
4666 __ mtc1(src, dbl_scratch);
4667 __ cvt_d_w(dbl_scratch, dbl_scratch);
4668 } else {
4669 __ mtc1(src, dbl_scratch);
4670 __ Cvt_d_uw(dbl_scratch, dbl_scratch, f22);
4671 }
4672
4673 if (FLAG_inline_new) {
4674 __ LoadRoot(tmp3, Heap::kHeapNumberMapRootIndex);
4675 __ AllocateHeapNumber(dst, tmp1, tmp2, tmp3, &slow, DONT_TAG_RESULT);
4676 __ Branch(&done);
4677 }
4678
4679 // Slow case: Call the runtime system to do the number allocation.
4680 __ bind(&slow);
4681 {
4682 // TODO(3095996): Put a valid pointer value in the stack slot where the
4683 // result register is stored, as this register is in the pointer map, but
4684 // contains an integer value.
4685 __ mov(dst, zero_reg);
4686
4687 // Preserve the value of all registers.
4688 PushSafepointRegistersScope scope(this);
4689
4690 // NumberTagI and NumberTagD use the context from the frame, rather than
4691 // the environment's HContext or HInlinedContext value.
4692 // They only call Runtime::kAllocateHeapNumber.
4693 // The corresponding HChange instructions are added in a phase that does
4694 // not have easy access to the local context.
4695 __ lw(cp, MemOperand(fp, StandardFrameConstants::kContextOffset));
4696 __ CallRuntimeSaveDoubles(Runtime::kAllocateHeapNumber);
4697 RecordSafepointWithRegisters(
4698 instr->pointer_map(), 0, Safepoint::kNoLazyDeopt);
4699 __ Subu(v0, v0, kHeapObjectTag);
4700 __ StoreToSafepointRegisterSlot(v0, dst);
4701 }
4702
4703
4704 // Done. Put the value in dbl_scratch into the value of the allocated heap
4705 // number.
4706 __ bind(&done);
4707 __ sdc1(dbl_scratch, MemOperand(dst, HeapNumber::kValueOffset));
4708 __ Addu(dst, dst, kHeapObjectTag);
4709 }
4710
4711
4712 void LCodeGen::DoNumberTagD(LNumberTagD* instr) {
4713 class DeferredNumberTagD final : public LDeferredCode {
4714 public:
4715 DeferredNumberTagD(LCodeGen* codegen, LNumberTagD* instr)
4716 : LDeferredCode(codegen), instr_(instr) { }
4717 void Generate() override { codegen()->DoDeferredNumberTagD(instr_); }
4718 LInstruction* instr() override { return instr_; }
4719
4720 private:
4721 LNumberTagD* instr_;
4722 };
4723
4724 DoubleRegister input_reg = ToDoubleRegister(instr->value());
4725 Register scratch = scratch0();
4726 Register reg = ToRegister(instr->result());
4727 Register temp1 = ToRegister(instr->temp());
4728 Register temp2 = ToRegister(instr->temp2());
4729
4730 DeferredNumberTagD* deferred = new(zone()) DeferredNumberTagD(this, instr);
4731 if (FLAG_inline_new) {
4732 __ LoadRoot(scratch, Heap::kHeapNumberMapRootIndex);
4733 // We want the untagged address first for performance
4734 __ AllocateHeapNumber(reg, temp1, temp2, scratch, deferred->entry(),
4735 DONT_TAG_RESULT);
4736 } else {
4737 __ Branch(deferred->entry());
4738 }
4739 __ bind(deferred->exit());
4740 __ sdc1(input_reg, MemOperand(reg, HeapNumber::kValueOffset));
4741 // Now that we have finished with the object's real address, tag it.
4742 __ Addu(reg, reg, kHeapObjectTag);
4743 }
4744
4745
4746 void LCodeGen::DoDeferredNumberTagD(LNumberTagD* instr) {
4747 // TODO(3095996): Get rid of this. For now, we need to make the
4748 // result register contain a valid pointer because it is already
4749 // contained in the register pointer map.
4750 Register reg = ToRegister(instr->result());
4751 __ mov(reg, zero_reg);
4752
4753 PushSafepointRegistersScope scope(this);
4754 // NumberTagI and NumberTagD use the context from the frame, rather than
4755 // the environment's HContext or HInlinedContext value.
4756 // They only call Runtime::kAllocateHeapNumber.
4757 // The corresponding HChange instructions are added in a phase that does
4758 // not have easy access to the local context.
4759 __ lw(cp, MemOperand(fp, StandardFrameConstants::kContextOffset));
4760 __ CallRuntimeSaveDoubles(Runtime::kAllocateHeapNumber);
4761 RecordSafepointWithRegisters(
4762 instr->pointer_map(), 0, Safepoint::kNoLazyDeopt);
4763 __ Subu(v0, v0, kHeapObjectTag);
4764 __ StoreToSafepointRegisterSlot(v0, reg);
4765 }
4766
4767
4768 void LCodeGen::DoSmiTag(LSmiTag* instr) {
4769 HChange* hchange = instr->hydrogen();
4770 Register input = ToRegister(instr->value());
4771 Register output = ToRegister(instr->result());
4772 if (hchange->CheckFlag(HValue::kCanOverflow) &&
4773 hchange->value()->CheckFlag(HValue::kUint32)) {
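// A uint32 value fits in a smi only if its two most significant bits are
// clear (i.e. it is below 2^30).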
4774 __ And(at, input, Operand(0xc0000000));
4775 DeoptimizeIf(ne, instr, Deoptimizer::kOverflow, at, Operand(zero_reg));
4776 }
4777 if (hchange->CheckFlag(HValue::kCanOverflow) &&
4778 !hchange->value()->CheckFlag(HValue::kUint32)) {
4779 __ SmiTagCheckOverflow(output, input, at);
4780 DeoptimizeIf(lt, instr, Deoptimizer::kOverflow, at, Operand(zero_reg));
4781 } else {
4782 __ SmiTag(output, input);
4783 }
4784 }
4785
4786
4787 void LCodeGen::DoSmiUntag(LSmiUntag* instr) {
4788 Register scratch = scratch0();
4789 Register input = ToRegister(instr->value());
4790 Register result = ToRegister(instr->result());
4791 if (instr->needs_check()) {
4792 STATIC_ASSERT(kHeapObjectTag == 1);
4793 // If the input is a HeapObject, the value of scratch won't be zero.
4794 __ And(scratch, input, Operand(kHeapObjectTag));
4795 __ SmiUntag(result, input);
4796 DeoptimizeIf(ne, instr, Deoptimizer::kNotASmi, scratch, Operand(zero_reg));
4797 } else {
4798 __ SmiUntag(result, input);
4799 }
4800 }
4801
4802
4803 void LCodeGen::EmitNumberUntagD(LNumberUntagD* instr, Register input_reg,
4804 DoubleRegister result_reg,
4805 NumberUntagDMode mode) {
4806 bool can_convert_undefined_to_nan =
4807 instr->hydrogen()->can_convert_undefined_to_nan();
4808 bool deoptimize_on_minus_zero = instr->hydrogen()->deoptimize_on_minus_zero();
4809
4810 Register scratch = scratch0();
4811 Label convert, load_smi, done;
4812 if (mode == NUMBER_CANDIDATE_IS_ANY_TAGGED) {
4813 // Smi check.
4814 __ UntagAndJumpIfSmi(scratch, input_reg, &load_smi);
4815 // Heap number map check.
4816 __ lw(scratch, FieldMemOperand(input_reg, HeapObject::kMapOffset));
4817 __ LoadRoot(at, Heap::kHeapNumberMapRootIndex);
4818 if (can_convert_undefined_to_nan) {
4819 __ Branch(&convert, ne, scratch, Operand(at));
4820 } else {
4821 DeoptimizeIf(ne, instr, Deoptimizer::kNotAHeapNumber, scratch,
4822 Operand(at));
4823 }
4824 // Load heap number.
4825 __ ldc1(result_reg, FieldMemOperand(input_reg, HeapNumber::kValueOffset));
4826 if (deoptimize_on_minus_zero) {
4827 __ mfc1(at, result_reg.low());
4828 __ Branch(&done, ne, at, Operand(zero_reg));
4829 __ Mfhc1(scratch, result_reg);
4830 DeoptimizeIf(eq, instr, Deoptimizer::kMinusZero, scratch,
4831 Operand(HeapNumber::kSignMask));
4832 }
4833 __ Branch(&done);
4834 if (can_convert_undefined_to_nan) {
4835 __ bind(&convert);
4836 // Convert undefined (and hole) to NaN.
4837 __ LoadRoot(at, Heap::kUndefinedValueRootIndex);
4838 DeoptimizeIf(ne, instr, Deoptimizer::kNotAHeapNumberUndefined, input_reg,
4839 Operand(at));
4840 __ LoadRoot(scratch, Heap::kNanValueRootIndex);
4841 __ ldc1(result_reg, FieldMemOperand(scratch, HeapNumber::kValueOffset));
4842 __ Branch(&done);
4843 }
4844 } else {
4845 __ SmiUntag(scratch, input_reg);
4846 DCHECK(mode == NUMBER_CANDIDATE_IS_SMI);
4847 }
4848 // Smi to double register conversion.
4849 __ bind(&load_smi);
4850 // scratch: untagged value of input_reg
4851 __ mtc1(scratch, result_reg);
4852 __ cvt_d_w(result_reg, result_reg);
4853 __ bind(&done);
4854 }
4855
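// A minimal sketch (not V8 code) of the minus-zero test performed above with
// Mfhc1 and HeapNumber::kSignMask: the low 32 bits of -0.0 are zero, so only
// the sign bit in the high word distinguishes it from +0.0.
#include <cstdint>
#include <cstring>

static bool IsMinusZero(double value) {
  uint64_t bits;
  std::memcpy(&bits, &value, sizeof(bits));
  uint32_t lo = static_cast<uint32_t>(bits);
  uint32_t hi = static_cast<uint32_t>(bits >> 32);
  return lo == 0 && (hi & 0x80000000u) != 0;  // 0x80000000 plays the role of kSignMask.
}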
4856
4857 void LCodeGen::DoDeferredTaggedToI(LTaggedToI* instr) {
4858 Register input_reg = ToRegister(instr->value());
4859 Register scratch1 = scratch0();
4860 Register scratch2 = ToRegister(instr->temp());
4861 DoubleRegister double_scratch = double_scratch0();
4862 DoubleRegister double_scratch2 = ToDoubleRegister(instr->temp2());
4863
4864 DCHECK(!scratch1.is(input_reg) && !scratch1.is(scratch2));
4865 DCHECK(!scratch2.is(input_reg) && !scratch2.is(scratch1));
4866
4867 Label done;
4868
4869 // The input is a tagged HeapObject.
4870 // Heap number map check.
4871 __ lw(scratch1, FieldMemOperand(input_reg, HeapObject::kMapOffset));
4872 __ LoadRoot(at, Heap::kHeapNumberMapRootIndex);
4873 // The 'at' value and the scratch1 map value are used by the tests in both
4874 // clauses of the if.
4875
4876 if (instr->truncating()) {
4877 // Perform a truncating conversion of a floating point number, as used by
4878 // the JS bitwise operations.
4879 Label no_heap_number, check_bools, check_false;
4880 // Check HeapNumber map.
4881 __ Branch(USE_DELAY_SLOT, &no_heap_number, ne, scratch1, Operand(at));
4882 __ mov(scratch2, input_reg); // In delay slot.
4883 __ TruncateHeapNumberToI(input_reg, scratch2);
4884 __ Branch(&done);
4885
4886 // Check for oddballs. Undefined and false are converted to zero, and true
4887 // to one, for truncating conversions.
4888 __ bind(&no_heap_number);
4889 __ LoadRoot(at, Heap::kUndefinedValueRootIndex);
4890 __ Branch(&check_bools, ne, input_reg, Operand(at));
4891 DCHECK(ToRegister(instr->result()).is(input_reg));
4892 __ Branch(USE_DELAY_SLOT, &done);
4893 __ mov(input_reg, zero_reg); // In delay slot.
4894
4895 __ bind(&check_bools);
4896 __ LoadRoot(at, Heap::kTrueValueRootIndex);
4897 __ Branch(&check_false, ne, scratch2, Operand(at));
4898 __ Branch(USE_DELAY_SLOT, &done);
4899 __ li(input_reg, Operand(1)); // In delay slot.
4900
4901 __ bind(&check_false);
4902 __ LoadRoot(at, Heap::kFalseValueRootIndex);
4903 DeoptimizeIf(ne, instr, Deoptimizer::kNotAHeapNumberUndefinedBoolean,
4904 scratch2, Operand(at));
4905 __ Branch(USE_DELAY_SLOT, &done);
4906 __ mov(input_reg, zero_reg); // In delay slot.
4907 } else {
4908 DeoptimizeIf(ne, instr, Deoptimizer::kNotAHeapNumber, scratch1,
4909 Operand(at));
4910
4911 // Load the double value.
4912 __ ldc1(double_scratch,
4913 FieldMemOperand(input_reg, HeapNumber::kValueOffset));
4914
4915 Register except_flag = scratch2;
4916 __ EmitFPUTruncate(kRoundToZero,
4917 input_reg,
4918 double_scratch,
4919 scratch1,
4920 double_scratch2,
4921 except_flag,
4922 kCheckForInexactConversion);
4923
4924 DeoptimizeIf(ne, instr, Deoptimizer::kLostPrecisionOrNaN, except_flag,
4925 Operand(zero_reg));
4926
4927 if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) {
4928 __ Branch(&done, ne, input_reg, Operand(zero_reg));
4929
4930 __ Mfhc1(scratch1, double_scratch);
4931 __ And(scratch1, scratch1, Operand(HeapNumber::kSignMask));
4932 DeoptimizeIf(ne, instr, Deoptimizer::kMinusZero, scratch1,
4933 Operand(zero_reg));
4934 }
4935 }
4936 __ bind(&done);
4937 }
4938
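// A minimal sketch (not V8 code) of the truncating conversion that the
// truncating branch above delegates to TruncateHeapNumberToI, i.e. the
// ECMAScript ToInt32-style truncation used by JS bitwise operations: NaN and
// the infinities become 0, and finite values are truncated toward zero
// modulo 2^32.
#include <cmath>
#include <cstdint>

static int32_t TruncateToInt32(double value) {
  if (!std::isfinite(value)) return 0;
  double truncated = std::trunc(value);
  double modulo = std::fmod(truncated, 4294967296.0);  // 2^32
  if (modulo < 0) modulo += 4294967296.0;
  return static_cast<int32_t>(static_cast<uint32_t>(modulo));
}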
4939
4940 void LCodeGen::DoTaggedToI(LTaggedToI* instr) {
4941 class DeferredTaggedToI final : public LDeferredCode {
4942 public:
4943 DeferredTaggedToI(LCodeGen* codegen, LTaggedToI* instr)
4944 : LDeferredCode(codegen), instr_(instr) { }
4945 void Generate() override { codegen()->DoDeferredTaggedToI(instr_); }
4946 LInstruction* instr() override { return instr_; }
4947
4948 private:
4949 LTaggedToI* instr_;
4950 };
4951
4952 LOperand* input = instr->value();
4953 DCHECK(input->IsRegister());
4954 DCHECK(input->Equals(instr->result()));
4955
4956 Register input_reg = ToRegister(input);
4957
4958 if (instr->hydrogen()->value()->representation().IsSmi()) {
4959 __ SmiUntag(input_reg);
4960 } else {
4961 DeferredTaggedToI* deferred = new(zone()) DeferredTaggedToI(this, instr);
4962
4963 // Let the deferred code handle the HeapObject case.
4964 __ JumpIfNotSmi(input_reg, deferred->entry());
4965
4966 // Smi to int32 conversion.
4967 __ SmiUntag(input_reg);
4968 __ bind(deferred->exit());
4969 }
4970 }
4971
4972
4973 void LCodeGen::DoNumberUntagD(LNumberUntagD* instr) {
4974 LOperand* input = instr->value();
4975 DCHECK(input->IsRegister());
4976 LOperand* result = instr->result();
4977 DCHECK(result->IsDoubleRegister());
4978
4979 Register input_reg = ToRegister(input);
4980 DoubleRegister result_reg = ToDoubleRegister(result);
4981
4982 HValue* value = instr->hydrogen()->value();
4983 NumberUntagDMode mode = value->representation().IsSmi()
4984 ? NUMBER_CANDIDATE_IS_SMI : NUMBER_CANDIDATE_IS_ANY_TAGGED;
4985
4986 EmitNumberUntagD(instr, input_reg, result_reg, mode);
4987 }
4988
4989
4990 void LCodeGen::DoDoubleToI(LDoubleToI* instr) {
4991 Register result_reg = ToRegister(instr->result());
4992 Register scratch1 = scratch0();
4993 DoubleRegister double_input = ToDoubleRegister(instr->value());
4994
4995 if (instr->truncating()) {
4996 __ TruncateDoubleToI(result_reg, double_input);
4997 } else {
4998 Register except_flag = LCodeGen::scratch1();
4999
5000 __ EmitFPUTruncate(kRoundToMinusInf,
5001 result_reg,
5002 double_input,
5003 scratch1,
5004 double_scratch0(),
5005 except_flag,
5006 kCheckForInexactConversion);
5007
5008 // Deopt if the operation did not succeed (except_flag != 0).
5009 DeoptimizeIf(ne, instr, Deoptimizer::kLostPrecisionOrNaN, except_flag,
5010 Operand(zero_reg));
5011
5012 if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) {
5013 Label done;
5014 __ Branch(&done, ne, result_reg, Operand(zero_reg));
5015 __ Mfhc1(scratch1, double_input);
5016 __ And(scratch1, scratch1, Operand(HeapNumber::kSignMask));
5017 DeoptimizeIf(ne, instr, Deoptimizer::kMinusZero, scratch1,
5018 Operand(zero_reg));
5019 __ bind(&done);
5020 }
5021 }
5022 }
5023
5024
5025 void LCodeGen::DoDoubleToSmi(LDoubleToSmi* instr) {
5026 Register result_reg = ToRegister(instr->result());
5027 Register scratch1 = LCodeGen::scratch0();
5028 DoubleRegister double_input = ToDoubleRegister(instr->value());
5029
5030 if (instr->truncating()) {
5031 __ TruncateDoubleToI(result_reg, double_input);
5032 } else {
5033 Register except_flag = LCodeGen::scratch1();
5034
5035 __ EmitFPUTruncate(kRoundToMinusInf,
5036 result_reg,
5037 double_input,
5038 scratch1,
5039 double_scratch0(),
5040 except_flag,
5041 kCheckForInexactConversion);
5042
5043 // Deopt if the operation did not succeed (except_flag != 0).
5044 DeoptimizeIf(ne, instr, Deoptimizer::kLostPrecisionOrNaN, except_flag,
5045 Operand(zero_reg));
5046
5047 if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) {
5048 Label done;
5049 __ Branch(&done, ne, result_reg, Operand(zero_reg));
5050 __ Mfhc1(scratch1, double_input);
5051 __ And(scratch1, scratch1, Operand(HeapNumber::kSignMask));
5052 DeoptimizeIf(ne, instr, Deoptimizer::kMinusZero, scratch1,
5053 Operand(zero_reg));
5054 __ bind(&done);
5055 }
5056 }
5057 __ SmiTagCheckOverflow(result_reg, result_reg, scratch1);
5058 DeoptimizeIf(lt, instr, Deoptimizer::kOverflow, scratch1, Operand(zero_reg));
5059 }
5060
5061
5062 void LCodeGen::DoCheckSmi(LCheckSmi* instr) {
5063 LOperand* input = instr->value();
5064 __ SmiTst(ToRegister(input), at);
5065 DeoptimizeIf(ne, instr, Deoptimizer::kNotASmi, at, Operand(zero_reg));
5066 }
5067
5068
5069 void LCodeGen::DoCheckNonSmi(LCheckNonSmi* instr) {
5070 if (!instr->hydrogen()->value()->type().IsHeapObject()) {
5071 LOperand* input = instr->value();
5072 __ SmiTst(ToRegister(input), at);
5073 DeoptimizeIf(eq, instr, Deoptimizer::kSmi, at, Operand(zero_reg));
5074 }
5075 }
5076
5077
5078 void LCodeGen::DoCheckArrayBufferNotNeutered(
5079 LCheckArrayBufferNotNeutered* instr) {
5080 Register view = ToRegister(instr->view());
5081 Register scratch = scratch0();
5082
5083 __ lw(scratch, FieldMemOperand(view, JSArrayBufferView::kBufferOffset));
5084 __ lw(scratch, FieldMemOperand(scratch, JSArrayBuffer::kBitFieldOffset));
5085 __ And(at, scratch, 1 << JSArrayBuffer::WasNeutered::kShift);
5086 DeoptimizeIf(ne, instr, Deoptimizer::kOutOfBounds, at, Operand(zero_reg));
5087 }
5088
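// A minimal sketch (not V8 code) of the check above: the view's backing
// JSArrayBuffer carries a WasNeutered bit in its bit field, and a set bit
// triggers a deopt (reported as kOutOfBounds).
#include <cstdint>

static bool IsBufferNeutered(uint32_t buffer_bit_field, int was_neutered_shift) {
  return (buffer_bit_field & (1u << was_neutered_shift)) != 0;
}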
5089
5090 void LCodeGen::DoCheckInstanceType(LCheckInstanceType* instr) {
5091 Register input = ToRegister(instr->value());
5092 Register scratch = scratch0();
5093
5094 __ GetObjectType(input, scratch, scratch);
5095
5096 if (instr->hydrogen()->is_interval_check()) {
5097 InstanceType first;
5098 InstanceType last;
5099 instr->hydrogen()->GetCheckInterval(&first, &last);
5100
5101 // If there is only one type in the interval, check for equality.
5102 if (first == last) {
5103 DeoptimizeIf(ne, instr, Deoptimizer::kWrongInstanceType, scratch,
5104 Operand(first));
5105 } else {
5106 DeoptimizeIf(lo, instr, Deoptimizer::kWrongInstanceType, scratch,
5107 Operand(first));
5108 // Omit check for the last type.
5109 if (last != LAST_TYPE) {
5110 DeoptimizeIf(hi, instr, Deoptimizer::kWrongInstanceType, scratch,
5111 Operand(last));
5112 }
5113 }
5114 } else {
5115 uint8_t mask;
5116 uint8_t tag;
5117 instr->hydrogen()->GetCheckMaskAndTag(&mask, &tag);
5118
5119 if (base::bits::IsPowerOfTwo32(mask)) {
5120 DCHECK(tag == 0 || base::bits::IsPowerOfTwo32(tag));
5121 __ And(at, scratch, mask);
5122 DeoptimizeIf(tag == 0 ? ne : eq, instr, Deoptimizer::kWrongInstanceType,
5123 at, Operand(zero_reg));
5124 } else {
5125 __ And(scratch, scratch, Operand(mask));
5126 DeoptimizeIf(ne, instr, Deoptimizer::kWrongInstanceType, scratch,
5127 Operand(tag));
5128 }
5129 }
5130 }
5131
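// A minimal sketch (not V8 code) of the mask/tag form of the instance type
// check above. The generic test is (instance_type & mask) == tag; when the
// mask has a single bit set and the tag is either 0 or equal to the mask, a
// single And followed by a zero/non-zero branch suffices, which is the
// special case taken for power-of-two masks.
#include <cstdint>

static bool InstanceTypeMatches(uint8_t instance_type, uint8_t mask,
                                uint8_t tag) {
  return (instance_type & mask) == tag;
}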
5132
5133 void LCodeGen::DoCheckValue(LCheckValue* instr) {
5134 Register reg = ToRegister(instr->value());
5135 Handle<HeapObject> object = instr->hydrogen()->object().handle();
5136 AllowDeferredHandleDereference smi_check;
5137 if (isolate()->heap()->InNewSpace(*object)) {
5138 Register reg = ToRegister(instr->value());
5139 Handle<Cell> cell = isolate()->factory()->NewCell(object);
5140 __ li(at, Operand(cell));
5141 __ lw(at, FieldMemOperand(at, Cell::kValueOffset));
5142 DeoptimizeIf(ne, instr, Deoptimizer::kValueMismatch, reg, Operand(at));
5143 } else {
5144 DeoptimizeIf(ne, instr, Deoptimizer::kValueMismatch, reg, Operand(object));
5145 }
5146 }
5147
5148
5149 void LCodeGen::DoDeferredInstanceMigration(LCheckMaps* instr, Register object) {
5150 {
5151 PushSafepointRegistersScope scope(this);
5152 __ push(object);
5153 __ mov(cp, zero_reg);
5154 __ CallRuntimeSaveDoubles(Runtime::kTryMigrateInstance);
5155 RecordSafepointWithRegisters(
5156 instr->pointer_map(), 1, Safepoint::kNoLazyDeopt);
5157 __ StoreToSafepointRegisterSlot(v0, scratch0());
5158 }
5159 __ SmiTst(scratch0(), at);
5160 DeoptimizeIf(eq, instr, Deoptimizer::kInstanceMigrationFailed, at,
5161 Operand(zero_reg));
5162 }
5163
5164
5165 void LCodeGen::DoCheckMaps(LCheckMaps* instr) {
5166 class DeferredCheckMaps final : public LDeferredCode {
5167 public:
5168 DeferredCheckMaps(LCodeGen* codegen, LCheckMaps* instr, Register object)
5169 : LDeferredCode(codegen), instr_(instr), object_(object) {
5170 SetExit(check_maps());
5171 }
5172 void Generate() override {
5173 codegen()->DoDeferredInstanceMigration(instr_, object_);
5174 }
5175 Label* check_maps() { return &check_maps_; }
5176 LInstruction* instr() override { return instr_; }
5177
5178 private:
5179 LCheckMaps* instr_;
5180 Label check_maps_;
5181 Register object_;
5182 };
5183
5184 if (instr->hydrogen()->IsStabilityCheck()) {
5185 const UniqueSet<Map>* maps = instr->hydrogen()->maps();
5186 for (int i = 0; i < maps->size(); ++i) {
5187 AddStabilityDependency(maps->at(i).handle());
5188 }
5189 return;
5190 }
5191
5192 Register map_reg = scratch0();
5193 LOperand* input = instr->value();
5194 DCHECK(input->IsRegister());
5195 Register reg = ToRegister(input);
5196 __ lw(map_reg, FieldMemOperand(reg, HeapObject::kMapOffset));
5197
5198 DeferredCheckMaps* deferred = NULL;
5199 if (instr->hydrogen()->HasMigrationTarget()) {
5200 deferred = new(zone()) DeferredCheckMaps(this, instr, reg);
5201 __ bind(deferred->check_maps());
5202 }
5203
5204 const UniqueSet<Map>* maps = instr->hydrogen()->maps();
5205 Label success;
5206 for (int i = 0; i < maps->size() - 1; i++) {
5207 Handle<Map> map = maps->at(i).handle();
5208 __ CompareMapAndBranch(map_reg, map, &success, eq, &success);
5209 }
5210 Handle<Map> map = maps->at(maps->size() - 1).handle();
5211 // Do the CompareMap() directly within the Branch() and DeoptimizeIf().
5212 if (instr->hydrogen()->HasMigrationTarget()) {
5213 __ Branch(deferred->entry(), ne, map_reg, Operand(map));
5214 } else {
5215 DeoptimizeIf(ne, instr, Deoptimizer::kWrongMap, map_reg, Operand(map));
5216 }
5217
5218 __ bind(&success);
5219 }
5220
5221
5222 void LCodeGen::DoClampDToUint8(LClampDToUint8* instr) {
5223 DoubleRegister value_reg = ToDoubleRegister(instr->unclamped());
5224 Register result_reg = ToRegister(instr->result());
5225 DoubleRegister temp_reg = ToDoubleRegister(instr->temp());
5226 __ ClampDoubleToUint8(result_reg, value_reg, temp_reg);
5227 }
5228
5229
5230 void LCodeGen::DoClampIToUint8(LClampIToUint8* instr) {
5231 Register unclamped_reg = ToRegister(instr->unclamped());
5232 Register result_reg = ToRegister(instr->result());
5233 __ ClampUint8(result_reg, unclamped_reg);
5234 }
5235
5236
5237 void LCodeGen::DoClampTToUint8(LClampTToUint8* instr) {
5238 Register scratch = scratch0();
5239 Register input_reg = ToRegister(instr->unclamped());
5240 Register result_reg = ToRegister(instr->result());
5241 DoubleRegister temp_reg = ToDoubleRegister(instr->temp());
5242 Label is_smi, done, heap_number;
5243
5244 // Both smi and heap number cases are handled.
5245 __ UntagAndJumpIfSmi(scratch, input_reg, &is_smi);
5246
5247 // Check for a heap number.
5248 __ lw(scratch, FieldMemOperand(input_reg, HeapObject::kMapOffset));
5249 __ Branch(&heap_number, eq, scratch, Operand(factory()->heap_number_map()));
5250
5251 // Check for undefined. Undefined is converted to zero for clamping
5252 // conversions.
5253 DeoptimizeIf(ne, instr, Deoptimizer::kNotAHeapNumberUndefined, input_reg,
5254 Operand(factory()->undefined_value()));
5255 __ mov(result_reg, zero_reg);
5256 __ jmp(&done);
5257
5258 // Heap number
5259 __ bind(&heap_number);
5260 __ ldc1(double_scratch0(), FieldMemOperand(input_reg,
5261 HeapNumber::kValueOffset));
5262 __ ClampDoubleToUint8(result_reg, double_scratch0(), temp_reg);
5263 __ jmp(&done);
5264
5265 __ bind(&is_smi);
5266 __ ClampUint8(result_reg, scratch);
5267
5268 __ bind(&done);
5269 }
5270
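// A minimal sketch (not V8 code) of the clamping done above for the three
// cases DoClampTToUint8 handles: smis, heap numbers and undefined. The double
// variant simplifies the rounding (V8's ClampDoubleToUint8 rounds half to
// even); NaN, like undefined, clamps to 0.
#include <algorithm>
#include <cmath>
#include <cstdint>

static uint8_t ClampIntToUint8Sketch(int value) {  // smi case (ClampUint8)
  return static_cast<uint8_t>(std::min(std::max(value, 0), 255));
}

static uint8_t ClampDoubleToUint8Sketch(double value) {  // heap number case
  if (std::isnan(value)) return 0;
  double clamped = std::min(std::max(value, 0.0), 255.0);
  return static_cast<uint8_t>(clamped + 0.5);  // simplified rounding
}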
5271
5272 void LCodeGen::DoDoubleBits(LDoubleBits* instr) {
5273 DoubleRegister value_reg = ToDoubleRegister(instr->value());
5274 Register result_reg = ToRegister(instr->result());
5275 if (instr->hydrogen()->bits() == HDoubleBits::HIGH) {
5276 __ FmoveHigh(result_reg, value_reg);
5277 } else {
5278 __ FmoveLow(result_reg, value_reg);
5279 }
5280 }
5281
5282
5283 void LCodeGen::DoConstructDouble(LConstructDouble* instr) {
5284 Register hi_reg = ToRegister(instr->hi());
5285 Register lo_reg = ToRegister(instr->lo());
5286 DoubleRegister result_reg = ToDoubleRegister(instr->result());
5287 __ Move(result_reg, lo_reg, hi_reg);
5288 }
5289
5290
5291 void LCodeGen::DoAllocate(LAllocate* instr) {
5292 class DeferredAllocate final : public LDeferredCode {
5293 public:
5294 DeferredAllocate(LCodeGen* codegen, LAllocate* instr)
5295 : LDeferredCode(codegen), instr_(instr) { }
5296 void Generate() override { codegen()->DoDeferredAllocate(instr_); }
5297 LInstruction* instr() override { return instr_; }
5298
5299 private:
5300 LAllocate* instr_;
5301 };
5302
5303 DeferredAllocate* deferred =
5304 new(zone()) DeferredAllocate(this, instr);
5305
5306 Register result = ToRegister(instr->result());
5307 Register scratch = ToRegister(instr->temp1());
5308 Register scratch2 = ToRegister(instr->temp2());
5309
5310 // Allocate memory for the object.
5311 AllocationFlags flags = TAG_OBJECT;
5312 if (instr->hydrogen()->MustAllocateDoubleAligned()) {
5313 flags = static_cast<AllocationFlags>(flags | DOUBLE_ALIGNMENT);
5314 }
5315 if (instr->hydrogen()->IsOldSpaceAllocation()) {
5316 DCHECK(!instr->hydrogen()->IsNewSpaceAllocation());
5317 flags = static_cast<AllocationFlags>(flags | PRETENURE);
5318 }
5319 if (instr->size()->IsConstantOperand()) {
5320 int32_t size = ToInteger32(LConstantOperand::cast(instr->size()));
5321 CHECK(size <= Page::kMaxRegularHeapObjectSize);
5322 __ Allocate(size, result, scratch, scratch2, deferred->entry(), flags);
5323 } else {
5324 Register size = ToRegister(instr->size());
5325 __ Allocate(size, result, scratch, scratch2, deferred->entry(), flags);
5326 }
5327
5328 __ bind(deferred->exit());
5329
5330 if (instr->hydrogen()->MustPrefillWithFiller()) {
5331 STATIC_ASSERT(kHeapObjectTag == 1);
5332 if (instr->size()->IsConstantOperand()) {
5333 int32_t size = ToInteger32(LConstantOperand::cast(instr->size()));
5334 __ li(scratch, Operand(size - kHeapObjectTag));
5335 } else {
5336 __ Subu(scratch, ToRegister(instr->size()), Operand(kHeapObjectTag));
5337 }
5338 __ li(scratch2, Operand(isolate()->factory()->one_pointer_filler_map()));
5339 Label loop;
5340 __ bind(&loop);
5341 __ Subu(scratch, scratch, Operand(kPointerSize));
5342 __ Addu(at, result, Operand(scratch));
5343 __ sw(scratch2, MemOperand(at));
5344 __ Branch(&loop, ge, scratch, Operand(zero_reg));
5345 }
5346 }
5347
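// A minimal sketch (not V8 code) of the prefill loop above: counting down
// from the end of the object, every pointer-sized slot is written with the
// one-pointer filler map so the not-yet-initialized object keeps the heap
// iterable. raw_start stands for the untagged address, i.e. result minus
// kHeapObjectTag.
#include <cstdint>

static void PrefillWithFiller(uintptr_t raw_start, int32_t size_in_bytes,
                              uintptr_t filler_map, int32_t pointer_size) {
  for (int32_t offset = size_in_bytes - pointer_size; offset >= 0;
       offset -= pointer_size) {
    *reinterpret_cast<uintptr_t*>(raw_start + offset) = filler_map;
  }
}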
5348
5349 void LCodeGen::DoDeferredAllocate(LAllocate* instr) {
5350 Register result = ToRegister(instr->result());
5351
5352 // TODO(3095996): Get rid of this. For now, we need to make the
5353 // result register contain a valid pointer because it is already
5354 // contained in the register pointer map.
5355 __ mov(result, zero_reg);
5356
5357 PushSafepointRegistersScope scope(this);
5358 if (instr->size()->IsRegister()) {
5359 Register size = ToRegister(instr->size());
5360 DCHECK(!size.is(result));
5361 __ SmiTag(size);
5362 __ push(size);
5363 } else {
5364 int32_t size = ToInteger32(LConstantOperand::cast(instr->size()));
5365 if (size >= 0 && size <= Smi::kMaxValue) {
5366 __ Push(Smi::FromInt(size));
5367 } else {
5368 // We should never get here at runtime => abort
5369 __ stop("invalid allocation size");
5370 return;
5371 }
5372 }
5373
5374 int flags = AllocateDoubleAlignFlag::encode(
5375 instr->hydrogen()->MustAllocateDoubleAligned());
5376 if (instr->hydrogen()->IsOldSpaceAllocation()) {
5377 DCHECK(!instr->hydrogen()->IsNewSpaceAllocation());
5378 flags = AllocateTargetSpace::update(flags, OLD_SPACE);
5379 } else {
5380 flags = AllocateTargetSpace::update(flags, NEW_SPACE);
5381 }
5382 __ Push(Smi::FromInt(flags));
5383
5384 CallRuntimeFromDeferred(
5385 Runtime::kAllocateInTargetSpace, 2, instr, instr->context());
5386 __ StoreToSafepointRegisterSlot(v0, result);
5387 }
5388
5389
5390 void LCodeGen::DoToFastProperties(LToFastProperties* instr) {
5391 DCHECK(ToRegister(instr->value()).is(a0));
5392 DCHECK(ToRegister(instr->result()).is(v0));
5393 __ push(a0);
5394 CallRuntime(Runtime::kToFastProperties, 1, instr);
5395 }
5396
5397
5398 void LCodeGen::DoRegExpLiteral(LRegExpLiteral* instr) {
5399 DCHECK(ToRegister(instr->context()).is(cp));
5400 Label materialized;
5401 // Registers will be used as follows:
5402 // t3 = literals array.
5403 // a1 = regexp literal.
5404 // a0 = regexp literal clone.
5405 // a2 and t0-t2 are used as temporaries.
5406 int literal_offset =
5407 LiteralsArray::OffsetOfLiteralAt(instr->hydrogen()->literal_index());
5408 __ li(t3, instr->hydrogen()->literals());
5409 __ lw(a1, FieldMemOperand(t3, literal_offset));
5410 __ LoadRoot(at, Heap::kUndefinedValueRootIndex);
5411 __ Branch(&materialized, ne, a1, Operand(at));
5412
5413 // Create the regexp literal using a runtime function.
5414 // The result will be in v0.
5415 __ li(t2, Operand(Smi::FromInt(instr->hydrogen()->literal_index())));
5416 __ li(t1, Operand(instr->hydrogen()->pattern()));
5417 __ li(t0, Operand(instr->hydrogen()->flags()));
5418 __ Push(t3, t2, t1, t0);
5419 CallRuntime(Runtime::kMaterializeRegExpLiteral, 4, instr);
5420 __ mov(a1, v0);
5421
5422 __ bind(&materialized);
5423 int size = JSRegExp::kSize + JSRegExp::kInObjectFieldCount * kPointerSize;
5424 Label allocated, runtime_allocate;
5425
5426 __ Allocate(size, v0, a2, a3, &runtime_allocate, TAG_OBJECT);
5427 __ jmp(&allocated);
5428
5429 __ bind(&runtime_allocate);
5430 __ li(a0, Operand(Smi::FromInt(size)));
5431 __ Push(a1, a0);
5432 CallRuntime(Runtime::kAllocateInNewSpace, 1, instr);
5433 __ pop(a1);
5434
5435 __ bind(&allocated);
5436 // Copy the content into the newly allocated memory.
5437 // (Unroll the copy loop once for better throughput.)
5438 for (int i = 0; i < size - kPointerSize; i += 2 * kPointerSize) {
5439 __ lw(a3, FieldMemOperand(a1, i));
5440 __ lw(a2, FieldMemOperand(a1, i + kPointerSize));
5441 __ sw(a3, FieldMemOperand(v0, i));
5442 __ sw(a2, FieldMemOperand(v0, i + kPointerSize));
5443 }
5444 if ((size % (2 * kPointerSize)) != 0) {
5445 __ lw(a3, FieldMemOperand(a1, size - kPointerSize));
5446 __ sw(a3, FieldMemOperand(v0, size - kPointerSize));
5447 }
5448 }
5449
5450
5451 void LCodeGen::DoTypeof(LTypeof* instr) {
5452 DCHECK(ToRegister(instr->value()).is(a3));
5453 DCHECK(ToRegister(instr->result()).is(v0));
5454 Label end, do_call;
5455 Register value_register = ToRegister(instr->value());
5456 __ JumpIfNotSmi(value_register, &do_call);
5457 __ li(v0, Operand(isolate()->factory()->number_string()));
5458 __ jmp(&end);
5459 __ bind(&do_call);
5460 TypeofStub stub(isolate());
5461 CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
5462 __ bind(&end);
5463 }
5464
5465
5466 void LCodeGen::DoTypeofIsAndBranch(LTypeofIsAndBranch* instr) {
5467 Register input = ToRegister(instr->value());
5468
5469 Register cmp1 = no_reg;
5470 Operand cmp2 = Operand(no_reg);
5471
5472 Condition final_branch_condition = EmitTypeofIs(instr->TrueLabel(chunk_),
5473 instr->FalseLabel(chunk_),
5474 input,
5475 instr->type_literal(),
5476 &cmp1,
5477 &cmp2);
5478
5479 DCHECK(cmp1.is_valid());
5480 DCHECK(!cmp2.is_reg() || cmp2.rm().is_valid());
5481
5482 if (final_branch_condition != kNoCondition) {
5483 EmitBranch(instr, final_branch_condition, cmp1, cmp2);
5484 }
5485 }
5486
5487
5488 Condition LCodeGen::EmitTypeofIs(Label* true_label,
5489 Label* false_label,
5490 Register input,
5491 Handle<String> type_name,
5492 Register* cmp1,
5493 Operand* cmp2) {
5494 // This function makes heavy use of branch delay slots. They are used to
5495 // load values that are valid regardless of the type of the input
5496 // register.
5497 Condition final_branch_condition = kNoCondition;
5498 Register scratch = scratch0();
5499 Factory* factory = isolate()->factory();
5500 if (String::Equals(type_name, factory->number_string())) {
5501 __ JumpIfSmi(input, true_label);
5502 __ lw(input, FieldMemOperand(input, HeapObject::kMapOffset));
5503 __ LoadRoot(at, Heap::kHeapNumberMapRootIndex);
5504 *cmp1 = input;
5505 *cmp2 = Operand(at);
5506 final_branch_condition = eq;
5507
5508 } else if (String::Equals(type_name, factory->string_string())) {
5509 __ JumpIfSmi(input, false_label);
5510 __ GetObjectType(input, input, scratch);
5511 *cmp1 = scratch;
5512 *cmp2 = Operand(FIRST_NONSTRING_TYPE);
5513 final_branch_condition = lt;
5514
5515 } else if (String::Equals(type_name, factory->symbol_string())) {
5516 __ JumpIfSmi(input, false_label);
5517 __ GetObjectType(input, input, scratch);
5518 *cmp1 = scratch;
5519 *cmp2 = Operand(SYMBOL_TYPE);
5520 final_branch_condition = eq;
5521
5522 } else if (String::Equals(type_name, factory->boolean_string())) {
5523 __ LoadRoot(at, Heap::kTrueValueRootIndex);
5524 __ Branch(USE_DELAY_SLOT, true_label, eq, at, Operand(input));
5525 __ LoadRoot(at, Heap::kFalseValueRootIndex);
5526 *cmp1 = at;
5527 *cmp2 = Operand(input);
5528 final_branch_condition = eq;
5529
5530 } else if (String::Equals(type_name, factory->undefined_string())) {
5531 __ LoadRoot(at, Heap::kUndefinedValueRootIndex);
5532 __ Branch(USE_DELAY_SLOT, true_label, eq, at, Operand(input));
5533 // The first instruction of JumpIfSmi is an And; it is safe in the delay
5534 // slot.
5535 __ JumpIfSmi(input, false_label);
5536 // Check for undetectable objects => true.
5537 __ lw(input, FieldMemOperand(input, HeapObject::kMapOffset));
5538 __ lbu(at, FieldMemOperand(input, Map::kBitFieldOffset));
5539 __ And(at, at, 1 << Map::kIsUndetectable);
5540 *cmp1 = at;
5541 *cmp2 = Operand(zero_reg);
5542 final_branch_condition = ne;
5543
5544 } else if (String::Equals(type_name, factory->function_string())) {
5545 __ JumpIfSmi(input, false_label);
5546 __ lw(scratch, FieldMemOperand(input, HeapObject::kMapOffset));
5547 __ lbu(scratch, FieldMemOperand(scratch, Map::kBitFieldOffset));
5548 __ And(scratch, scratch,
5549 Operand((1 << Map::kIsCallable) | (1 << Map::kIsUndetectable)));
5550 *cmp1 = scratch;
5551 *cmp2 = Operand(1 << Map::kIsCallable);
5552 final_branch_condition = eq;
5553
5554 } else if (String::Equals(type_name, factory->object_string())) {
5555 __ JumpIfSmi(input, false_label);
5556 __ LoadRoot(at, Heap::kNullValueRootIndex);
5557 __ Branch(USE_DELAY_SLOT, true_label, eq, at, Operand(input));
5558 STATIC_ASSERT(LAST_SPEC_OBJECT_TYPE == LAST_TYPE);
5559 __ GetObjectType(input, scratch, scratch1());
5560 __ Branch(false_label, lt, scratch1(), Operand(FIRST_SPEC_OBJECT_TYPE));
5561 // Check for callable or undetectable objects => false.
5562 __ lbu(scratch, FieldMemOperand(scratch, Map::kBitFieldOffset));
5563 __ And(at, scratch,
5564 Operand((1 << Map::kIsCallable) | (1 << Map::kIsUndetectable)));
5565 *cmp1 = at;
5566 *cmp2 = Operand(zero_reg);
5567 final_branch_condition = eq;
5568
5569 // clang-format off
5570 #define SIMD128_TYPE(TYPE, Type, type, lane_count, lane_type) \
5571 } else if (String::Equals(type_name, factory->type##_string())) { \
5572 __ JumpIfSmi(input, false_label); \
5573 __ lw(input, FieldMemOperand(input, HeapObject::kMapOffset)); \
5574 __ LoadRoot(at, Heap::k##Type##MapRootIndex); \
5575 *cmp1 = input; \
5576 *cmp2 = Operand(at); \
5577 final_branch_condition = eq;
5578 SIMD128_TYPES(SIMD128_TYPE)
5579 #undef SIMD128_TYPE
5580 // clang-format on
5581
5582 } else {
5583 *cmp1 = at;
5584 *cmp2 = Operand(zero_reg); // Set to valid regs, to avoid caller assertion.
5585 __ Branch(false_label);
5586 }
5587
5588 return final_branch_condition;
5589 }
5590
5591
5592 void LCodeGen::DoIsConstructCallAndBranch(LIsConstructCallAndBranch* instr) {
5593 Register temp1 = ToRegister(instr->temp());
5594
5595 EmitIsConstructCall(temp1, scratch0());
5596
5597 EmitBranch(instr, eq, temp1,
5598 Operand(Smi::FromInt(StackFrame::CONSTRUCT)));
5599 }
5600
5601
5602 void LCodeGen::EmitIsConstructCall(Register temp1, Register temp2) {
5603 DCHECK(!temp1.is(temp2));
5604 // Get the frame pointer for the calling frame.
5605 __ lw(temp1, MemOperand(fp, StandardFrameConstants::kCallerFPOffset));
5606
5607 // Skip the arguments adaptor frame if it exists.
5608 Label check_frame_marker;
5609 __ lw(temp2, MemOperand(temp1, StandardFrameConstants::kContextOffset));
5610 __ Branch(&check_frame_marker, ne, temp2,
5611 Operand(Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR)));
5612 __ lw(temp1, MemOperand(temp1, StandardFrameConstants::kCallerFPOffset));
5613
5614 // Check the marker in the calling frame.
5615 __ bind(&check_frame_marker);
5616 __ lw(temp1, MemOperand(temp1, StandardFrameConstants::kMarkerOffset));
5617 }
5618
5619
5620 void LCodeGen::EnsureSpaceForLazyDeopt(int space_needed) {
5621 if (info()->ShouldEnsureSpaceForLazyDeopt()) {
5622 // Ensure that we have enough space after the previous lazy-bailout
5623 // instruction for patching the code here.
5624 int current_pc = masm()->pc_offset();
5625 if (current_pc < last_lazy_deopt_pc_ + space_needed) {
5626 int padding_size = last_lazy_deopt_pc_ + space_needed - current_pc;
5627 DCHECK_EQ(0, padding_size % Assembler::kInstrSize);
5628 while (padding_size > 0) {
5629 __ nop();
5630 padding_size -= Assembler::kInstrSize;
5631 }
5632 }
5633 }
5634 last_lazy_deopt_pc_ = masm()->pc_offset();
5635 }
5636
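// A minimal sketch (not V8 code) of the padding computation above: enough
// nops are emitted so that at least space_needed bytes separate the previous
// lazy-deopt position from the current pc, keeping the region safely
// patchable.
static int PaddingInstructionsNeeded(int current_pc, int last_lazy_deopt_pc,
                                     int space_needed, int instr_size) {
  int padding_size = last_lazy_deopt_pc + space_needed - current_pc;
  return padding_size > 0 ? padding_size / instr_size : 0;
}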
5637
5638 void LCodeGen::DoLazyBailout(LLazyBailout* instr) {
5639 last_lazy_deopt_pc_ = masm()->pc_offset();
5640 DCHECK(instr->HasEnvironment());
5641 LEnvironment* env = instr->environment();
5642 RegisterEnvironmentForDeoptimization(env, Safepoint::kLazyDeopt);
5643 safepoints_.RecordLazyDeoptimizationIndex(env->deoptimization_index());
5644 }
5645
5646
5647 void LCodeGen::DoDeoptimize(LDeoptimize* instr) {
5648 Deoptimizer::BailoutType type = instr->hydrogen()->type();
5649 // TODO(danno): Stubs expect all deopts to be lazy for historical reasons (the
5650 // needed return address), even though the implementation of LAZY and EAGER is
5651 // now identical. When LAZY is eventually completely folded into EAGER, remove
5652 // the special case below.
5653 if (info()->IsStub() && type == Deoptimizer::EAGER) {
5654 type = Deoptimizer::LAZY;
5655 }
5656
5657 DeoptimizeIf(al, instr, instr->hydrogen()->reason(), type, zero_reg,
5658 Operand(zero_reg));
5659 }
5660
5661
5662 void LCodeGen::DoDummy(LDummy* instr) {
5663 // Nothing to see here, move on!
5664 }
5665
5666
5667 void LCodeGen::DoDummyUse(LDummyUse* instr) {
5668 // Nothing to see here, move on!
5669 }
5670
5671
5672 void LCodeGen::DoDeferredStackCheck(LStackCheck* instr) {
5673 PushSafepointRegistersScope scope(this);
5674 LoadContextFromDeferred(instr->context());
5675 __ CallRuntimeSaveDoubles(Runtime::kStackGuard);
5676 RecordSafepointWithLazyDeopt(
5677 instr, RECORD_SAFEPOINT_WITH_REGISTERS_AND_NO_ARGUMENTS);
5678 DCHECK(instr->HasEnvironment());
5679 LEnvironment* env = instr->environment();
5680 safepoints_.RecordLazyDeoptimizationIndex(env->deoptimization_index());
5681 }
5682
5683
5684 void LCodeGen::DoStackCheck(LStackCheck* instr) {
5685 class DeferredStackCheck final : public LDeferredCode {
5686 public:
5687 DeferredStackCheck(LCodeGen* codegen, LStackCheck* instr)
5688 : LDeferredCode(codegen), instr_(instr) { }
5689 void Generate() override { codegen()->DoDeferredStackCheck(instr_); }
5690 LInstruction* instr() override { return instr_; }
5691
5692 private:
5693 LStackCheck* instr_;
5694 };
5695
5696 DCHECK(instr->HasEnvironment());
5697 LEnvironment* env = instr->environment();
5698 // There is no LLazyBailout instruction for stack-checks. We have to
5699 // prepare for lazy deoptimization explicitly here.
5700 if (instr->hydrogen()->is_function_entry()) {
5701 // Perform stack overflow check.
5702 Label done;
5703 __ LoadRoot(at, Heap::kStackLimitRootIndex);
5704 __ Branch(&done, hs, sp, Operand(at));
5705 DCHECK(instr->context()->IsRegister());
5706 DCHECK(ToRegister(instr->context()).is(cp));
5707 CallCode(isolate()->builtins()->StackCheck(),
5708 RelocInfo::CODE_TARGET,
5709 instr);
5710 __ bind(&done);
5711 } else {
5712 DCHECK(instr->hydrogen()->is_backwards_branch());
5713 // Perform stack overflow check if this goto needs it before jumping.
5714 DeferredStackCheck* deferred_stack_check =
5715 new(zone()) DeferredStackCheck(this, instr);
5716 __ LoadRoot(at, Heap::kStackLimitRootIndex);
5717 __ Branch(deferred_stack_check->entry(), lo, sp, Operand(at));
5718 EnsureSpaceForLazyDeopt(Deoptimizer::patch_size());
5719 __ bind(instr->done_label());
5720 deferred_stack_check->SetExit(instr->done_label());
5721 RegisterEnvironmentForDeoptimization(env, Safepoint::kLazyDeopt);
5722 // Don't record a deoptimization index for the safepoint here.
5723 // This will be done explicitly when emitting the call and the safepoint in
5724 // the deferred code.
5725 }
5726 }
5727
5728
5729 void LCodeGen::DoOsrEntry(LOsrEntry* instr) {
5730 // This is a pseudo-instruction that ensures that the environment here is
5731 // properly registered for deoptimization and records the assembler's PC
5732 // offset.
5733 LEnvironment* environment = instr->environment();
5734
5735 // If the environment were already registered, we would have no way of
5736 // backpatching it with the spill slot operands.
5737 DCHECK(!environment->HasBeenRegistered());
5738 RegisterEnvironmentForDeoptimization(environment, Safepoint::kNoLazyDeopt);
5739
5740 GenerateOsrPrologue();
5741 }
5742
5743
5744 void LCodeGen::DoForInPrepareMap(LForInPrepareMap* instr) {
5745 Register result = ToRegister(instr->result());
5746 Register object = ToRegister(instr->object());
5747 __ And(at, object, kSmiTagMask);
5748 DeoptimizeIf(eq, instr, Deoptimizer::kSmi, at, Operand(zero_reg));
5749
5750 STATIC_ASSERT(FIRST_JS_PROXY_TYPE == FIRST_SPEC_OBJECT_TYPE);
5751 __ GetObjectType(object, a1, a1);
5752 DeoptimizeIf(le, instr, Deoptimizer::kNotAJavaScriptObject, a1,
5753 Operand(LAST_JS_PROXY_TYPE));
5754
5755 Label use_cache, call_runtime;
5756 DCHECK(object.is(a0));
5757 Register null_value = t1;
5758 __ LoadRoot(null_value, Heap::kNullValueRootIndex);
5759 __ CheckEnumCache(null_value, &call_runtime);
5760
5761 __ lw(result, FieldMemOperand(object, HeapObject::kMapOffset));
5762 __ Branch(&use_cache);
5763
5764 // Get the set of properties to enumerate.
5765 __ bind(&call_runtime);
5766 __ push(object);
5767 CallRuntime(Runtime::kGetPropertyNamesFast, 1, instr);
5768
5769 __ lw(a1, FieldMemOperand(v0, HeapObject::kMapOffset));
5770 DCHECK(result.is(v0));
5771 __ LoadRoot(at, Heap::kMetaMapRootIndex);
5772 DeoptimizeIf(ne, instr, Deoptimizer::kWrongMap, a1, Operand(at));
5773 __ bind(&use_cache);
5774 }
5775
5776
5777 void LCodeGen::DoForInCacheArray(LForInCacheArray* instr) {
5778 Register map = ToRegister(instr->map());
5779 Register result = ToRegister(instr->result());
5780 Label load_cache, done;
5781 __ EnumLength(result, map);
5782 __ Branch(&load_cache, ne, result, Operand(Smi::FromInt(0)));
5783 __ li(result, Operand(isolate()->factory()->empty_fixed_array()));
5784 __ jmp(&done);
5785
5786 __ bind(&load_cache);
5787 __ LoadInstanceDescriptors(map, result);
5788 __ lw(result,
5789 FieldMemOperand(result, DescriptorArray::kEnumCacheOffset));
5790 __ lw(result,
5791 FieldMemOperand(result, FixedArray::SizeFor(instr->idx())));
5792 DeoptimizeIf(eq, instr, Deoptimizer::kNoCache, result, Operand(zero_reg));
5793
5794 __ bind(&done);
5795 }
5796
5797
5798 void LCodeGen::DoCheckMapValue(LCheckMapValue* instr) {
5799 Register object = ToRegister(instr->value());
5800 Register map = ToRegister(instr->map());
5801 __ lw(scratch0(), FieldMemOperand(object, HeapObject::kMapOffset));
5802 DeoptimizeIf(ne, instr, Deoptimizer::kWrongMap, map, Operand(scratch0()));
5803 }
5804
5805
5806 void LCodeGen::DoDeferredLoadMutableDouble(LLoadFieldByIndex* instr,
5807 Register result,
5808 Register object,
5809 Register index) {
5810 PushSafepointRegistersScope scope(this);
5811 __ Push(object, index);
5812 __ mov(cp, zero_reg);
5813 __ CallRuntimeSaveDoubles(Runtime::kLoadMutableDouble);
5814 RecordSafepointWithRegisters(
5815 instr->pointer_map(), 2, Safepoint::kNoLazyDeopt);
5816 __ StoreToSafepointRegisterSlot(v0, result);
5817 }
5818
5819
5820 void LCodeGen::DoLoadFieldByIndex(LLoadFieldByIndex* instr) {
5821 class DeferredLoadMutableDouble final : public LDeferredCode {
5822 public:
5823 DeferredLoadMutableDouble(LCodeGen* codegen,
5824 LLoadFieldByIndex* instr,
5825 Register result,
5826 Register object,
5827 Register index)
5828 : LDeferredCode(codegen),
5829 instr_(instr),
5830 result_(result),
5831 object_(object),
5832 index_(index) {
5833 }
5834 void Generate() override {
5835 codegen()->DoDeferredLoadMutableDouble(instr_, result_, object_, index_);
5836 }
5837 LInstruction* instr() override { return instr_; }
5838
5839 private:
5840 LLoadFieldByIndex* instr_;
5841 Register result_;
5842 Register object_;
5843 Register index_;
5844 };
5845
5846 Register object = ToRegister(instr->object());
5847 Register index = ToRegister(instr->index());
5848 Register result = ToRegister(instr->result());
5849 Register scratch = scratch0();
5850
5851 DeferredLoadMutableDouble* deferred;
5852 deferred = new(zone()) DeferredLoadMutableDouble(
5853 this, instr, result, object, index);
5854
5855 Label out_of_object, done;
5856
5857 __ And(scratch, index, Operand(Smi::FromInt(1)));
5858 __ Branch(deferred->entry(), ne, scratch, Operand(zero_reg));
5859 __ sra(index, index, 1);
5860
5861 __ Branch(USE_DELAY_SLOT, &out_of_object, lt, index, Operand(zero_reg));
5862 __ sll(scratch, index, kPointerSizeLog2 - kSmiTagSize); // In delay slot.
5863
5864 STATIC_ASSERT(kPointerSizeLog2 > kSmiTagSize);
5865 __ Addu(scratch, object, scratch);
5866 __ lw(result, FieldMemOperand(scratch, JSObject::kHeaderSize));
5867
5868 __ Branch(&done);
5869
5870 __ bind(&out_of_object);
5871 __ lw(result, FieldMemOperand(object, JSObject::kPropertiesOffset));
5872 // The index is equal to the negated (out-of-object property index + 1).
5873 __ Subu(scratch, result, scratch);
5874 __ lw(result, FieldMemOperand(scratch,
5875 FixedArray::kHeaderSize - kPointerSize));
5876 __ bind(deferred->exit());
5877 __ bind(&done);
5878 }
5879
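// A minimal sketch (not V8 code) of the index decoding visible above: the low
// payload bit of the smi index selects the deferred mutable-double path, and
// the sign of the remaining bits selects in-object versus out-of-object
// (properties backing store) fields, with out-of-object indices stored as
// -(property_index + 1).
#include <cstdint>

struct DecodedFieldIndex {
  bool mutable_double;
  bool in_object;
  int32_t property_index;
};

static DecodedFieldIndex DecodeFieldIndex(int32_t untagged_index) {
  DecodedFieldIndex decoded;
  decoded.mutable_double = (untagged_index & 1) != 0;
  int32_t index = untagged_index >> 1;  // arithmetic shift, like sra above
  decoded.in_object = index >= 0;
  decoded.property_index = decoded.in_object ? index : -index - 1;
  return decoded;
}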
5880
5881 void LCodeGen::DoStoreFrameContext(LStoreFrameContext* instr) {
5882 Register context = ToRegister(instr->context());
5883 __ sw(context, MemOperand(fp, StandardFrameConstants::kContextOffset));
5884 }
5885
5886
5887 void LCodeGen::DoAllocateBlockContext(LAllocateBlockContext* instr) {
5888 Handle<ScopeInfo> scope_info = instr->scope_info();
5889 __ li(at, scope_info);
5890 __ Push(at, ToRegister(instr->function()));
5891 CallRuntime(Runtime::kPushBlockContext, 2, instr);
5892 RecordSafepoint(Safepoint::kNoLazyDeopt);
5893 }
5894
5895
5896 #undef __
5897
5898 } // namespace internal
5899 } // namespace v8