Chromium Code Reviews

Side by Side Diff: src/mips/lithium-codegen-mips.cc

Issue 7934002: MIPS: crankshaft implementation (Closed)
Patch Set: rebased on r9823. Created 9 years, 1 month ago
1 // Copyright 2011 the V8 project authors. All rights reserved.
2 // Redistribution and use in source and binary forms, with or without
3 // modification, are permitted provided that the following conditions are
4 // met:
5 //
6 // * Redistributions of source code must retain the above copyright
7 // notice, this list of conditions and the following disclaimer.
8 // * Redistributions in binary form must reproduce the above
9 // copyright notice, this list of conditions and the following
10 // disclaimer in the documentation and/or other materials provided
11 // with the distribution.
12 // * Neither the name of Google Inc. nor the names of its
13 // contributors may be used to endorse or promote products derived
14 // from this software without specific prior written permission.
15 //
16 // THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
17 // "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
18 // LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
19 // A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
20 // OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
21 // SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
22 // LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
23 // DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
24 // THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
25 // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
26 // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
27
28 #include "v8.h"
29
30 #include "mips/lithium-codegen-mips.h"
31 #include "mips/lithium-gap-resolver-mips.h"
32 #include "code-stubs.h"
33 #include "stub-cache.h"
34
35 namespace v8 {
36 namespace internal {
37
38
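// Helper that records a safepoint after each generated call. Before a call it
// pads the code with nops so that the lazy-deopt patch region of the previous
// safepoint cannot overlap the code emitted for the call.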
39 class SafepointGenerator : public CallWrapper {
40 public:
41 SafepointGenerator(LCodeGen* codegen,
42 LPointerMap* pointers,
43 int deoptimization_index)
44 : codegen_(codegen),
45 pointers_(pointers),
46 deoptimization_index_(deoptimization_index) { }
47 virtual ~SafepointGenerator() { }
48
49 virtual void BeforeCall(int call_size) const {
50 ASSERT(call_size >= 0);
51 // Ensure that we have enough space after the previous safepoint position
52 // for the generated code there.
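    // That is: pad with nops until the call will end past the previous
    // safepoint's patchable region (Deoptimizer::patch_size() bytes).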
53 int call_end = codegen_->masm()->pc_offset() + call_size;
54 int prev_jump_end =
55 codegen_->LastSafepointEnd() + Deoptimizer::patch_size();
56 if (call_end < prev_jump_end) {
57 int padding_size = prev_jump_end - call_end;
58 ASSERT_EQ(0, padding_size % Assembler::kInstrSize);
59 while (padding_size > 0) {
60 codegen_->masm()->nop();
61 padding_size -= Assembler::kInstrSize;
62 }
63 }
64 }
65
66 virtual void AfterCall() const {
67 codegen_->RecordSafepoint(pointers_, deoptimization_index_);
68 }
69
70 private:
71 LCodeGen* codegen_;
72 LPointerMap* pointers_;
73 int deoptimization_index_;
74 };
75
76
77 #define __ masm()->
78
79 bool LCodeGen::GenerateCode() {
80 HPhase phase("Code generation", chunk());
81 ASSERT(is_unused());
82 status_ = GENERATING;
83 CpuFeatures::Scope scope(FPU);
84
85 CodeStub::GenerateFPStubs();
86
87 // Open a frame scope to indicate that there is a frame on the stack. The
88 // NONE indicates that the scope shouldn't actually generate code to set up
89 // the frame (that is done in GeneratePrologue).
90 FrameScope frame_scope(masm_, StackFrame::NONE);
91
92 return GeneratePrologue() &&
93 GenerateBody() &&
94 GenerateDeferredCode() &&
95 GenerateSafepointTable();
96 }
97
98
99 void LCodeGen::FinishCode(Handle<Code> code) {
100 ASSERT(is_done());
101 code->set_stack_slots(GetStackSlotCount());
102 code->set_safepoint_table_offset(safepoints_.GetCodeOffset());
103 PopulateDeoptimizationData(code);
104 Deoptimizer::EnsureRelocSpaceForLazyDeoptimization(code);
105 }
106
107
108 void LCodeGen::Abort(const char* format, ...) {
109 if (FLAG_trace_bailout) {
110 SmartArrayPointer<char> name(
111 info()->shared_info()->DebugName()->ToCString());
112 PrintF("Aborting LCodeGen in @\"%s\": ", *name);
113 va_list arguments;
114 va_start(arguments, format);
115 OS::VPrint(format, arguments);
116 va_end(arguments);
117 PrintF("\n");
118 }
119 status_ = ABORTED;
120 }
121
122
123 void LCodeGen::Comment(const char* format, ...) {
124 if (!FLAG_code_comments) return;
125 char buffer[4 * KB];
126 StringBuilder builder(buffer, ARRAY_SIZE(buffer));
127 va_list arguments;
128 va_start(arguments, format);
129 builder.AddFormattedList(format, arguments);
130 va_end(arguments);
131
132 // Copy the string before recording it in the assembler to avoid
133 // issues when the stack allocated buffer goes out of scope.
134 size_t length = builder.position();
135 Vector<char> copy = Vector<char>::New(length + 1);
136 memcpy(copy.start(), builder.Finalize(), copy.length());
137 masm()->RecordComment(copy.start());
138 }
139
140
141 bool LCodeGen::GeneratePrologue() {
142 ASSERT(is_generating());
143
144 #ifdef DEBUG
145 if (strlen(FLAG_stop_at) > 0 &&
146 info_->function()->name()->IsEqualTo(CStrVector(FLAG_stop_at))) {
147 __ stop("stop_at");
148 }
149 #endif
150
151 // a1: Callee's JS function.
152 // cp: Callee's context.
153 // fp: Caller's frame pointer.
154 // lr: Caller's pc.
155
156 // Strict mode functions and builtins need to replace the receiver
157 // with undefined when called as functions (without an explicit
158 // receiver object). t1 is zero for method calls and non-zero for
159 // function calls.
160 if (info_->is_strict_mode() || info_->is_native()) {
161 Label ok;
162 __ Branch(&ok, eq, t1, Operand(zero_reg));
163
164 int receiver_offset = scope()->num_parameters() * kPointerSize;
165 __ LoadRoot(a2, Heap::kUndefinedValueRootIndex);
166 __ sw(a2, MemOperand(sp, receiver_offset));
167 __ bind(&ok);
168 }
169
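  // Build the frame: push the return address, caller fp, context, and the
  // callee function; fp is then adjusted to point at the saved caller fp.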
170 __ Push(ra, fp, cp, a1);
171 __ Addu(fp, sp, Operand(2 * kPointerSize)); // Adj. FP to point to saved FP.
172
173 // Reserve space for the stack slots needed by the code.
174 int slots = GetStackSlotCount();
175 if (slots > 0) {
176 if (FLAG_debug_code) {
177 __ li(a0, Operand(slots));
178 __ li(a2, Operand(kSlotsZapValue));
179 Label loop;
180 __ bind(&loop);
181 __ push(a2);
182 __ Subu(a0, a0, 1);
183 __ Branch(&loop, ne, a0, Operand(zero_reg));
184 } else {
185 __ Subu(sp, sp, Operand(slots * kPointerSize));
186 }
187 }
188
189 // Possibly allocate a local context.
190 int heap_slots = scope()->num_heap_slots() - Context::MIN_CONTEXT_SLOTS;
191 if (heap_slots > 0) {
192 Comment(";;; Allocate local context");
193 // Argument to NewContext is the function, which is in a1.
194 __ push(a1);
195 if (heap_slots <= FastNewContextStub::kMaximumSlots) {
196 FastNewContextStub stub(heap_slots);
197 __ CallStub(&stub);
198 } else {
199 __ CallRuntime(Runtime::kNewFunctionContext, 1);
200 }
201 RecordSafepoint(Safepoint::kNoDeoptimizationIndex);
202 // Context is returned in both v0 and cp. It replaces the context
203 // passed to us. It's saved in the stack and kept live in cp.
204 __ sw(cp, MemOperand(fp, StandardFrameConstants::kContextOffset));
205 // Copy any necessary parameters into the context.
206 int num_parameters = scope()->num_parameters();
207 for (int i = 0; i < num_parameters; i++) {
208 Variable* var = scope()->parameter(i);
209 if (var->IsContextSlot()) {
210 int parameter_offset = StandardFrameConstants::kCallerSPOffset +
211 (num_parameters - 1 - i) * kPointerSize;
212 // Load parameter from stack.
213 __ lw(a0, MemOperand(fp, parameter_offset));
214 // Store it in the context.
215 MemOperand target = ContextOperand(cp, var->index());
216 __ sw(a0, target);
217 // Update the write barrier. This clobbers a3 and a0.
218 __ RecordWriteContextSlot(
219 cp, target.offset(), a0, a3, kRAHasBeenSaved, kSaveFPRegs);
220 }
221 }
222 Comment(";;; End allocate local context");
223 }
224
225 // Trace the call.
226 if (FLAG_trace) {
227 __ CallRuntime(Runtime::kTraceEnter, 0);
228 }
229 return !is_aborted();
230 }
231
232
233 bool LCodeGen::GenerateBody() {
234 ASSERT(is_generating());
235 bool emit_instructions = true;
236 for (current_instruction_ = 0;
237 !is_aborted() && current_instruction_ < instructions_->length();
238 current_instruction_++) {
239 LInstruction* instr = instructions_->at(current_instruction_);
240 if (instr->IsLabel()) {
241 LLabel* label = LLabel::cast(instr);
242 emit_instructions = !label->HasReplacement();
243 }
244
245 if (emit_instructions) {
246 Comment(";;; @%d: %s.", current_instruction_, instr->Mnemonic());
247 instr->CompileToNative(this);
248 }
249 }
250 return !is_aborted();
251 }
252
253
254 LInstruction* LCodeGen::GetNextInstruction() {
255 if (current_instruction_ < instructions_->length() - 1) {
256 return instructions_->at(current_instruction_ + 1);
257 } else {
258 return NULL;
259 }
260 }
261
262
263 bool LCodeGen::GenerateDeferredCode() {
264 ASSERT(is_generating());
265 if (deferred_.length() > 0) {
266 for (int i = 0; !is_aborted() && i < deferred_.length(); i++) {
267 LDeferredCode* code = deferred_[i];
268 __ bind(code->entry());
269 Comment(";;; Deferred code @%d: %s.",
270 code->instruction_index(),
271 code->instr()->Mnemonic());
272 code->Generate();
273 __ jmp(code->exit());
274 }
275
276 // Pad code to ensure that the last piece of deferred code has
277 // room for lazy bailout.
278 while ((masm()->pc_offset() - LastSafepointEnd())
279 < Deoptimizer::patch_size()) {
280 __ nop();
281 }
282 }
283 // Deferred code is the last part of the instruction sequence. Mark
284 // the generated code as done unless we bailed out.
285 if (!is_aborted()) status_ = DONE;
286 return !is_aborted();
287 }
288
289
290 bool LCodeGen::GenerateDeoptJumpTable() {
291 // TODO(plind): not clear that this will have an advantage for MIPS.
292 // Skipping it for now. Raised issue #100 for this.
293 Abort("Unimplemented: %s", "GenerateDeoptJumpTable");
294 return false;
295 }
296
297
298 bool LCodeGen::GenerateSafepointTable() {
299 ASSERT(is_done());
300 safepoints_.Emit(masm(), GetStackSlotCount());
301 return !is_aborted();
302 }
303
304
305 Register LCodeGen::ToRegister(int index) const {
306 return Register::FromAllocationIndex(index);
307 }
308
309
310 DoubleRegister LCodeGen::ToDoubleRegister(int index) const {
311 return DoubleRegister::FromAllocationIndex(index);
312 }
313
314
315 Register LCodeGen::ToRegister(LOperand* op) const {
316 ASSERT(op->IsRegister());
317 return ToRegister(op->index());
318 }
319
320
321 Register LCodeGen::EmitLoadRegister(LOperand* op, Register scratch) {
322 if (op->IsRegister()) {
323 return ToRegister(op->index());
324 } else if (op->IsConstantOperand()) {
325 __ li(scratch, ToOperand(op));
326 return scratch;
327 } else if (op->IsStackSlot() || op->IsArgument()) {
328 __ lw(scratch, ToMemOperand(op));
329 return scratch;
330 }
331 UNREACHABLE();
332 return scratch;
333 }
334
335
336 DoubleRegister LCodeGen::ToDoubleRegister(LOperand* op) const {
337 ASSERT(op->IsDoubleRegister());
338 return ToDoubleRegister(op->index());
339 }
340
341
342 DoubleRegister LCodeGen::EmitLoadDoubleRegister(LOperand* op,
343 FloatRegister flt_scratch,
344 DoubleRegister dbl_scratch) {
345 if (op->IsDoubleRegister()) {
346 return ToDoubleRegister(op->index());
347 } else if (op->IsConstantOperand()) {
348 LConstantOperand* const_op = LConstantOperand::cast(op);
349 Handle<Object> literal = chunk_->LookupLiteral(const_op);
350 Representation r = chunk_->LookupLiteralRepresentation(const_op);
351 if (r.IsInteger32()) {
352 ASSERT(literal->IsNumber());
353 __ li(at, Operand(static_cast<int32_t>(literal->Number())));
354 __ mtc1(at, flt_scratch);
355 __ cvt_d_w(dbl_scratch, flt_scratch);
356 return dbl_scratch;
357 } else if (r.IsDouble()) {
358 Abort("unsupported double immediate");
359 } else if (r.IsTagged()) {
360 Abort("unsupported tagged immediate");
361 }
362 } else if (op->IsStackSlot() || op->IsArgument()) {
363 MemOperand mem_op = ToMemOperand(op);
364 __ ldc1(dbl_scratch, mem_op);
365 return dbl_scratch;
366 }
367 UNREACHABLE();
368 return dbl_scratch;
369 }
370
371
372 int LCodeGen::ToInteger32(LConstantOperand* op) const {
373 Handle<Object> value = chunk_->LookupLiteral(op);
374 ASSERT(chunk_->LookupLiteralRepresentation(op).IsInteger32());
375 ASSERT(static_cast<double>(static_cast<int32_t>(value->Number())) ==
376 value->Number());
377 return static_cast<int32_t>(value->Number());
378 }
379
380
381 double LCodeGen::ToDouble(LConstantOperand* op) const {
382 Handle<Object> value = chunk_->LookupLiteral(op);
383 return value->Number();
384 }
385
386
387 Operand LCodeGen::ToOperand(LOperand* op) {
388 if (op->IsConstantOperand()) {
389 LConstantOperand* const_op = LConstantOperand::cast(op);
390 Handle<Object> literal = chunk_->LookupLiteral(const_op);
391 Representation r = chunk_->LookupLiteralRepresentation(const_op);
392 if (r.IsInteger32()) {
393 ASSERT(literal->IsNumber());
394 return Operand(static_cast<int32_t>(literal->Number()));
395 } else if (r.IsDouble()) {
396 Abort("ToOperand Unsupported double immediate.");
397 }
398 ASSERT(r.IsTagged());
399 return Operand(literal);
400 } else if (op->IsRegister()) {
401 return Operand(ToRegister(op));
402 } else if (op->IsDoubleRegister()) {
403 Abort("ToOperand IsDoubleRegister unimplemented");
404 return Operand(0);
405 }
406 // Stack slots not implemented, use ToMemOperand instead.
407 UNREACHABLE();
408 return Operand(0);
409 }
410
411
412 MemOperand LCodeGen::ToMemOperand(LOperand* op) const {
413 ASSERT(!op->IsRegister());
414 ASSERT(!op->IsDoubleRegister());
415 ASSERT(op->IsStackSlot() || op->IsDoubleStackSlot());
416 int index = op->index();
417 if (index >= 0) {
418 // Local or spill slot. Skip the frame pointer, function, and
419 // context in the fixed part of the frame.
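    // For example, index 0 (the first spill slot) maps to
    // MemOperand(fp, -3 * kPointerSize).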
420 return MemOperand(fp, -(index + 3) * kPointerSize);
421 } else {
422 // Incoming parameter. Skip the return address.
423 return MemOperand(fp, -(index - 1) * kPointerSize);
424 }
425 }
426
427
428 MemOperand LCodeGen::ToHighMemOperand(LOperand* op) const {
429 ASSERT(op->IsDoubleStackSlot());
430 int index = op->index();
431 if (index >= 0) {
432 // Local or spill slot. Skip the frame pointer, function, context,
433 // and the first word of the double in the fixed part of the frame.
434 return MemOperand(fp, -(index + 3) * kPointerSize + kPointerSize);
435 } else {
436 // Incoming parameter. Skip the return address and the first word of
437 // the double.
438 return MemOperand(fp, -(index - 1) * kPointerSize + kPointerSize);
439 }
440 }
441
442
443 void LCodeGen::WriteTranslation(LEnvironment* environment,
444 Translation* translation) {
445 if (environment == NULL) return;
446
447 // The translation includes one command per value in the environment.
448 int translation_size = environment->values()->length();
449 // The output frame height does not include the parameters.
450 int height = translation_size - environment->parameter_count();
451
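  // Write the outer environments first so that outer frames precede inner
  // ones in the translation.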
452 WriteTranslation(environment->outer(), translation);
453 int closure_id = DefineDeoptimizationLiteral(environment->closure());
454 translation->BeginFrame(environment->ast_id(), closure_id, height);
455 for (int i = 0; i < translation_size; ++i) {
456 LOperand* value = environment->values()->at(i);
457 // spilled_registers_ and spilled_double_registers_ are either
458 // both NULL or both set.
459 if (environment->spilled_registers() != NULL && value != NULL) {
460 if (value->IsRegister() &&
461 environment->spilled_registers()[value->index()] != NULL) {
462 translation->MarkDuplicate();
463 AddToTranslation(translation,
464 environment->spilled_registers()[value->index()],
465 environment->HasTaggedValueAt(i));
466 } else if (
467 value->IsDoubleRegister() &&
468 environment->spilled_double_registers()[value->index()] != NULL) {
469 translation->MarkDuplicate();
470 AddToTranslation(
471 translation,
472 environment->spilled_double_registers()[value->index()],
473 false);
474 }
475 }
476
477 AddToTranslation(translation, value, environment->HasTaggedValueAt(i));
478 }
479 }
480
481
482 void LCodeGen::AddToTranslation(Translation* translation,
483 LOperand* op,
484 bool is_tagged) {
485 if (op == NULL) {
486 // TODO(twuerthinger): Introduce marker operands to indicate that this value
487 // is not present and must be reconstructed from the deoptimizer. Currently
488 // this is only used for the arguments object.
489 translation->StoreArgumentsObject();
490 } else if (op->IsStackSlot()) {
491 if (is_tagged) {
492 translation->StoreStackSlot(op->index());
493 } else {
494 translation->StoreInt32StackSlot(op->index());
495 }
496 } else if (op->IsDoubleStackSlot()) {
497 translation->StoreDoubleStackSlot(op->index());
498 } else if (op->IsArgument()) {
499 ASSERT(is_tagged);
500 int src_index = GetStackSlotCount() + op->index();
501 translation->StoreStackSlot(src_index);
502 } else if (op->IsRegister()) {
503 Register reg = ToRegister(op);
504 if (is_tagged) {
505 translation->StoreRegister(reg);
506 } else {
507 translation->StoreInt32Register(reg);
508 }
509 } else if (op->IsDoubleRegister()) {
510 DoubleRegister reg = ToDoubleRegister(op);
511 translation->StoreDoubleRegister(reg);
512 } else if (op->IsConstantOperand()) {
513 Handle<Object> literal = chunk()->LookupLiteral(LConstantOperand::cast(op));
514 int src_index = DefineDeoptimizationLiteral(literal);
515 translation->StoreLiteral(src_index);
516 } else {
517 UNREACHABLE();
518 }
519 }
520
521
522 void LCodeGen::CallCode(Handle<Code> code,
523 RelocInfo::Mode mode,
524 LInstruction* instr) {
525 CallCodeGeneric(code, mode, instr, RECORD_SIMPLE_SAFEPOINT);
526 }
527
528
529 void LCodeGen::CallCodeGeneric(Handle<Code> code,
530 RelocInfo::Mode mode,
531 LInstruction* instr,
532 SafepointMode safepoint_mode) {
533 ASSERT(instr != NULL);
534 LPointerMap* pointers = instr->pointer_map();
535 RecordPosition(pointers->position());
536 __ Call(code, mode);
537 RegisterLazyDeoptimization(instr, safepoint_mode);
538 }
539
540
541 void LCodeGen::CallRuntime(const Runtime::Function* function,
542 int num_arguments,
543 LInstruction* instr) {
544 ASSERT(instr != NULL);
545 LPointerMap* pointers = instr->pointer_map();
546 ASSERT(pointers != NULL);
547 RecordPosition(pointers->position());
548
549 __ CallRuntime(function, num_arguments);
550 RegisterLazyDeoptimization(instr, RECORD_SIMPLE_SAFEPOINT);
551 }
552
553
554 void LCodeGen::CallRuntimeFromDeferred(Runtime::FunctionId id,
555 int argc,
556 LInstruction* instr) {
557 __ CallRuntimeSaveDoubles(id);
558 RecordSafepointWithRegisters(
559 instr->pointer_map(), argc, Safepoint::kNoDeoptimizationIndex);
560 }
561
562
563 void LCodeGen::RegisterLazyDeoptimization(LInstruction* instr,
564 SafepointMode safepoint_mode) {
565 // Create the environment to bail out to. If the call has side effects,
566 // execution has to continue after the call; otherwise execution can continue
567 // from a previous bailout point, repeating the call.
568 LEnvironment* deoptimization_environment;
569 if (instr->HasDeoptimizationEnvironment()) {
570 deoptimization_environment = instr->deoptimization_environment();
571 } else {
572 deoptimization_environment = instr->environment();
573 }
574
575 RegisterEnvironmentForDeoptimization(deoptimization_environment);
576 if (safepoint_mode == RECORD_SIMPLE_SAFEPOINT) {
577 RecordSafepoint(instr->pointer_map(),
578 deoptimization_environment->deoptimization_index());
579 } else {
580 ASSERT(safepoint_mode == RECORD_SAFEPOINT_WITH_REGISTERS_AND_NO_ARGUMENTS);
581 RecordSafepointWithRegisters(
582 instr->pointer_map(),
583 0,
584 deoptimization_environment->deoptimization_index());
585 }
586 }
587
588
589 void LCodeGen::RegisterEnvironmentForDeoptimization(LEnvironment* environment) {
590 if (!environment->HasBeenRegistered()) {
591 // Physical stack frame layout:
592 // -x ............. -4 0 ..................................... y
593 // [incoming arguments] [spill slots] [pushed outgoing arguments]
594
595 // Layout of the environment:
596 // 0 ..................................................... size-1
597 // [parameters] [locals] [expression stack including arguments]
598
599 // Layout of the translation:
600 // 0 ........................................................ size - 1 + 4
601 // [expression stack including arguments] [locals] [4 words] [parameters]
602 // |>------------ translation_size ------------<|
603
604 int frame_count = 0;
605 for (LEnvironment* e = environment; e != NULL; e = e->outer()) {
606 ++frame_count;
607 }
608 Translation translation(&translations_, frame_count);
609 WriteTranslation(environment, &translation);
610 int deoptimization_index = deoptimizations_.length();
611 environment->Register(deoptimization_index, translation.index());
612 deoptimizations_.Add(environment);
613 }
614 }
615
616
617 void LCodeGen::DeoptimizeIf(Condition cc,
618 LEnvironment* environment,
619 Register src1,
620 const Operand& src2) {
621 RegisterEnvironmentForDeoptimization(environment);
622 ASSERT(environment->HasBeenRegistered());
623 int id = environment->deoptimization_index();
624 Address entry = Deoptimizer::GetDeoptimizationEntry(id, Deoptimizer::EAGER);
625 ASSERT(entry != NULL);
626 if (entry == NULL) {
627 Abort("bailout was not prepared");
628 return;
629 }
630
631 ASSERT(FLAG_deopt_every_n_times < 2); // Other values not supported on MIPS.
632
633 if (FLAG_deopt_every_n_times == 1 &&
634 info_->shared_info()->opt_count() == id) {
635 __ Jump(entry, RelocInfo::RUNTIME_ENTRY);
636 return;
637 }
638
639 if (FLAG_trap_on_deopt) {
640 Label skip;
641 if (cc != al) {
642 __ Branch(&skip, NegateCondition(cc), src1, src2);
643 }
644 __ stop("trap_on_deopt");
645 __ bind(&skip);
646 }
647
648 if (cc == al) {
649 __ Jump(entry, RelocInfo::RUNTIME_ENTRY);
650 } else {
651 // TODO(plind): The ARM port is a little different here, due to its
652 // deopt jump table, which is not used for MIPS yet.
653 __ Jump(entry, RelocInfo::RUNTIME_ENTRY, cc, src1, src2);
654 }
655 }
656
657
658 void LCodeGen::PopulateDeoptimizationData(Handle<Code> code) {
659 int length = deoptimizations_.length();
660 if (length == 0) return;
661 ASSERT(FLAG_deopt);
662 Handle<DeoptimizationInputData> data =
663 factory()->NewDeoptimizationInputData(length, TENURED);
664
665 Handle<ByteArray> translations = translations_.CreateByteArray();
666 data->SetTranslationByteArray(*translations);
667 data->SetInlinedFunctionCount(Smi::FromInt(inlined_function_count_));
668
669 Handle<FixedArray> literals =
670 factory()->NewFixedArray(deoptimization_literals_.length(), TENURED);
671 for (int i = 0; i < deoptimization_literals_.length(); i++) {
672 literals->set(i, *deoptimization_literals_[i]);
673 }
674 data->SetLiteralArray(*literals);
675
676 data->SetOsrAstId(Smi::FromInt(info_->osr_ast_id()));
677 data->SetOsrPcOffset(Smi::FromInt(osr_pc_offset_));
678
679 // Populate the deoptimization entries.
680 for (int i = 0; i < length; i++) {
681 LEnvironment* env = deoptimizations_[i];
682 data->SetAstId(i, Smi::FromInt(env->ast_id()));
683 data->SetTranslationIndex(i, Smi::FromInt(env->translation_index()));
684 data->SetArgumentsStackHeight(i,
685 Smi::FromInt(env->arguments_stack_height()));
686 }
687 code->set_deoptimization_data(*data);
688 }
689
690
691 int LCodeGen::DefineDeoptimizationLiteral(Handle<Object> literal) {
692 int result = deoptimization_literals_.length();
693 for (int i = 0; i < deoptimization_literals_.length(); ++i) {
694 if (deoptimization_literals_[i].is_identical_to(literal)) return i;
695 }
696 deoptimization_literals_.Add(literal);
697 return result;
698 }
699
700
701 void LCodeGen::PopulateDeoptimizationLiteralsWithInlinedFunctions() {
702 ASSERT(deoptimization_literals_.length() == 0);
703
704 const ZoneList<Handle<JSFunction> >* inlined_closures =
705 chunk()->inlined_closures();
706
707 for (int i = 0, length = inlined_closures->length();
708 i < length;
709 i++) {
710 DefineDeoptimizationLiteral(inlined_closures->at(i));
711 }
712
713 inlined_function_count_ = deoptimization_literals_.length();
714 }
715
716
717 void LCodeGen::RecordSafepoint(
718 LPointerMap* pointers,
719 Safepoint::Kind kind,
720 int arguments,
721 int deoptimization_index) {
722 ASSERT(expected_safepoint_kind_ == kind);
723
724 const ZoneList<LOperand*>* operands = pointers->GetNormalizedOperands();
725 Safepoint safepoint = safepoints_.DefineSafepoint(masm(),
726 kind, arguments, deoptimization_index);
727 for (int i = 0; i < operands->length(); i++) {
728 LOperand* pointer = operands->at(i);
729 if (pointer->IsStackSlot()) {
730 safepoint.DefinePointerSlot(pointer->index());
731 } else if (pointer->IsRegister() && (kind & Safepoint::kWithRegisters)) {
732 safepoint.DefinePointerRegister(ToRegister(pointer));
733 }
734 }
735 if (kind & Safepoint::kWithRegisters) {
736 // Register cp always contains a pointer to the context.
737 safepoint.DefinePointerRegister(cp);
738 }
739 }
740
741
742 void LCodeGen::RecordSafepoint(LPointerMap* pointers,
743 int deoptimization_index) {
744 RecordSafepoint(pointers, Safepoint::kSimple, 0, deoptimization_index);
745 }
746
747
748 void LCodeGen::RecordSafepoint(int deoptimization_index) {
749 LPointerMap empty_pointers(RelocInfo::kNoPosition);
750 RecordSafepoint(&empty_pointers, deoptimization_index);
751 }
752
753
754 void LCodeGen::RecordSafepointWithRegisters(LPointerMap* pointers,
755 int arguments,
756 int deoptimization_index) {
757 RecordSafepoint(pointers, Safepoint::kWithRegisters, arguments,
758 deoptimization_index);
759 }
760
761
762 void LCodeGen::RecordSafepointWithRegistersAndDoubles(
763 LPointerMap* pointers,
764 int arguments,
765 int deoptimization_index) {
766 RecordSafepoint(pointers, Safepoint::kWithRegistersAndDoubles, arguments,
767 deoptimization_index);
768 }
769
770
771 void LCodeGen::RecordPosition(int position) {
772 if (position == RelocInfo::kNoPosition) return;
773 masm()->positions_recorder()->RecordPosition(position);
774 }
775
776
777 void LCodeGen::DoLabel(LLabel* label) {
778 if (label->is_loop_header()) {
779 Comment(";;; B%d - LOOP entry", label->block_id());
780 } else {
781 Comment(";;; B%d", label->block_id());
782 }
783 __ bind(label->label());
784 current_block_ = label->block_id();
785 DoGap(label);
786 }
787
788
789 void LCodeGen::DoParallelMove(LParallelMove* move) {
790 resolver_.Resolve(move);
791 }
792
793
794 void LCodeGen::DoGap(LGap* gap) {
795 for (int i = LGap::FIRST_INNER_POSITION;
796 i <= LGap::LAST_INNER_POSITION;
797 i++) {
798 LGap::InnerPosition inner_pos = static_cast<LGap::InnerPosition>(i);
799 LParallelMove* move = gap->GetParallelMove(inner_pos);
800 if (move != NULL) DoParallelMove(move);
801 }
802
803 LInstruction* next = GetNextInstruction();
804 if (next != NULL && next->IsLazyBailout()) {
805 int pc = masm()->pc_offset();
806 safepoints_.SetPcAfterGap(pc);
807 }
808 }
809
810
811 void LCodeGen::DoInstructionGap(LInstructionGap* instr) {
812 DoGap(instr);
813 }
814
815
816 void LCodeGen::DoParameter(LParameter* instr) {
817 // Nothing to do.
818 }
819
820
821 void LCodeGen::DoCallStub(LCallStub* instr) {
822 ASSERT(ToRegister(instr->result()).is(v0));
823 switch (instr->hydrogen()->major_key()) {
824 case CodeStub::RegExpConstructResult: {
825 RegExpConstructResultStub stub;
826 CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
827 break;
828 }
829 case CodeStub::RegExpExec: {
830 RegExpExecStub stub;
831 CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
832 break;
833 }
834 case CodeStub::SubString: {
835 SubStringStub stub;
836 CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
837 break;
838 }
839 case CodeStub::NumberToString: {
840 NumberToStringStub stub;
841 CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
842 break;
843 }
844 case CodeStub::StringAdd: {
845 StringAddStub stub(NO_STRING_ADD_FLAGS);
846 CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
847 break;
848 }
849 case CodeStub::StringCompare: {
850 StringCompareStub stub;
851 CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
852 break;
853 }
854 case CodeStub::TranscendentalCache: {
855 __ lw(a0, MemOperand(sp, 0));
856 TranscendentalCacheStub stub(instr->transcendental_type(),
857 TranscendentalCacheStub::TAGGED);
858 CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
859 break;
860 }
861 default:
862 UNREACHABLE();
863 }
864 }
865
866
867 void LCodeGen::DoUnknownOSRValue(LUnknownOSRValue* instr) {
868 // Nothing to do.
869 }
870
871
872 void LCodeGen::DoModI(LModI* instr) {
873 Register scratch = scratch0();
874 const Register left = ToRegister(instr->InputAt(0));
875 const Register result = ToRegister(instr->result());
876
877 // p2constant holds the right-hand-side value if it is a power-of-2 constant.
878 // In other cases it is 0.
879 int32_t p2constant = 0;
880
881 if (instr->InputAt(1)->IsConstantOperand()) {
882 p2constant = ToInteger32(LConstantOperand::cast(instr->InputAt(1)));
883 if (p2constant % 2 != 0) {
884 p2constant = 0;
885 }
886 // Result always takes the sign of the dividend (left).
887 p2constant = abs(p2constant);
888 }
889
890 // div runs in the background while we check for special cases.
891 Register right = EmitLoadRegister(instr->InputAt(1), scratch);
892 __ div(left, right);
893
894 // Check for x % 0.
895 if (instr->hydrogen()->CheckFlag(HValue::kCanBeDivByZero)) {
896 DeoptimizeIf(eq, instr->environment(), right, Operand(zero_reg));
897 }
898
899 Label skip_div, do_div;
900 if (p2constant != 0) {
901 // Fall back to the result of the div instruction if we could have sign
902 // problems.
903 __ Branch(&do_div, lt, left, Operand(zero_reg));
904 // Modulo by masking.
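    // For non-negative left, left % 2^k equals left & (2^k - 1).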
905 __ And(scratch, left, p2constant - 1);
906 __ Branch(&skip_div);
907 }
908
909 __ bind(&do_div);
910 __ mfhi(scratch);
911 __ bind(&skip_div);
912
913 if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) {
914 // Result always takes the sign of the dividend (left).
915 Label done;
916 __ Branch(USE_DELAY_SLOT, &done, ge, left, Operand(zero_reg));
917 __ mov(result, scratch);
918 DeoptimizeIf(eq, instr->environment(), result, Operand(zero_reg));
919 __ bind(&done);
920 } else {
921 __ Move(result, scratch);
922 }
923 }
924
925
926 void LCodeGen::DoDivI(LDivI* instr) {
927 const Register left = ToRegister(instr->InputAt(0));
928 const Register right = ToRegister(instr->InputAt(1));
929 const Register result = ToRegister(instr->result());
930
931 // On MIPS div is asynchronous - it will run in the background while we
932 // check for special cases.
933 __ div(left, right);
934
935 // Check for x / 0.
936 if (instr->hydrogen()->CheckFlag(HValue::kCanBeDivByZero)) {
937 DeoptimizeIf(eq, instr->environment(), right, Operand(zero_reg));
938 }
939
940 // Check for (0 / -x) that will produce negative zero.
941 if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) {
942 Label left_not_zero;
943 __ Branch(&left_not_zero, ne, left, Operand(zero_reg));
944 DeoptimizeIf(lt, instr->environment(), right, Operand(zero_reg));
945 __ bind(&left_not_zero);
946 }
947
948 // Check for (-kMinInt / -1).
949 if (instr->hydrogen()->CheckFlag(HValue::kCanOverflow)) {
950 Label left_not_min_int;
951 __ Branch(&left_not_min_int, ne, left, Operand(kMinInt));
952 DeoptimizeIf(eq, instr->environment(), right, Operand(-1));
953 __ bind(&left_not_min_int);
954 }
955
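  // On MIPS the hi register holds the remainder; a non-zero remainder means
  // the quotient is not an integer, so deoptimize in that case.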
956 __ mfhi(result);
957 DeoptimizeIf(ne, instr->environment(), result, Operand(zero_reg));
958 __ mflo(result);
959 }
960
961
962 void LCodeGen::DoMulI(LMulI* instr) {
963 Register scratch = scratch0();
964 Register result = ToRegister(instr->result());
965 // Note that result may alias left.
966 Register left = ToRegister(instr->InputAt(0));
967 LOperand* right_op = instr->InputAt(1);
968
969 bool can_overflow = instr->hydrogen()->CheckFlag(HValue::kCanOverflow);
970 bool bailout_on_minus_zero =
971 instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero);
972
973 if (right_op->IsConstantOperand() && !can_overflow) {
974 // Use optimized code for specific constants.
975 int32_t constant = ToInteger32(LConstantOperand::cast(right_op));
976
977 if (bailout_on_minus_zero && (constant < 0)) {
978 // The case of a zero constant is handled separately below.
979 // If the constant is negative and left is zero, the result should be -0.
980 DeoptimizeIf(eq, instr->environment(), left, Operand(zero_reg));
981 }
982
983 switch (constant) {
984 case -1:
985 __ Subu(result, zero_reg, left);
986 break;
987 case 0:
988 if (bailout_on_minus_zero) {
989 // If left is strictly negative and the constant is zero, the
990 // result is -0. Deoptimize if required; otherwise return 0.
991 DeoptimizeIf(lt, instr->environment(), left, Operand(zero_reg));
992 }
993 __ mov(result, zero_reg);
994 break;
995 case 1:
996 // Nothing to do.
997 __ Move(result, left);
998 break;
999 default:
1000 // Multiplying by powers of two and powers of two plus or minus
1001 // one can be done faster with shifted operands.
1002 // For other constants we emit standard code.
1003 int32_t mask = constant >> 31;
1004 uint32_t constant_abs = (constant + mask) ^ mask;
1005
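        // For example, x * 8 becomes x << 3, x * 9 becomes (x << 3) + x, and
        // x * 7 becomes (x << 3) - x; a final negation handles negative
        // constants.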
1006 if (IsPowerOf2(constant_abs) ||
1007 IsPowerOf2(constant_abs - 1) ||
1008 IsPowerOf2(constant_abs + 1)) {
1009 if (IsPowerOf2(constant_abs)) {
1010 int32_t shift = WhichPowerOf2(constant_abs);
1011 __ sll(result, left, shift);
1012 } else if (IsPowerOf2(constant_abs - 1)) {
1013 int32_t shift = WhichPowerOf2(constant_abs - 1);
1014 __ sll(result, left, shift);
1015 __ Addu(result, result, left);
1016 } else if (IsPowerOf2(constant_abs + 1)) {
1017 int32_t shift = WhichPowerOf2(constant_abs + 1);
1018 __ sll(result, left, shift);
1019 __ Subu(result, result, left);
1020 }
1021
1022 // Correct the sign of the result if the constant is negative.
1023 if (constant < 0) {
1024 __ Subu(result, zero_reg, result);
1025 }
1026
1027 } else {
1028 // Generate standard code.
1029 __ li(at, constant);
1030 __ mul(result, left, at);
1031 }
1032 }
1033
1034 } else {
1035 Register right = EmitLoadRegister(right_op, scratch);
1036 if (bailout_on_minus_zero) {
1037 __ Or(ToRegister(instr->TempAt(0)), left, right);
1038 }
1039
1040 if (can_overflow) {
1041 // hi:lo = left * right.
1042 __ mult(left, right);
1043 __ mfhi(scratch);
1044 __ mflo(result);
1045 __ sra(at, result, 31);
1046 DeoptimizeIf(ne, instr->environment(), scratch, Operand(at));
1047 } else {
1048 __ mul(result, left, right);
1049 }
1050
1051 if (bailout_on_minus_zero) {
1052 // Bail out if the result is supposed to be negative zero.
1053 Label done;
1054 __ Branch(&done, ne, result, Operand(zero_reg));
1055 DeoptimizeIf(lt,
1056 instr->environment(),
1057 ToRegister(instr->TempAt(0)),
1058 Operand(zero_reg));
1059 __ bind(&done);
1060 }
1061 }
1062 }
1063
1064
1065 void LCodeGen::DoBitI(LBitI* instr) {
1066 LOperand* left_op = instr->InputAt(0);
1067 LOperand* right_op = instr->InputAt(1);
1068 ASSERT(left_op->IsRegister());
1069 Register left = ToRegister(left_op);
1070 Register result = ToRegister(instr->result());
1071 Operand right(no_reg);
1072
1073 if (right_op->IsStackSlot() || right_op->IsArgument()) {
1074 right = Operand(EmitLoadRegister(right_op, at));
1075 } else {
1076 ASSERT(right_op->IsRegister() || right_op->IsConstantOperand());
1077 right = ToOperand(right_op);
1078 }
1079
1080 switch (instr->op()) {
1081 case Token::BIT_AND:
1082 __ And(result, left, right);
1083 break;
1084 case Token::BIT_OR:
1085 __ Or(result, left, right);
1086 break;
1087 case Token::BIT_XOR:
1088 __ Xor(result, left, right);
1089 break;
1090 default:
1091 UNREACHABLE();
1092 break;
1093 }
1094 }
1095
1096
1097 void LCodeGen::DoShiftI(LShiftI* instr) {
1098 // Both 'left' and 'right' are "used at start" (see LCodeGen::DoShift), so
1099 // result may alias either of them.
1100 LOperand* right_op = instr->InputAt(1);
1101 Register left = ToRegister(instr->InputAt(0));
1102 Register result = ToRegister(instr->result());
1103
1104 if (right_op->IsRegister()) {
1105 // No need to mask the right operand on MIPS; masking is built into the
1106 // variable shift instructions.
1107 switch (instr->op()) {
1108 case Token::SAR:
1109 __ srav(result, left, ToRegister(right_op));
1110 break;
1111 case Token::SHR:
1112 __ srlv(result, left, ToRegister(right_op));
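        // If the unsigned result has the sign bit set it cannot be
        // represented as a signed int32, so deoptimize.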
1113 if (instr->can_deopt()) {
1114 DeoptimizeIf(lt, instr->environment(), result, Operand(zero_reg));
1115 }
1116 break;
1117 case Token::SHL:
1118 __ sllv(result, left, ToRegister(right_op));
1119 break;
1120 default:
1121 UNREACHABLE();
1122 break;
1123 }
1124 } else {
1125 // Mask the constant shift count to 5 bits.
1126 int value = ToInteger32(LConstantOperand::cast(right_op));
1127 uint8_t shift_count = static_cast<uint8_t>(value & 0x1F);
1128 switch (instr->op()) {
1129 case Token::SAR:
1130 if (shift_count != 0) {
1131 __ sra(result, left, shift_count);
1132 } else {
1133 __ Move(result, left);
1134 }
1135 break;
1136 case Token::SHR:
1137 if (shift_count != 0) {
1138 __ srl(result, left, shift_count);
1139 } else {
1140 if (instr->can_deopt()) {
1141 __ And(at, left, Operand(0x80000000));
1142 DeoptimizeIf(ne, instr->environment(), at, Operand(zero_reg));
1143 }
1144 __ Move(result, left);
1145 }
1146 break;
1147 case Token::SHL:
1148 if (shift_count != 0) {
1149 __ sll(result, left, shift_count);
1150 } else {
1151 __ Move(result, left);
1152 }
1153 break;
1154 default:
1155 UNREACHABLE();
1156 break;
1157 }
1158 }
1159 }
1160
1161
1162 void LCodeGen::DoSubI(LSubI* instr) {
1163 LOperand* left = instr->InputAt(0);
1164 LOperand* right = instr->InputAt(1);
1165 LOperand* result = instr->result();
1166 bool can_overflow = instr->hydrogen()->CheckFlag(HValue::kCanOverflow);
1167
1168 if (!can_overflow) {
1169 if (right->IsStackSlot() || right->IsArgument()) {
1170 Register right_reg = EmitLoadRegister(right, at);
1171 __ Subu(ToRegister(result), ToRegister(left), Operand(right_reg));
1172 } else {
1173 ASSERT(right->IsRegister() || right->IsConstantOperand());
1174 __ Subu(ToRegister(result), ToRegister(left), ToOperand(right));
1175 }
1176 } else { // can_overflow.
1177 Register overflow = scratch0();
1178 Register scratch = scratch1();
1179 if (right->IsStackSlot() ||
1180 right->IsArgument() ||
1181 right->IsConstantOperand()) {
1182 Register right_reg = EmitLoadRegister(right, scratch);
1183 __ SubuAndCheckForOverflow(ToRegister(result),
1184 ToRegister(left),
1185 right_reg,
1186 overflow); // Reg at also used as scratch.
1187 } else {
1188 ASSERT(right->IsRegister());
1189 // The overflow check macros do not support constant operands, so the
1190 // IsConstantOperand case is handled in the previous clause.
1191 __ SubuAndCheckForOverflow(ToRegister(result),
1192 ToRegister(left),
1193 ToRegister(right),
1194 overflow); // Reg at also used as scratch.
1195 }
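    // The overflow-checking macros signal overflow by leaving a negative
    // value in 'overflow', hence the 'lt' deopt condition below.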
1196 DeoptimizeIf(lt, instr->environment(), overflow, Operand(zero_reg));
1197 }
1198 }
1199
1200
1201 void LCodeGen::DoConstantI(LConstantI* instr) {
1202 ASSERT(instr->result()->IsRegister());
1203 __ li(ToRegister(instr->result()), Operand(instr->value()));
1204 }
1205
1206
1207 void LCodeGen::DoConstantD(LConstantD* instr) {
1208 ASSERT(instr->result()->IsDoubleRegister());
1209 DoubleRegister result = ToDoubleRegister(instr->result());
1210 double v = instr->value();
1211 __ Move(result, v);
1212 }
1213
1214
1215 void LCodeGen::DoConstantT(LConstantT* instr) {
1216 ASSERT(instr->result()->IsRegister());
1217 __ li(ToRegister(instr->result()), Operand(instr->value()));
1218 }
1219
1220
1221 void LCodeGen::DoJSArrayLength(LJSArrayLength* instr) {
1222 Register result = ToRegister(instr->result());
1223 Register array = ToRegister(instr->InputAt(0));
1224 __ lw(result, FieldMemOperand(array, JSArray::kLengthOffset));
1225 }
1226
1227
1228 void LCodeGen::DoFixedArrayBaseLength(LFixedArrayBaseLength* instr) {
1229 Register result = ToRegister(instr->result());
1230 Register array = ToRegister(instr->InputAt(0));
1231 __ lw(result, FieldMemOperand(array, FixedArrayBase::kLengthOffset));
1232 }
1233
1234
1235 void LCodeGen::DoElementsKind(LElementsKind* instr) {
1236 Register result = ToRegister(instr->result());
1237 Register input = ToRegister(instr->InputAt(0));
1238
1239 // Load map into |result|.
1240 __ lw(result, FieldMemOperand(input, HeapObject::kMapOffset));
1241 // Load the map's "bit field 2" into |result|. We only need the first byte,
1242 // but the following bit field extraction takes care of that anyway.
1243 __ lbu(result, FieldMemOperand(result, Map::kBitField2Offset));
1244 // Retrieve elements_kind from bit field 2.
1245 __ Ext(result, result, Map::kElementsKindShift, Map::kElementsKindBitCount);
1246 }
1247
1248
1249 void LCodeGen::DoValueOf(LValueOf* instr) {
1250 Register input = ToRegister(instr->InputAt(0));
1251 Register result = ToRegister(instr->result());
1252 Register map = ToRegister(instr->TempAt(0));
1253 Label done;
1254
1255 // If the object is a smi, return the object.
1256 __ Move(result, input);
1257 __ JumpIfSmi(input, &done);
1258
1259 // If the object is not a value type, return the object.
1260 __ GetObjectType(input, map, map);
1261 __ Branch(&done, ne, map, Operand(JS_VALUE_TYPE));
1262 __ lw(result, FieldMemOperand(input, JSValue::kValueOffset));
1263
1264 __ bind(&done);
1265 }
1266
1267
1268 void LCodeGen::DoBitNotI(LBitNotI* instr) {
1269 Register input = ToRegister(instr->InputAt(0));
1270 Register result = ToRegister(instr->result());
1271 __ Nor(result, zero_reg, Operand(input));
1272 }
1273
1274
1275 void LCodeGen::DoThrow(LThrow* instr) {
1276 Register input_reg = EmitLoadRegister(instr->InputAt(0), at);
1277 __ push(input_reg);
1278 CallRuntime(Runtime::kThrow, 1, instr);
1279
1280 if (FLAG_debug_code) {
1281 __ stop("Unreachable code.");
1282 }
1283 }
1284
1285
1286 void LCodeGen::DoAddI(LAddI* instr) {
1287 LOperand* left = instr->InputAt(0);
1288 LOperand* right = instr->InputAt(1);
1289 LOperand* result = instr->result();
1290 bool can_overflow = instr->hydrogen()->CheckFlag(HValue::kCanOverflow);
1291
1292 if (!can_overflow) {
1293 if (right->IsStackSlot() || right->IsArgument()) {
1294 Register right_reg = EmitLoadRegister(right, at);
1295 __ Addu(ToRegister(result), ToRegister(left), Operand(right_reg));
1296 } else {
1297 ASSERT(right->IsRegister() || right->IsConstantOperand());
1298 __ Addu(ToRegister(result), ToRegister(left), ToOperand(right));
1299 }
1300 } else { // can_overflow.
1301 Register overflow = scratch0();
1302 Register scratch = scratch1();
1303 if (right->IsStackSlot() ||
1304 right->IsArgument() ||
1305 right->IsConstantOperand()) {
1306 Register right_reg = EmitLoadRegister(right, scratch);
1307 __ AdduAndCheckForOverflow(ToRegister(result),
1308 ToRegister(left),
1309 right_reg,
1310 overflow); // Reg at also used as scratch.
1311 } else {
1312 ASSERT(right->IsRegister());
1313 // The overflow check macros do not support constant operands, so the
1314 // IsConstantOperand case is handled in the previous clause.
1315 __ AdduAndCheckForOverflow(ToRegister(result),
1316 ToRegister(left),
1317 ToRegister(right),
1318 overflow); // Reg at also used as scratch.
1319 }
1320 DeoptimizeIf(lt, instr->environment(), overflow, Operand(zero_reg));
1321 }
1322 }
1323
1324
1325 void LCodeGen::DoArithmeticD(LArithmeticD* instr) {
1326 DoubleRegister left = ToDoubleRegister(instr->InputAt(0));
1327 DoubleRegister right = ToDoubleRegister(instr->InputAt(1));
1328 DoubleRegister result = ToDoubleRegister(instr->result());
1329 switch (instr->op()) {
1330 case Token::ADD:
1331 __ add_d(result, left, right);
1332 break;
1333 case Token::SUB:
1334 __ sub_d(result, left, right);
1335 break;
1336 case Token::MUL:
1337 __ mul_d(result, left, right);
1338 break;
1339 case Token::DIV:
1340 __ div_d(result, left, right);
1341 break;
1342 case Token::MOD: {
1343 // Save a0-a3 on the stack.
1344 RegList saved_regs = a0.bit() | a1.bit() | a2.bit() | a3.bit();
1345 __ MultiPush(saved_regs);
1346
1347 __ PrepareCallCFunction(0, 2, scratch0());
1348 __ SetCallCDoubleArguments(left, right);
1349 __ CallCFunction(
1350 ExternalReference::double_fp_operation(Token::MOD, isolate()),
1351 0, 2);
1352 // Move the result into the double result register.
1353 __ GetCFunctionDoubleResult(result);
1354
1355 // Restore the saved registers.
1356 __ MultiPop(saved_regs);
1357 break;
1358 }
1359 default:
1360 UNREACHABLE();
1361 break;
1362 }
1363 }
1364
1365
1366 void LCodeGen::DoArithmeticT(LArithmeticT* instr) {
1367 ASSERT(ToRegister(instr->InputAt(0)).is(a1));
1368 ASSERT(ToRegister(instr->InputAt(1)).is(a0));
1369 ASSERT(ToRegister(instr->result()).is(v0));
1370
1371 BinaryOpStub stub(instr->op(), NO_OVERWRITE);
1372 CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
1373 // Other architectures use a nop here to signal that there is no inlined
1374 // patchable code. MIPS does not need the nop, since our marker
1375 // instruction (andi zero_reg) will never appear in normal code.
1376 }
1377
1378
1379 int LCodeGen::GetNextEmittedBlock(int block) {
1380 for (int i = block + 1; i < graph()->blocks()->length(); ++i) {
1381 LLabel* label = chunk_->GetLabel(i);
1382 if (!label->HasReplacement()) return i;
1383 }
1384 return -1;
1385 }
1386
1387
1388 void LCodeGen::EmitBranch(int left_block, int right_block,
1389 Condition cc, Register src1, const Operand& src2) {
1390 int next_block = GetNextEmittedBlock(current_block_);
1391 right_block = chunk_->LookupDestination(right_block);
1392 left_block = chunk_->LookupDestination(left_block);
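  // Fall through to whichever target is emitted next and branch explicitly to
  // the other; emit two branches only when neither target follows directly.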
1393 if (right_block == left_block) {
1394 EmitGoto(left_block);
1395 } else if (left_block == next_block) {
1396 __ Branch(chunk_->GetAssemblyLabel(right_block),
1397 NegateCondition(cc), src1, src2);
1398 } else if (right_block == next_block) {
1399 __ Branch(chunk_->GetAssemblyLabel(left_block), cc, src1, src2);
1400 } else {
1401 __ Branch(chunk_->GetAssemblyLabel(left_block), cc, src1, src2);
1402 __ Branch(chunk_->GetAssemblyLabel(right_block));
1403 }
1404 }
1405
1406
1407 void LCodeGen::EmitBranchF(int left_block, int right_block,
1408 Condition cc, FPURegister src1, FPURegister src2) {
1409 int next_block = GetNextEmittedBlock(current_block_);
1410 right_block = chunk_->LookupDestination(right_block);
1411 left_block = chunk_->LookupDestination(left_block);
1412 if (right_block == left_block) {
1413 EmitGoto(left_block);
1414 } else if (left_block == next_block) {
1415 __ BranchF(chunk_->GetAssemblyLabel(right_block), NULL,
1416 NegateCondition(cc), src1, src2);
1417 } else if (right_block == next_block) {
1418 __ BranchF(chunk_->GetAssemblyLabel(left_block), NULL, cc, src1, src2);
1419 } else {
1420 __ BranchF(chunk_->GetAssemblyLabel(left_block), NULL, cc, src1, src2);
1421 __ Branch(chunk_->GetAssemblyLabel(right_block));
1422 }
1423 }
1424
1425
1426 void LCodeGen::DoBranch(LBranch* instr) {
1427 int true_block = chunk_->LookupDestination(instr->true_block_id());
1428 int false_block = chunk_->LookupDestination(instr->false_block_id());
1429
1430 Representation r = instr->hydrogen()->value()->representation();
1431 if (r.IsInteger32()) {
1432 Register reg = ToRegister(instr->InputAt(0));
1433 EmitBranch(true_block, false_block, ne, reg, Operand(zero_reg));
1434 } else if (r.IsDouble()) {
1435 DoubleRegister reg = ToDoubleRegister(instr->InputAt(0));
1436 // Test the double value. Zero and NaN are false.
1437 EmitBranchF(true_block, false_block, ne, reg, kDoubleRegZero);
1438 } else {
1439 ASSERT(r.IsTagged());
1440 Register reg = ToRegister(instr->InputAt(0));
1441 HType type = instr->hydrogen()->value()->type();
1442 if (type.IsBoolean()) {
1443 __ LoadRoot(at, Heap::kTrueValueRootIndex);
1444 EmitBranch(true_block, false_block, eq, reg, Operand(at));
1445 } else if (type.IsSmi()) {
1446 EmitBranch(true_block, false_block, ne, reg, Operand(zero_reg));
1447 } else {
1448 Label* true_label = chunk_->GetAssemblyLabel(true_block);
1449 Label* false_label = chunk_->GetAssemblyLabel(false_block);
1450
1451 ToBooleanStub::Types expected = instr->hydrogen()->expected_input_types();
1452 // Avoid deopts in the case where we've never executed this path before.
1453 if (expected.IsEmpty()) expected = ToBooleanStub::all_types();
1454
1455 if (expected.Contains(ToBooleanStub::UNDEFINED)) {
1456 // undefined -> false.
1457 __ LoadRoot(at, Heap::kUndefinedValueRootIndex);
1458 __ Branch(false_label, eq, reg, Operand(at));
1459 }
1460 if (expected.Contains(ToBooleanStub::BOOLEAN)) {
1461 // Boolean -> its value.
1462 __ LoadRoot(at, Heap::kTrueValueRootIndex);
1463 __ Branch(true_label, eq, reg, Operand(at));
1464 __ LoadRoot(at, Heap::kFalseValueRootIndex);
1465 __ Branch(false_label, eq, reg, Operand(at));
1466 }
1467 if (expected.Contains(ToBooleanStub::NULL_TYPE)) {
1468 // 'null' -> false.
1469 __ LoadRoot(at, Heap::kNullValueRootIndex);
1470 __ Branch(false_label, eq, reg, Operand(at));
1471 }
1472
1473 if (expected.Contains(ToBooleanStub::SMI)) {
1474 // Smis: 0 -> false, all other -> true.
1475 __ Branch(false_label, eq, reg, Operand(zero_reg));
1476 __ JumpIfSmi(reg, true_label);
1477 } else if (expected.NeedsMap()) {
1478 // If we need a map later and have a Smi -> deopt.
1479 __ And(at, reg, Operand(kSmiTagMask));
1480 DeoptimizeIf(eq, instr->environment(), at, Operand(zero_reg));
1481 }
1482
1483 const Register map = scratch0();
1484 if (expected.NeedsMap()) {
1485 __ lw(map, FieldMemOperand(reg, HeapObject::kMapOffset));
1486 if (expected.CanBeUndetectable()) {
1487 // Undetectable -> false.
1488 __ lbu(at, FieldMemOperand(map, Map::kBitFieldOffset));
1489 __ And(at, at, Operand(1 << Map::kIsUndetectable));
1490 __ Branch(false_label, ne, at, Operand(zero_reg));
1491 }
1492 }
1493
1494 if (expected.Contains(ToBooleanStub::SPEC_OBJECT)) {
1495 // spec object -> true.
1496 __ lbu(at, FieldMemOperand(map, Map::kInstanceTypeOffset));
1497 __ Branch(true_label, ge, at, Operand(FIRST_SPEC_OBJECT_TYPE));
1498 }
1499
1500 if (expected.Contains(ToBooleanStub::STRING)) {
1501 // String value -> false iff empty.
1502 Label not_string;
1503 __ lbu(at, FieldMemOperand(map, Map::kInstanceTypeOffset));
1504 __ Branch(&not_string, ge, at, Operand(FIRST_NONSTRING_TYPE));
1505 __ lw(at, FieldMemOperand(reg, String::kLengthOffset));
1506 __ Branch(true_label, ne, at, Operand(zero_reg));
1507 __ Branch(false_label);
1508 __ bind(&not_string);
1509 }
1510
1511 if (expected.Contains(ToBooleanStub::HEAP_NUMBER)) {
1512 // heap number -> false iff +0, -0, or NaN.
1513 DoubleRegister dbl_scratch = double_scratch0();
1514 Label not_heap_number;
1515 __ LoadRoot(at, Heap::kHeapNumberMapRootIndex);
1516 __ Branch(&not_heap_number, ne, map, Operand(at));
1517 __ ldc1(dbl_scratch, FieldMemOperand(reg, HeapNumber::kValueOffset));
1518 __ BranchF(true_label, false_label, ne, dbl_scratch, kDoubleRegZero);
1519 // Falls through if dbl_scratch == 0.
1520 __ Branch(false_label);
1521 __ bind(&not_heap_number);
1522 }
1523
1524 // We've seen something for the first time -> deopt.
1525 DeoptimizeIf(al, instr->environment(), zero_reg, Operand(zero_reg));
1526 }
1527 }
1528 }
1529
1530
1531 void LCodeGen::EmitGoto(int block) {
1532 block = chunk_->LookupDestination(block);
1533 int next_block = GetNextEmittedBlock(current_block_);
1534 if (block != next_block) {
1535 __ jmp(chunk_->GetAssemblyLabel(block));
1536 }
1537 }
1538
1539
1540 void LCodeGen::DoGoto(LGoto* instr) {
1541 EmitGoto(instr->block_id());
1542 }
1543
1544
1545 Condition LCodeGen::TokenToCondition(Token::Value op, bool is_unsigned) {
1546 Condition cond = kNoCondition;
1547 switch (op) {
1548 case Token::EQ:
1549 case Token::EQ_STRICT:
1550 cond = eq;
1551 break;
1552 case Token::LT:
1553 cond = is_unsigned ? lo : lt;
1554 break;
1555 case Token::GT:
1556 cond = is_unsigned ? hi : gt;
1557 break;
1558 case Token::LTE:
1559 cond = is_unsigned ? ls : le;
1560 break;
1561 case Token::GTE:
1562 cond = is_unsigned ? hs : ge;
1563 break;
1564 case Token::IN:
1565 case Token::INSTANCEOF:
1566 default:
1567 UNREACHABLE();
1568 }
1569 return cond;
1570 }
1571
1572
1573 void LCodeGen::DoCmpIDAndBranch(LCmpIDAndBranch* instr) {
1574 LOperand* left = instr->InputAt(0);
1575 LOperand* right = instr->InputAt(1);
1576 int false_block = chunk_->LookupDestination(instr->false_block_id());
1577 int true_block = chunk_->LookupDestination(instr->true_block_id());
1578
1579 Condition cond = TokenToCondition(instr->op(), false);
1580
1581 if (left->IsConstantOperand() && right->IsConstantOperand()) {
1582 // We can statically evaluate the comparison.
1583 double left_val = ToDouble(LConstantOperand::cast(left));
1584 double right_val = ToDouble(LConstantOperand::cast(right));
1585 int next_block =
1586 EvalComparison(instr->op(), left_val, right_val) ? true_block
1587 : false_block;
1588 EmitGoto(next_block);
1589 } else {
1590 if (instr->is_double()) {
1591 // Compare left and right as doubles and load the
1592 // resulting flags into the normal status register.
1593 FPURegister left_reg = ToDoubleRegister(left);
1594 FPURegister right_reg = ToDoubleRegister(right);
1595
1596 // If a NaN is involved, i.e. the result is unordered,
1597 // jump to the false block label.
1598 __ BranchF(NULL, chunk_->GetAssemblyLabel(false_block), eq,
1599 left_reg, right_reg);
1600
1601 EmitBranchF(true_block, false_block, cond, left_reg, right_reg);
1602 } else {
1603 Register cmp_left;
1604 Operand cmp_right = Operand(0);
1605
1606 if (right->IsConstantOperand()) {
1607 cmp_left = ToRegister(left);
1608 cmp_right = Operand(ToInteger32(LConstantOperand::cast(right)));
1609 } else if (left->IsConstantOperand()) {
1610 cmp_left = ToRegister(right);
1611 cmp_right = Operand(ToInteger32(LConstantOperand::cast(left)));
1612 // We transposed the operands. Reverse the condition.
1613 cond = ReverseCondition(cond);
1614 } else {
1615 cmp_left = ToRegister(left);
1616 cmp_right = Operand(ToRegister(right));
1617 }
1618
1619 EmitBranch(true_block, false_block, cond, cmp_left, cmp_right);
1620 }
1621 }
1622 }
1623
1624
1625 void LCodeGen::DoCmpObjectEqAndBranch(LCmpObjectEqAndBranch* instr) {
1626 Register left = ToRegister(instr->InputAt(0));
1627 Register right = ToRegister(instr->InputAt(1));
1628 int false_block = chunk_->LookupDestination(instr->false_block_id());
1629 int true_block = chunk_->LookupDestination(instr->true_block_id());
1630
1631 EmitBranch(true_block, false_block, eq, left, Operand(right));
1632 }
1633
1634
1635 void LCodeGen::DoCmpConstantEqAndBranch(LCmpConstantEqAndBranch* instr) {
1636 Register left = ToRegister(instr->InputAt(0));
1637 int true_block = chunk_->LookupDestination(instr->true_block_id());
1638 int false_block = chunk_->LookupDestination(instr->false_block_id());
1639
1640 EmitBranch(true_block, false_block, eq, left,
1641 Operand(instr->hydrogen()->right()));
1642 }
1643
1644
1645
1646 void LCodeGen::DoIsNilAndBranch(LIsNilAndBranch* instr) {
1647 Register scratch = scratch0();
1648 Register reg = ToRegister(instr->InputAt(0));
1649 int false_block = chunk_->LookupDestination(instr->false_block_id());
1650
1651 // If the expression is known to be untagged or a smi, then it's definitely
1652 // not null, and it can't be an undetectable object.
1653 if (instr->hydrogen()->representation().IsSpecialization() ||
1654 instr->hydrogen()->type().IsSmi()) {
1655 EmitGoto(false_block);
1656 return;
1657 }
1658
1659 int true_block = chunk_->LookupDestination(instr->true_block_id());
1660
1661 Heap::RootListIndex nil_value = instr->nil() == kNullValue ?
1662 Heap::kNullValueRootIndex :
1663 Heap::kUndefinedValueRootIndex;
1664 __ LoadRoot(at, nil_value);
1665 if (instr->kind() == kStrictEquality) {
1666 EmitBranch(true_block, false_block, eq, reg, Operand(at));
1667 } else {
1668 Heap::RootListIndex other_nil_value = instr->nil() == kNullValue ?
1669 Heap::kUndefinedValueRootIndex :
1670 Heap::kNullValueRootIndex;
1671 Label* true_label = chunk_->GetAssemblyLabel(true_block);
1672 Label* false_label = chunk_->GetAssemblyLabel(false_block);
1673 __ Branch(USE_DELAY_SLOT, true_label, eq, reg, Operand(at));
1674 __ LoadRoot(at, other_nil_value); // In the delay slot.
1675 __ Branch(USE_DELAY_SLOT, true_label, eq, reg, Operand(at));
1676 __ JumpIfSmi(reg, false_label); // In the delay slot.
1677 // Check for undetectable objects by looking in the bit field in
1678 // the map. The object has already been smi checked.
1679 __ lw(scratch, FieldMemOperand(reg, HeapObject::kMapOffset));
1680 __ lbu(scratch, FieldMemOperand(scratch, Map::kBitFieldOffset));
1681 __ And(scratch, scratch, 1 << Map::kIsUndetectable);
1682 EmitBranch(true_block, false_block, ne, scratch, Operand(zero_reg));
1683 }
1684 }
1685
1686
1687 Condition LCodeGen::EmitIsObject(Register input,
1688 Register temp1,
1689 Label* is_not_object,
1690 Label* is_object) {
1691 Register temp2 = scratch0();
1692 __ JumpIfSmi(input, is_not_object);
1693
1694 __ LoadRoot(temp2, Heap::kNullValueRootIndex);
1695 __ Branch(is_object, eq, input, Operand(temp2));
1696
1697 // Load map.
1698 __ lw(temp1, FieldMemOperand(input, HeapObject::kMapOffset));
1699 // Undetectable objects behave like undefined.
1700 __ lbu(temp2, FieldMemOperand(temp1, Map::kBitFieldOffset));
1701 __ And(temp2, temp2, Operand(1 << Map::kIsUndetectable));
1702 __ Branch(is_not_object, ne, temp2, Operand(zero_reg));
1703
1704 // Load instance type and check that it is in object type range.
1705 __ lbu(temp2, FieldMemOperand(temp1, Map::kInstanceTypeOffset));
1706 __ Branch(is_not_object,
1707 lt, temp2, Operand(FIRST_NONCALLABLE_SPEC_OBJECT_TYPE));
1708
1709 return le;
1710 }
1711
1712
1713 void LCodeGen::DoIsObjectAndBranch(LIsObjectAndBranch* instr) {
1714 Register reg = ToRegister(instr->InputAt(0));
1715 Register temp1 = ToRegister(instr->TempAt(0));
1716 Register temp2 = scratch0();
1717
1718 int true_block = chunk_->LookupDestination(instr->true_block_id());
1719 int false_block = chunk_->LookupDestination(instr->false_block_id());
1720 Label* true_label = chunk_->GetAssemblyLabel(true_block);
1721 Label* false_label = chunk_->GetAssemblyLabel(false_block);
1722
1723 Condition true_cond =
1724 EmitIsObject(reg, temp1, false_label, true_label);
1725
1726 EmitBranch(true_block, false_block, true_cond, temp2,
1727 Operand(LAST_NONCALLABLE_SPEC_OBJECT_TYPE));
1728 }
1729
1730
1731 void LCodeGen::DoIsSmiAndBranch(LIsSmiAndBranch* instr) {
1732 int true_block = chunk_->LookupDestination(instr->true_block_id());
1733 int false_block = chunk_->LookupDestination(instr->false_block_id());
1734
1735 Register input_reg = EmitLoadRegister(instr->InputAt(0), at);
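  // Smis have the low-order tag bit cleared, so masking with kSmiTagMask
  // yields zero exactly when input_reg holds a smi.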
1736 __ And(at, input_reg, kSmiTagMask);
1737 EmitBranch(true_block, false_block, eq, at, Operand(zero_reg));
1738 }
1739
1740
1741 void LCodeGen::DoIsUndetectableAndBranch(LIsUndetectableAndBranch* instr) {
1742 Register input = ToRegister(instr->InputAt(0));
1743 Register temp = ToRegister(instr->TempAt(0));
1744
1745 int true_block = chunk_->LookupDestination(instr->true_block_id());
1746 int false_block = chunk_->LookupDestination(instr->false_block_id());
1747
1748 __ JumpIfSmi(input, chunk_->GetAssemblyLabel(false_block));
1749 __ lw(temp, FieldMemOperand(input, HeapObject::kMapOffset));
1750 __ lbu(temp, FieldMemOperand(temp, Map::kBitFieldOffset));
1751 __ And(at, temp, Operand(1 << Map::kIsUndetectable));
1752 EmitBranch(true_block, false_block, ne, at, Operand(zero_reg));
1753 }
1754
1755
1756 static InstanceType TestType(HHasInstanceTypeAndBranch* instr) {
1757 InstanceType from = instr->from();
1758 InstanceType to = instr->to();
1759 if (from == FIRST_TYPE) return to;
1760 ASSERT(from == to || to == LAST_TYPE);
1761 return from;
1762 }
1763
1764
1765 static Condition BranchCondition(HHasInstanceTypeAndBranch* instr) {
1766 InstanceType from = instr->from();
1767 InstanceType to = instr->to();
1768 if (from == to) return eq;
1769 if (to == LAST_TYPE) return hs;
1770 if (from == FIRST_TYPE) return ls;
1771 UNREACHABLE();
1772 return eq;
1773 }
1774
1775
1776 void LCodeGen::DoHasInstanceTypeAndBranch(LHasInstanceTypeAndBranch* instr) {
1777 Register scratch = scratch0();
1778 Register input = ToRegister(instr->InputAt(0));
1779
1780 int true_block = chunk_->LookupDestination(instr->true_block_id());
1781 int false_block = chunk_->LookupDestination(instr->false_block_id());
1782
1783 Label* false_label = chunk_->GetAssemblyLabel(false_block);
1784
1785 __ JumpIfSmi(input, false_label);
1786
1787 __ GetObjectType(input, scratch, scratch);
1788 EmitBranch(true_block,
1789 false_block,
1790 BranchCondition(instr->hydrogen()),
1791 scratch,
1792 Operand(TestType(instr->hydrogen())));
1793 }
1794
1795
1796 void LCodeGen::DoGetCachedArrayIndex(LGetCachedArrayIndex* instr) {
1797 Register input = ToRegister(instr->InputAt(0));
1798 Register result = ToRegister(instr->result());
1799
1800 if (FLAG_debug_code) {
1801 __ AbortIfNotString(input);
1802 }
1803
1804 __ lw(result, FieldMemOperand(input, String::kHashFieldOffset));
1805 __ IndexFromHash(result, result);
1806 }
1807
1808
1809 void LCodeGen::DoHasCachedArrayIndexAndBranch(
1810 LHasCachedArrayIndexAndBranch* instr) {
1811 Register input = ToRegister(instr->InputAt(0));
1812 Register scratch = scratch0();
1813
1814 int true_block = chunk_->LookupDestination(instr->true_block_id());
1815 int false_block = chunk_->LookupDestination(instr->false_block_id());
1816
1817 __ lw(scratch,
1818 FieldMemOperand(input, String::kHashFieldOffset));
1819 __ And(at, scratch, Operand(String::kContainsCachedArrayIndexMask));
1820 EmitBranch(true_block, false_block, eq, at, Operand(zero_reg));
1821 }
1822
1823
1824 // Branches to a label or falls through with the address of this instance's
1825 // class name in the temp register, for comparison by the caller. Trashes the
1826 // temp registers, but not the input. Only input and temp2 may alias.
1827 void LCodeGen::EmitClassOfTest(Label* is_true,
1828 Label* is_false,
1829                                Handle<String> class_name,
1830 Register input,
1831 Register temp,
1832 Register temp2) {
1833 ASSERT(!input.is(temp));
1834 ASSERT(!temp.is(temp2)); // But input and temp2 may be the same register.
1835 __ JumpIfSmi(input, is_false);
1836
1837 if (class_name->IsEqualTo(CStrVector("Function"))) {
1838 // Assuming the following assertions, we can use the same compares to test
1839 // for both being a function type and being in the object type range.
1840 STATIC_ASSERT(NUM_OF_CALLABLE_SPEC_OBJECT_TYPES == 2);
1841 STATIC_ASSERT(FIRST_NONCALLABLE_SPEC_OBJECT_TYPE ==
1842 FIRST_SPEC_OBJECT_TYPE + 1);
1843 STATIC_ASSERT(LAST_NONCALLABLE_SPEC_OBJECT_TYPE ==
1844 LAST_SPEC_OBJECT_TYPE - 1);
1845 STATIC_ASSERT(LAST_SPEC_OBJECT_TYPE == LAST_TYPE);
1846
1847 __ GetObjectType(input, temp, temp2);
1848 __ Branch(is_false, lt, temp2, Operand(FIRST_SPEC_OBJECT_TYPE));
1849 __ Branch(is_true, eq, temp2, Operand(FIRST_SPEC_OBJECT_TYPE));
1850 __ Branch(is_true, eq, temp2, Operand(LAST_SPEC_OBJECT_TYPE));
1851 } else {
1852 // Faster code path to avoid two compares: subtract lower bound from the
1853 // actual type and do a signed compare with the width of the type range.
1854 __ GetObjectType(input, temp, temp2);
1855 __ Subu(temp2, temp2, Operand(FIRST_NONCALLABLE_SPEC_OBJECT_TYPE));
1856 __ Branch(is_false, gt, temp2, Operand(LAST_NONCALLABLE_SPEC_OBJECT_TYPE -
1857 FIRST_NONCALLABLE_SPEC_OBJECT_TYPE));
1858 }
1859
1860 // Now we are in the FIRST-LAST_NONCALLABLE_SPEC_OBJECT_TYPE range.
1861 // Check if the constructor in the map is a function.
1862 __ lw(temp, FieldMemOperand(temp, Map::kConstructorOffset));
1863
1864 // Objects with a non-function constructor have class 'Object'.
1865 __ GetObjectType(temp, temp2, temp2);
1866 if (class_name->IsEqualTo(CStrVector("Object"))) {
1867 __ Branch(is_true, ne, temp2, Operand(JS_FUNCTION_TYPE));
1868 } else {
1869 __ Branch(is_false, ne, temp2, Operand(JS_FUNCTION_TYPE));
1870 }
1871
1872 // temp now contains the constructor function. Grab the
1873 // instance class name from there.
1874 __ lw(temp, FieldMemOperand(temp, JSFunction::kSharedFunctionInfoOffset));
1875 __ lw(temp, FieldMemOperand(temp,
1876 SharedFunctionInfo::kInstanceClassNameOffset));
1877 // The class name we are testing against is a symbol because it's a literal.
1878 // The name in the constructor is a symbol because of the way the context is
1879 // booted. This routine isn't expected to work for random API-created
1880 // classes and it doesn't have to because you can't access it with natives
1881 // syntax. Since both sides are symbols it is sufficient to use an identity
1882 // comparison.
1883
1884 // End with the address of this class_name instance in temp register.
1885   // On MIPS, the caller must do the comparison with Handle<String> class_name.
1886 }
1887
1888
1889 void LCodeGen::DoClassOfTestAndBranch(LClassOfTestAndBranch* instr) {
1890 Register input = ToRegister(instr->InputAt(0));
1891 Register temp = scratch0();
1892 Register temp2 = ToRegister(instr->TempAt(0));
1893 Handle<String> class_name = instr->hydrogen()->class_name();
1894
1895 int true_block = chunk_->LookupDestination(instr->true_block_id());
1896 int false_block = chunk_->LookupDestination(instr->false_block_id());
1897
1898 Label* true_label = chunk_->GetAssemblyLabel(true_block);
1899 Label* false_label = chunk_->GetAssemblyLabel(false_block);
1900
1901 EmitClassOfTest(true_label, false_label, class_name, input, temp, temp2);
1902
1903 EmitBranch(true_block, false_block, eq, temp, Operand(class_name));
1904 }
1905
1906
1907 void LCodeGen::DoCmpMapAndBranch(LCmpMapAndBranch* instr) {
1908 Register reg = ToRegister(instr->InputAt(0));
1909 Register temp = ToRegister(instr->TempAt(0));
1910 int true_block = instr->true_block_id();
1911 int false_block = instr->false_block_id();
1912
1913 __ lw(temp, FieldMemOperand(reg, HeapObject::kMapOffset));
1914 EmitBranch(true_block, false_block, eq, temp, Operand(instr->map()));
1915 }
1916
1917
1918 void LCodeGen::DoInstanceOf(LInstanceOf* instr) {
1919 Label true_label, done;
1920 ASSERT(ToRegister(instr->InputAt(0)).is(a0)); // Object is in a0.
1921 ASSERT(ToRegister(instr->InputAt(1)).is(a1)); // Function is in a1.
1922 Register result = ToRegister(instr->result());
1923 ASSERT(result.is(v0));
1924
1925 InstanceofStub stub(InstanceofStub::kArgsInRegisters);
1926 CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
1927
1928 __ Branch(&true_label, eq, result, Operand(zero_reg));
1929 __ li(result, Operand(factory()->false_value()));
1930 __ Branch(&done);
1931 __ bind(&true_label);
1932 __ li(result, Operand(factory()->true_value()));
1933 __ bind(&done);
1934 }
1935
1936
1937 void LCodeGen::DoInstanceOfKnownGlobal(LInstanceOfKnownGlobal* instr) {
1938 class DeferredInstanceOfKnownGlobal: public LDeferredCode {
1939 public:
1940 DeferredInstanceOfKnownGlobal(LCodeGen* codegen,
1941 LInstanceOfKnownGlobal* instr)
1942 : LDeferredCode(codegen), instr_(instr) { }
1943 virtual void Generate() {
1944 codegen()->DoDeferredLInstanceOfKnownGlobal(instr_, &map_check_);
1945 }
1946 virtual LInstruction* instr() { return instr_; }
1947 Label* map_check() { return &map_check_; }
1948
1949 private:
1950 LInstanceOfKnownGlobal* instr_;
1951 Label map_check_;
1952 };
1953
1954 DeferredInstanceOfKnownGlobal* deferred;
1955 deferred = new DeferredInstanceOfKnownGlobal(this, instr);
1956
1957 Label done, false_result;
1958 Register object = ToRegister(instr->InputAt(0));
1959 Register temp = ToRegister(instr->TempAt(0));
1960 Register result = ToRegister(instr->result());
1961
1962 ASSERT(object.is(a0));
1963 ASSERT(result.is(v0));
1964
1965 // A Smi is not instance of anything.
1966 __ JumpIfSmi(object, &false_result);
1967
1968   // This is the inlined call site instanceof cache. The two occurrences of
1969   // the hole value will be patched to the last map/result pair generated by
1970   // the instanceof stub.
1971 Label cache_miss;
1972 Register map = temp;
1973 __ lw(map, FieldMemOperand(object, HeapObject::kMapOffset));
1974
1975 Assembler::BlockTrampolinePoolScope block_trampoline_pool(masm_);
1976 __ bind(deferred->map_check()); // Label for calculating code patching.
1977 // We use Factory::the_hole_value() on purpose instead of loading from the
1978 // root array to force relocation to be able to later patch with
1979 // the cached map.
1980 __ li(at, Operand(factory()->the_hole_value()), true);
1981 __ Branch(&cache_miss, ne, map, Operand(at));
1982 // We use Factory::the_hole_value() on purpose instead of loading from the
1983 // root array to force relocation to be able to later patch
1984 // with true or false.
1985 __ li(result, Operand(factory()->the_hole_value()), true);
1986 __ Branch(&done);
1987
1988 // The inlined call site cache did not match. Check null and string before
1989 // calling the deferred code.
1990 __ bind(&cache_miss);
1991 // Null is not instance of anything.
1992 __ LoadRoot(temp, Heap::kNullValueRootIndex);
1993 __ Branch(&false_result, eq, object, Operand(temp));
1994
1995   // A string value is not an instance of anything.
1996 Condition cc = __ IsObjectStringType(object, temp, temp);
1997 __ Branch(&false_result, cc, temp, Operand(zero_reg));
1998
1999 // Go to the deferred code.
2000 __ Branch(deferred->entry());
2001
2002 __ bind(&false_result);
2003 __ LoadRoot(result, Heap::kFalseValueRootIndex);
2004
2005 // Here result has either true or false. Deferred code also produces true or
2006 // false object.
2007 __ bind(deferred->exit());
2008 __ bind(&done);
2009 }
2010
2011
2012 void LCodeGen::DoDeferredLInstanceOfKnownGlobal(LInstanceOfKnownGlobal* instr,
2013 Label* map_check) {
2014 Register result = ToRegister(instr->result());
2015 ASSERT(result.is(v0));
2016
2017 InstanceofStub::Flags flags = InstanceofStub::kNoFlags;
2018 flags = static_cast<InstanceofStub::Flags>(
2019 flags | InstanceofStub::kArgsInRegisters);
2020 flags = static_cast<InstanceofStub::Flags>(
2021 flags | InstanceofStub::kCallSiteInlineCheck);
2022 flags = static_cast<InstanceofStub::Flags>(
2023 flags | InstanceofStub::kReturnTrueFalseObject);
2024 InstanceofStub stub(flags);
2025
2026 PushSafepointRegistersScope scope(this, Safepoint::kWithRegisters);
2027
2028   // Get the temp register reserved by the instruction. This needs to be t0
2029   // because its safepoint register slot is used to communicate the offset to
2030   // the location of the map check.
2031 Register temp = ToRegister(instr->TempAt(0));
2032 ASSERT(temp.is(t0));
2033 __ li(InstanceofStub::right(), Operand(instr->function()));
2034 static const int kAdditionalDelta = 7;
2035 int delta = masm_->InstructionsGeneratedSince(map_check) + kAdditionalDelta;
2036 Label before_push_delta;
2037 __ bind(&before_push_delta);
2038 {
2039 Assembler::BlockTrampolinePoolScope block_trampoline_pool(masm_);
2040 __ li(temp, Operand(delta * kPointerSize), true);
2041 __ StoreToSafepointRegisterSlot(temp, temp);
2042 }
2043 CallCodeGeneric(stub.GetCode(),
2044 RelocInfo::CODE_TARGET,
2045 instr,
2046 RECORD_SAFEPOINT_WITH_REGISTERS_AND_NO_ARGUMENTS);
2047 // Put the result value into the result register slot and
2048 // restore all registers.
2049 __ StoreToSafepointRegisterSlot(result, result);
2050 }
2051
2052
2053 static Condition ComputeCompareCondition(Token::Value op) {
2054 switch (op) {
2055 case Token::EQ_STRICT:
2056 case Token::EQ:
2057 return eq;
2058 case Token::LT:
2059 return lt;
2060 case Token::GT:
2061 return gt;
2062 case Token::LTE:
2063 return le;
2064 case Token::GTE:
2065 return ge;
2066 default:
2067 UNREACHABLE();
2068 return kNoCondition;
2069 }
2070 }
2071
2072
2073 void LCodeGen::DoCmpT(LCmpT* instr) {
2074 Token::Value op = instr->op();
2075
2076 Handle<Code> ic = CompareIC::GetUninitialized(op);
2077 CallCode(ic, RelocInfo::CODE_TARGET, instr);
2078 // On MIPS there is no need for a "no inlined smi code" marker (nop).
2079
2080 Condition condition = ComputeCompareCondition(op);
2081 // A minor optimization that relies on LoadRoot always emitting one
2082 // instruction.
2083 Assembler::BlockTrampolinePoolScope block_trampoline_pool(masm());
2084 Label done;
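  // The first LoadRoot executes in the branch delay slot, so the true value is
  // materialized whether or not the branch is taken; on fall-through the second
  // LoadRoot overwrites it with the false value.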
2085 __ Branch(USE_DELAY_SLOT, &done, condition, v0, Operand(zero_reg));
2086 __ LoadRoot(ToRegister(instr->result()), Heap::kTrueValueRootIndex);
2087 __ LoadRoot(ToRegister(instr->result()), Heap::kFalseValueRootIndex);
2088 ASSERT_EQ(3, masm()->InstructionsGeneratedSince(&done));
2089 __ bind(&done);
2090 }
2091
2092
2093 void LCodeGen::DoReturn(LReturn* instr) {
2094 if (FLAG_trace) {
2095 // Push the return value on the stack as the parameter.
2096 // Runtime::TraceExit returns its parameter in v0.
2097 __ push(v0);
2098 __ CallRuntime(Runtime::kTraceExit, 1);
2099 }
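  // The frame holds the parameters plus the receiver, hence the +1 below.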
2100 int32_t sp_delta = (GetParameterCount() + 1) * kPointerSize;
2101 __ mov(sp, fp);
2102 __ Pop(ra, fp);
2103 __ Addu(sp, sp, Operand(sp_delta));
2104 __ Jump(ra);
2105 }
2106
2107
2108 void LCodeGen::DoLoadGlobalCell(LLoadGlobalCell* instr) {
2109 Register result = ToRegister(instr->result());
2110 __ li(at, Operand(Handle<Object>(instr->hydrogen()->cell())));
2111 __ lw(result, FieldMemOperand(at, JSGlobalPropertyCell::kValueOffset));
2112 if (instr->hydrogen()->RequiresHoleCheck()) {
2113 __ LoadRoot(at, Heap::kTheHoleValueRootIndex);
2114 DeoptimizeIf(eq, instr->environment(), result, Operand(at));
2115 }
2116 }
2117
2118
2119 void LCodeGen::DoLoadGlobalGeneric(LLoadGlobalGeneric* instr) {
2120 ASSERT(ToRegister(instr->global_object()).is(a0));
2121 ASSERT(ToRegister(instr->result()).is(v0));
2122
2123 __ li(a2, Operand(instr->name()));
2124 RelocInfo::Mode mode = instr->for_typeof() ? RelocInfo::CODE_TARGET
2125 : RelocInfo::CODE_TARGET_CONTEXT;
2126 Handle<Code> ic = isolate()->builtins()->LoadIC_Initialize();
2127 CallCode(ic, mode, instr);
2128 }
2129
2130
2131 void LCodeGen::DoStoreGlobalCell(LStoreGlobalCell* instr) {
2132 Register value = ToRegister(instr->InputAt(0));
2133 Register scratch = scratch0();
2134 Register scratch2 = ToRegister(instr->TempAt(0));
2135
2136 // Load the cell.
2137 __ li(scratch, Operand(Handle<Object>(instr->hydrogen()->cell())));
2138
2139 // If the cell we are storing to contains the hole it could have
2140 // been deleted from the property dictionary. In that case, we need
2141 // to update the property details in the property dictionary to mark
2142 // it as no longer deleted.
2143 if (instr->hydrogen()->RequiresHoleCheck()) {
2144 __ lw(scratch2,
2145 FieldMemOperand(scratch, JSGlobalPropertyCell::kValueOffset));
2146 __ LoadRoot(at, Heap::kTheHoleValueRootIndex);
2147 DeoptimizeIf(eq, instr->environment(), scratch2, Operand(at));
2148 }
2149
2150 // Store the value.
2151 __ sw(value, FieldMemOperand(scratch, JSGlobalPropertyCell::kValueOffset));
2152
2153 // Cells are always in the remembered set.
2154 if (instr->hydrogen()->NeedsWriteBarrier()) {
2155 HType type = instr->hydrogen()->value()->type();
2156 SmiCheck check_needed =
2157 type.IsHeapObject() ? OMIT_SMI_CHECK : INLINE_SMI_CHECK;
2158 __ RecordWriteField(scratch,
2159 JSGlobalPropertyCell::kValueOffset,
2160 value,
2161 scratch2,
2162 kRAHasBeenSaved,
2163 kSaveFPRegs,
2164 OMIT_REMEMBERED_SET,
2165 check_needed);
2166 }
2167 }
2168
2169
2170 void LCodeGen::DoStoreGlobalGeneric(LStoreGlobalGeneric* instr) {
2171 ASSERT(ToRegister(instr->global_object()).is(a1));
2172 ASSERT(ToRegister(instr->value()).is(a0));
2173
2174 __ li(a2, Operand(instr->name()));
2175 Handle<Code> ic = instr->strict_mode()
2176 ? isolate()->builtins()->StoreIC_Initialize_Strict()
2177 : isolate()->builtins()->StoreIC_Initialize();
2178 CallCode(ic, RelocInfo::CODE_TARGET_CONTEXT, instr);
2179 }
2180
2181
2182 void LCodeGen::DoLoadContextSlot(LLoadContextSlot* instr) {
2183 Register context = ToRegister(instr->context());
2184 Register result = ToRegister(instr->result());
2185 __ lw(result, ContextOperand(context, instr->slot_index()));
2186 }
2187
2188
2189 void LCodeGen::DoStoreContextSlot(LStoreContextSlot* instr) {
2190 Register context = ToRegister(instr->context());
2191 Register value = ToRegister(instr->value());
2192 MemOperand target = ContextOperand(context, instr->slot_index());
2193 __ sw(value, target);
2194 if (instr->hydrogen()->NeedsWriteBarrier()) {
2195 HType type = instr->hydrogen()->value()->type();
2196 SmiCheck check_needed =
2197 type.IsHeapObject() ? OMIT_SMI_CHECK : INLINE_SMI_CHECK;
2198 __ RecordWriteContextSlot(context,
2199 target.offset(),
2200 value,
2201 scratch0(),
2202 kRAHasBeenSaved,
2203 kSaveFPRegs,
2204 EMIT_REMEMBERED_SET,
2205 check_needed);
2206 }
2207 }
2208
2209
2210 void LCodeGen::DoLoadNamedField(LLoadNamedField* instr) {
2211 Register object = ToRegister(instr->InputAt(0));
2212 Register result = ToRegister(instr->result());
2213 if (instr->hydrogen()->is_in_object()) {
2214 __ lw(result, FieldMemOperand(object, instr->hydrogen()->offset()));
2215 } else {
2216 __ lw(result, FieldMemOperand(object, JSObject::kPropertiesOffset));
2217 __ lw(result, FieldMemOperand(result, instr->hydrogen()->offset()));
2218 }
2219 }
2220
2221
2222 void LCodeGen::EmitLoadFieldOrConstantFunction(Register result,
2223 Register object,
2224 Handle<Map> type,
2225 Handle<String> name) {
2226 LookupResult lookup(isolate());
2227 type->LookupInDescriptors(NULL, *name, &lookup);
2228 ASSERT(lookup.IsProperty() &&
2229 (lookup.type() == FIELD || lookup.type() == CONSTANT_FUNCTION));
2230 if (lookup.type() == FIELD) {
2231 int index = lookup.GetLocalFieldIndexFromMap(*type);
2232 int offset = index * kPointerSize;
2233 if (index < 0) {
2234 // Negative property indices are in-object properties, indexed
2235 // from the end of the fixed part of the object.
2236 __ lw(result, FieldMemOperand(object, offset + type->instance_size()));
2237 } else {
2238 // Non-negative property indices are in the properties array.
2239 __ lw(result, FieldMemOperand(object, JSObject::kPropertiesOffset));
2240 __ lw(result, FieldMemOperand(result, offset + FixedArray::kHeaderSize));
2241 }
2242 } else {
2243 Handle<JSFunction> function(lookup.GetConstantFunctionFromMap(*type));
2244 LoadHeapObject(result, Handle<HeapObject>::cast(function));
2245 }
2246 }
2247
2248
2249 void LCodeGen::DoLoadNamedFieldPolymorphic(LLoadNamedFieldPolymorphic* instr) {
2250 Register object = ToRegister(instr->object());
2251 Register result = ToRegister(instr->result());
2252 Register scratch = scratch0();
2253 int map_count = instr->hydrogen()->types()->length();
2254 Handle<String> name = instr->hydrogen()->name();
2255 if (map_count == 0) {
2256 ASSERT(instr->hydrogen()->need_generic());
2257 __ li(a2, Operand(name));
2258 Handle<Code> ic = isolate()->builtins()->LoadIC_Initialize();
2259 CallCode(ic, RelocInfo::CODE_TARGET, instr);
2260 } else {
2261 Label done;
2262 __ lw(scratch, FieldMemOperand(object, HeapObject::kMapOffset));
2263 for (int i = 0; i < map_count - 1; ++i) {
2264 Handle<Map> map = instr->hydrogen()->types()->at(i);
2265 Label next;
2266 __ Branch(&next, ne, scratch, Operand(map));
2267 EmitLoadFieldOrConstantFunction(result, object, map, name);
2268 __ Branch(&done);
2269 __ bind(&next);
2270 }
2271 Handle<Map> map = instr->hydrogen()->types()->last();
2272 if (instr->hydrogen()->need_generic()) {
2273 Label generic;
2274 __ Branch(&generic, ne, scratch, Operand(map));
2275 EmitLoadFieldOrConstantFunction(result, object, map, name);
2276 __ Branch(&done);
2277 __ bind(&generic);
2278 __ li(a2, Operand(name));
2279 Handle<Code> ic = isolate()->builtins()->LoadIC_Initialize();
2280 CallCode(ic, RelocInfo::CODE_TARGET, instr);
2281 } else {
2282 DeoptimizeIf(ne, instr->environment(), scratch, Operand(map));
2283 EmitLoadFieldOrConstantFunction(result, object, map, name);
2284 }
2285 __ bind(&done);
2286 }
2287 }
2288
2289
2290 void LCodeGen::DoLoadNamedGeneric(LLoadNamedGeneric* instr) {
2291 ASSERT(ToRegister(instr->object()).is(a0));
2292 ASSERT(ToRegister(instr->result()).is(v0));
2293
2294 // Name is always in a2.
2295 __ li(a2, Operand(instr->name()));
2296 Handle<Code> ic = isolate()->builtins()->LoadIC_Initialize();
2297 CallCode(ic, RelocInfo::CODE_TARGET, instr);
2298 }
2299
2300
2301 void LCodeGen::DoLoadFunctionPrototype(LLoadFunctionPrototype* instr) {
2302 Register scratch = scratch0();
2303 Register function = ToRegister(instr->function());
2304 Register result = ToRegister(instr->result());
2305
2306 // Check that the function really is a function. Load map into the
2307 // result register.
2308 __ GetObjectType(function, result, scratch);
2309 DeoptimizeIf(ne, instr->environment(), scratch, Operand(JS_FUNCTION_TYPE));
2310
2311 // Make sure that the function has an instance prototype.
2312 Label non_instance;
2313 __ lbu(scratch, FieldMemOperand(result, Map::kBitFieldOffset));
2314 __ And(scratch, scratch, Operand(1 << Map::kHasNonInstancePrototype));
2315 __ Branch(&non_instance, ne, scratch, Operand(zero_reg));
2316
2317 // Get the prototype or initial map from the function.
2318 __ lw(result,
2319 FieldMemOperand(function, JSFunction::kPrototypeOrInitialMapOffset));
2320
2321 // Check that the function has a prototype or an initial map.
2322 __ LoadRoot(at, Heap::kTheHoleValueRootIndex);
2323 DeoptimizeIf(eq, instr->environment(), result, Operand(at));
2324
2325 // If the function does not have an initial map, we're done.
2326 Label done;
2327 __ GetObjectType(result, scratch, scratch);
2328 __ Branch(&done, ne, scratch, Operand(MAP_TYPE));
2329
2330 // Get the prototype from the initial map.
2331 __ lw(result, FieldMemOperand(result, Map::kPrototypeOffset));
2332 __ Branch(&done);
2333
2334 // Non-instance prototype: Fetch prototype from constructor field
2335 // in initial map.
2336 __ bind(&non_instance);
2337 __ lw(result, FieldMemOperand(result, Map::kConstructorOffset));
2338
2339 // All done.
2340 __ bind(&done);
2341 }
2342
2343
2344 void LCodeGen::DoLoadElements(LLoadElements* instr) {
2345 Register result = ToRegister(instr->result());
2346 Register input = ToRegister(instr->InputAt(0));
2347 Register scratch = scratch0();
2348
2349 __ lw(result, FieldMemOperand(input, JSObject::kElementsOffset));
2350 if (FLAG_debug_code) {
2351 Label done, fail;
2352 __ lw(scratch, FieldMemOperand(result, HeapObject::kMapOffset));
2353 __ LoadRoot(at, Heap::kFixedArrayMapRootIndex);
2354 __ Branch(USE_DELAY_SLOT, &done, eq, scratch, Operand(at));
2355 __ LoadRoot(at, Heap::kFixedCOWArrayMapRootIndex); // In the delay slot.
2356 __ Branch(&done, eq, scratch, Operand(at));
2357     // |scratch| still contains the map of the elements object.
2358 __ lbu(scratch, FieldMemOperand(scratch, Map::kBitField2Offset));
2359 __ Ext(scratch, scratch, Map::kElementsKindShift,
2360 Map::kElementsKindBitCount);
2361 __ Branch(&done, eq, scratch,
2362 Operand(FAST_ELEMENTS));
2363 __ Branch(&fail, lt, scratch,
2364 Operand(FIRST_EXTERNAL_ARRAY_ELEMENTS_KIND));
2365 __ Branch(&done, le, scratch,
2366 Operand(LAST_EXTERNAL_ARRAY_ELEMENTS_KIND));
2367 __ bind(&fail);
2368 __ Abort("Check for fast or external elements failed.");
2369 __ bind(&done);
2370 }
2371 }
2372
2373
2374 void LCodeGen::DoLoadExternalArrayPointer(
2375 LLoadExternalArrayPointer* instr) {
2376 Register to_reg = ToRegister(instr->result());
2377 Register from_reg = ToRegister(instr->InputAt(0));
2378 __ lw(to_reg, FieldMemOperand(from_reg,
2379 ExternalArray::kExternalPointerOffset));
2380 }
2381
2382
2383 void LCodeGen::DoAccessArgumentsAt(LAccessArgumentsAt* instr) {
2384 Register arguments = ToRegister(instr->arguments());
2385 Register length = ToRegister(instr->length());
2386 Register index = ToRegister(instr->index());
2387 Register result = ToRegister(instr->result());
2388
2389   // Bail out if index is not a valid argument index. The unsigned comparison
2390   // catches negative indices for free.
2391
2392   // TODO(plind): Should be optimized to do the sub before the DeoptimizeIf(),
2393   // as is done on ARM. It will save us an instruction.
2394 DeoptimizeIf(ls, instr->environment(), length, Operand(index));
2395
2396 // There are two words between the frame pointer and the last argument.
2397 // Subtracting from length accounts for one of them, add one more.
2398 __ subu(length, length, index);
2399 __ Addu(length, length, Operand(1));
2400 __ sll(length, length, kPointerSizeLog2);
2401 __ Addu(at, arguments, Operand(length));
2402 __ lw(result, MemOperand(at, 0));
2403 }
2404
2405
2406 void LCodeGen::DoLoadKeyedFastElement(LLoadKeyedFastElement* instr) {
2407 Register elements = ToRegister(instr->elements());
2408 Register key = EmitLoadRegister(instr->key(), scratch0());
2409 Register result = ToRegister(instr->result());
2410 Register scratch = scratch0();
2411
2412 // Load the result.
2413 __ sll(scratch, key, kPointerSizeLog2); // Key indexes words.
2414 __ addu(scratch, elements, scratch);
2415 __ lw(result, FieldMemOperand(scratch, FixedArray::kHeaderSize));
2416
2417 // Check for the hole value.
2418 if (instr->hydrogen()->RequiresHoleCheck()) {
2419 __ LoadRoot(scratch, Heap::kTheHoleValueRootIndex);
2420 DeoptimizeIf(eq, instr->environment(), result, Operand(scratch));
2421 }
2422 }
2423
2424
2425 void LCodeGen::DoLoadKeyedFastDoubleElement(
2426 LLoadKeyedFastDoubleElement* instr) {
2427 Register elements = ToRegister(instr->elements());
2428 bool key_is_constant = instr->key()->IsConstantOperand();
2429 Register key = no_reg;
2430 DoubleRegister result = ToDoubleRegister(instr->result());
2431 Register scratch = scratch0();
2432
2433 int shift_size =
2434 ElementsKindToShiftSize(FAST_DOUBLE_ELEMENTS);
2435 int constant_key = 0;
2436 if (key_is_constant) {
2437 constant_key = ToInteger32(LConstantOperand::cast(instr->key()));
2438 if (constant_key & 0xF0000000) {
2439 Abort("array index constant value too big.");
2440 }
2441 } else {
2442 key = ToRegister(instr->key());
2443 }
2444
2445 if (key_is_constant) {
2446 __ Addu(elements, elements, Operand(constant_key * (1 << shift_size) +
2447 FixedDoubleArray::kHeaderSize - kHeapObjectTag));
2448 } else {
2449 __ sll(scratch, key, shift_size);
2450 __ Addu(elements, elements, Operand(scratch));
2451 __ Addu(elements, elements,
2452 Operand(FixedDoubleArray::kHeaderSize - kHeapObjectTag));
2453 }
2454
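  // The hole is represented by a special NaN bit pattern; comparing the word at
  // offset sizeof(kHoleNanLower32) against kHoleNanUpper32 is enough to detect it.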
2455 __ lw(scratch, MemOperand(elements, sizeof(kHoleNanLower32)));
2456 DeoptimizeIf(eq, instr->environment(), scratch, Operand(kHoleNanUpper32));
2457
2458 __ ldc1(result, MemOperand(elements));
2459 }
2460
2461
2462 void LCodeGen::DoLoadKeyedSpecializedArrayElement(
2463 LLoadKeyedSpecializedArrayElement* instr) {
2464 Register external_pointer = ToRegister(instr->external_pointer());
2465 Register key = no_reg;
2466 ElementsKind elements_kind = instr->elements_kind();
2467 bool key_is_constant = instr->key()->IsConstantOperand();
2468 int constant_key = 0;
2469 if (key_is_constant) {
2470 constant_key = ToInteger32(LConstantOperand::cast(instr->key()));
2471 if (constant_key & 0xF0000000) {
2472 Abort("array index constant value too big.");
2473 }
2474 } else {
2475 key = ToRegister(instr->key());
2476 }
2477 int shift_size = ElementsKindToShiftSize(elements_kind);
2478
2479 if (elements_kind == EXTERNAL_FLOAT_ELEMENTS ||
2480 elements_kind == EXTERNAL_DOUBLE_ELEMENTS) {
2481 FPURegister result = ToDoubleRegister(instr->result());
2482 if (key_is_constant) {
2483 __ Addu(scratch0(), external_pointer, constant_key * (1 << shift_size));
2484 } else {
2485 __ sll(scratch0(), key, shift_size);
2486 __ Addu(scratch0(), scratch0(), external_pointer);
2487 }
2488
2489 if (elements_kind == EXTERNAL_FLOAT_ELEMENTS) {
2490 __ lwc1(result, MemOperand(scratch0()));
2491 __ cvt_d_s(result, result);
2492 } else { // i.e. elements_kind == EXTERNAL_DOUBLE_ELEMENTS
2493 __ ldc1(result, MemOperand(scratch0()));
2494 }
2495 } else {
2496 Register result = ToRegister(instr->result());
2497 Register scratch = scratch0();
2498 MemOperand mem_operand(zero_reg);
2499 if (key_is_constant) {
2500 mem_operand = MemOperand(external_pointer,
2501 constant_key * (1 << shift_size));
2502 } else {
2503 __ sll(scratch, key, shift_size);
2504 __ Addu(scratch, scratch, external_pointer);
2505 mem_operand = MemOperand(scratch);
2506 }
2507 switch (elements_kind) {
2508 case EXTERNAL_BYTE_ELEMENTS:
2509 __ lb(result, mem_operand);
2510 break;
2511 case EXTERNAL_PIXEL_ELEMENTS:
2512 case EXTERNAL_UNSIGNED_BYTE_ELEMENTS:
2513 __ lbu(result, mem_operand);
2514 break;
2515 case EXTERNAL_SHORT_ELEMENTS:
2516 __ lh(result, mem_operand);
2517 break;
2518 case EXTERNAL_UNSIGNED_SHORT_ELEMENTS:
2519 __ lhu(result, mem_operand);
2520 break;
2521 case EXTERNAL_INT_ELEMENTS:
2522 __ lw(result, mem_operand);
2523 break;
2524 case EXTERNAL_UNSIGNED_INT_ELEMENTS:
2525 __ lw(result, mem_operand);
2526 // TODO(danno): we could be more clever here, perhaps having a special
2527 // version of the stub that detects if the overflow case actually
2528 // happens, and generate code that returns a double rather than int.
2529 DeoptimizeIf(Ugreater_equal, instr->environment(),
2530 result, Operand(0x80000000));
2531 break;
2532 case EXTERNAL_FLOAT_ELEMENTS:
2533 case EXTERNAL_DOUBLE_ELEMENTS:
2534 case FAST_DOUBLE_ELEMENTS:
2535 case FAST_ELEMENTS:
2536 case FAST_SMI_ONLY_ELEMENTS:
2537 case DICTIONARY_ELEMENTS:
2538 case NON_STRICT_ARGUMENTS_ELEMENTS:
2539 UNREACHABLE();
2540 break;
2541 }
2542 }
2543 }
2544
2545
2546 void LCodeGen::DoLoadKeyedGeneric(LLoadKeyedGeneric* instr) {
2547 ASSERT(ToRegister(instr->object()).is(a1));
2548 ASSERT(ToRegister(instr->key()).is(a0));
2549
2550 Handle<Code> ic = isolate()->builtins()->KeyedLoadIC_Initialize();
2551 CallCode(ic, RelocInfo::CODE_TARGET, instr);
2552 }
2553
2554
2555 void LCodeGen::DoArgumentsElements(LArgumentsElements* instr) {
2556 Register scratch = scratch0();
2557 Register temp = scratch1();
2558 Register result = ToRegister(instr->result());
2559
2560 // Check if the calling frame is an arguments adaptor frame.
2561 Label done, adapted;
2562 __ lw(scratch, MemOperand(fp, StandardFrameConstants::kCallerFPOffset));
2563 __ lw(result, MemOperand(scratch, StandardFrameConstants::kContextOffset));
2564 __ Xor(temp, result, Operand(Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR)));
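  // temp is zero exactly when the caller frame is an arguments adaptor frame,
  // i.e. when its context slot holds the ARGUMENTS_ADAPTOR sentinel.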
2565
2566 // Result is the frame pointer for the frame if not adapted and for the real
2567 // frame below the adaptor frame if adapted.
2568 __ movn(result, fp, temp); // move only if temp is not equal to zero (ne)
2569 __ movz(result, scratch, temp); // move only if temp is equal to zero (eq)
2570 }
2571
2572
2573 void LCodeGen::DoArgumentsLength(LArgumentsLength* instr) {
2574 Register elem = ToRegister(instr->InputAt(0));
2575 Register result = ToRegister(instr->result());
2576
2577 Label done;
2578
2579   // If there is no arguments adaptor frame, the number of arguments is fixed.
2580 __ Addu(result, zero_reg, Operand(scope()->num_parameters()));
2581 __ Branch(&done, eq, fp, Operand(elem));
2582
2583 // Arguments adaptor frame present. Get argument length from there.
2584 __ lw(result, MemOperand(fp, StandardFrameConstants::kCallerFPOffset));
2585 __ lw(result,
2586 MemOperand(result, ArgumentsAdaptorFrameConstants::kLengthOffset));
2587 __ SmiUntag(result);
2588
2589 // Argument length is in result register.
2590 __ bind(&done);
2591 }
2592
2593
2594 void LCodeGen::DoApplyArguments(LApplyArguments* instr) {
2595 Register receiver = ToRegister(instr->receiver());
2596 Register function = ToRegister(instr->function());
2597 Register length = ToRegister(instr->length());
2598 Register elements = ToRegister(instr->elements());
2599 Register scratch = scratch0();
2600 ASSERT(receiver.is(a0)); // Used for parameter count.
2601 ASSERT(function.is(a1)); // Required by InvokeFunction.
2602 ASSERT(ToRegister(instr->result()).is(v0));
2603
2604 // If the receiver is null or undefined, we have to pass the global
2605 // object as a receiver to normal functions. Values have to be
2606 // passed unchanged to builtins and strict-mode functions.
2607 Label global_object, receiver_ok;
2608
2609 // Do not transform the receiver to object for strict mode
2610 // functions.
2611 __ lw(scratch,
2612 FieldMemOperand(function, JSFunction::kSharedFunctionInfoOffset));
2613 __ lw(scratch,
2614 FieldMemOperand(scratch, SharedFunctionInfo::kCompilerHintsOffset));
2615
2616 // Do not transform the receiver to object for builtins.
2617 int32_t strict_mode_function_mask =
2618 1 << (SharedFunctionInfo::kStrictModeFunction + kSmiTagSize);
2619 int32_t native_mask = 1 << (SharedFunctionInfo::kNative + kSmiTagSize);
2620 __ And(scratch, scratch, Operand(strict_mode_function_mask | native_mask));
2621 __ Branch(&receiver_ok, ne, scratch, Operand(zero_reg));
2622
2623 // Normal function. Replace undefined or null with global receiver.
2624 __ LoadRoot(scratch, Heap::kNullValueRootIndex);
2625 __ Branch(&global_object, eq, receiver, Operand(scratch));
2626 __ LoadRoot(scratch, Heap::kUndefinedValueRootIndex);
2627 __ Branch(&global_object, eq, receiver, Operand(scratch));
2628
2629 // Deoptimize if the receiver is not a JS object.
2630 __ And(scratch, receiver, Operand(kSmiTagMask));
2631 DeoptimizeIf(eq, instr->environment(), scratch, Operand(zero_reg));
2632
2633 __ GetObjectType(receiver, scratch, scratch);
2634 DeoptimizeIf(lt, instr->environment(),
2635 scratch, Operand(FIRST_SPEC_OBJECT_TYPE));
2636 __ Branch(&receiver_ok);
2637
2638 __ bind(&global_object);
2639 __ lw(receiver, GlobalObjectOperand());
2640 __ lw(receiver,
2641 FieldMemOperand(receiver, JSGlobalObject::kGlobalReceiverOffset));
2642 __ bind(&receiver_ok);
2643
2644 // Copy the arguments to this function possibly from the
2645 // adaptor frame below it.
2646 const uint32_t kArgumentsLimit = 1 * KB;
2647 DeoptimizeIf(hi, instr->environment(), length, Operand(kArgumentsLimit));
2648
2649 // Push the receiver and use the register to keep the original
2650 // number of arguments.
2651 __ push(receiver);
2652 __ Move(receiver, length);
2653   // The arguments start at a one-pointer-size offset from elements.
2654 __ Addu(elements, elements, Operand(1 * kPointerSize));
2655
2656 // Loop through the arguments pushing them onto the execution
2657 // stack.
2658 Label invoke, loop;
2659 // length is a small non-negative integer, due to the test above.
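  // Note: the sll below executes in the branch delay slot, so the byte offset is
  // computed even when the branch to &invoke is taken (it is simply unused there).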
2660 __ Branch(USE_DELAY_SLOT, &invoke, eq, length, Operand(zero_reg));
2661 __ sll(scratch, length, 2);
2662 __ bind(&loop);
2663 __ Addu(scratch, elements, scratch);
2664 __ lw(scratch, MemOperand(scratch));
2665 __ push(scratch);
2666 __ Subu(length, length, Operand(1));
2667 __ Branch(USE_DELAY_SLOT, &loop, ne, length, Operand(zero_reg));
2668 __ sll(scratch, length, 2);
2669
2670 __ bind(&invoke);
2671 ASSERT(instr->HasPointerMap() && instr->HasDeoptimizationEnvironment());
2672 LPointerMap* pointers = instr->pointer_map();
2673 LEnvironment* env = instr->deoptimization_environment();
2674 RecordPosition(pointers->position());
2675 RegisterEnvironmentForDeoptimization(env);
2676 SafepointGenerator safepoint_generator(this,
2677 pointers,
2678 env->deoptimization_index());
2679 // The number of arguments is stored in receiver which is a0, as expected
2680 // by InvokeFunction.
2681 v8::internal::ParameterCount actual(receiver);
2682 __ InvokeFunction(function, actual, CALL_FUNCTION,
2683 safepoint_generator, CALL_AS_METHOD);
2684 __ lw(cp, MemOperand(fp, StandardFrameConstants::kContextOffset));
2685 }
2686
2687
2688 void LCodeGen::DoPushArgument(LPushArgument* instr) {
2689 LOperand* argument = instr->InputAt(0);
2690 if (argument->IsDoubleRegister() || argument->IsDoubleStackSlot()) {
2691 Abort("DoPushArgument not implemented for double type.");
2692 } else {
2693 Register argument_reg = EmitLoadRegister(argument, at);
2694 __ push(argument_reg);
2695 }
2696 }
2697
2698
2699 void LCodeGen::DoThisFunction(LThisFunction* instr) {
2700 Register result = ToRegister(instr->result());
2701 LoadHeapObject(result, instr->hydrogen()->closure());
2702 }
2703
2704
2705 void LCodeGen::DoContext(LContext* instr) {
2706 Register result = ToRegister(instr->result());
2707 __ mov(result, cp);
2708 }
2709
2710
2711 void LCodeGen::DoOuterContext(LOuterContext* instr) {
2712 Register context = ToRegister(instr->context());
2713 Register result = ToRegister(instr->result());
2714 __ lw(result,
2715 MemOperand(context, Context::SlotOffset(Context::PREVIOUS_INDEX)));
2716 }
2717
2718
2719 void LCodeGen::DoGlobalObject(LGlobalObject* instr) {
2720 Register context = ToRegister(instr->context());
2721 Register result = ToRegister(instr->result());
2722 __ lw(result, ContextOperand(cp, Context::GLOBAL_INDEX));
2723 }
2724
2725
2726 void LCodeGen::DoGlobalReceiver(LGlobalReceiver* instr) {
2727 Register global = ToRegister(instr->global());
2728 Register result = ToRegister(instr->result());
2729 __ lw(result, FieldMemOperand(global, GlobalObject::kGlobalReceiverOffset));
2730 }
2731
2732
2733 void LCodeGen::CallKnownFunction(Handle<JSFunction> function,
2734 int arity,
2735 LInstruction* instr,
2736 CallKind call_kind) {
2737 // Change context if needed.
2738 bool change_context =
2739 (info()->closure()->context() != function->context()) ||
2740 scope()->contains_with() ||
2741 (scope()->num_heap_slots() > 0);
2742 if (change_context) {
2743 __ lw(cp, FieldMemOperand(a1, JSFunction::kContextOffset));
2744 }
2745
2746   // Set a0 to the arguments count if adaptation is not needed. Assumes that
2747   // a0 is available to write to at this point.
2748 if (!function->NeedsArgumentsAdaption()) {
2749 __ li(a0, Operand(arity));
2750 }
2751
2752 LPointerMap* pointers = instr->pointer_map();
2753 RecordPosition(pointers->position());
2754
2755 // Invoke function.
2756 __ SetCallKind(t1, call_kind);
2757 __ lw(at, FieldMemOperand(a1, JSFunction::kCodeEntryOffset));
2758 __ Call(at);
2759
2760   // Set up deoptimization.
2761 RegisterLazyDeoptimization(instr, RECORD_SIMPLE_SAFEPOINT);
2762
2763 // Restore context.
2764 __ lw(cp, MemOperand(fp, StandardFrameConstants::kContextOffset));
2765 }
2766
2767
2768 void LCodeGen::DoCallConstantFunction(LCallConstantFunction* instr) {
2769 ASSERT(ToRegister(instr->result()).is(v0));
2770 __ mov(a0, v0);
2771 __ li(a1, Operand(instr->function()));
2772 CallKnownFunction(instr->function(), instr->arity(), instr, CALL_AS_METHOD);
2773 }
2774
2775
2776 void LCodeGen::DoDeferredMathAbsTaggedHeapNumber(LUnaryMathOperation* instr) {
2777 Register input = ToRegister(instr->InputAt(0));
2778 Register result = ToRegister(instr->result());
2779 Register scratch = scratch0();
2780
2781 // Deoptimize if not a heap number.
2782 __ lw(scratch, FieldMemOperand(input, HeapObject::kMapOffset));
2783 __ LoadRoot(at, Heap::kHeapNumberMapRootIndex);
2784 DeoptimizeIf(ne, instr->environment(), scratch, Operand(at));
2785
2786 Label done;
2787 Register exponent = scratch0();
2788 scratch = no_reg;
2789 __ lw(exponent, FieldMemOperand(input, HeapNumber::kExponentOffset));
2790 // Check the sign of the argument. If the argument is positive, just
2791 // return it.
2792 __ Move(result, input);
2793 __ And(at, exponent, Operand(HeapNumber::kSignMask));
2794 __ Branch(&done, eq, at, Operand(zero_reg));
2795
2796 // Input is negative. Reverse its sign.
2797 // Preserve the value of all registers.
2798 {
2799 PushSafepointRegistersScope scope(this, Safepoint::kWithRegisters);
2800
2801 // Registers were saved at the safepoint, so we can use
2802 // many scratch registers.
2803 Register tmp1 = input.is(a1) ? a0 : a1;
2804 Register tmp2 = input.is(a2) ? a0 : a2;
2805 Register tmp3 = input.is(a3) ? a0 : a3;
2806 Register tmp4 = input.is(t0) ? a0 : t0;
2807
2808 // exponent: floating point exponent value.
2809
2810 Label allocated, slow;
2811 __ LoadRoot(tmp4, Heap::kHeapNumberMapRootIndex);
2812 __ AllocateHeapNumber(tmp1, tmp2, tmp3, tmp4, &slow);
2813 __ Branch(&allocated);
2814
2815 // Slow case: Call the runtime system to do the number allocation.
2816 __ bind(&slow);
2817
2818 CallRuntimeFromDeferred(Runtime::kAllocateHeapNumber, 0, instr);
2819     // Set the pointer to the new heap number in tmp1.
2820 if (!tmp1.is(v0))
2821 __ mov(tmp1, v0);
2822 // Restore input_reg after call to runtime.
2823 __ LoadFromSafepointRegisterSlot(input, input);
2824 __ lw(exponent, FieldMemOperand(input, HeapNumber::kExponentOffset));
2825
2826 __ bind(&allocated);
2827 // exponent: floating point exponent value.
2828 // tmp1: allocated heap number.
2829 __ And(exponent, exponent, Operand(~HeapNumber::kSignMask));
2830 __ sw(exponent, FieldMemOperand(tmp1, HeapNumber::kExponentOffset));
2831 __ lw(tmp2, FieldMemOperand(input, HeapNumber::kMantissaOffset));
2832 __ sw(tmp2, FieldMemOperand(tmp1, HeapNumber::kMantissaOffset));
2833
2834 __ StoreToSafepointRegisterSlot(tmp1, result);
2835 }
2836
2837 __ bind(&done);
2838 }
2839
2840
2841 void LCodeGen::EmitIntegerMathAbs(LUnaryMathOperation* instr) {
2842 Register input = ToRegister(instr->InputAt(0));
2843 Register result = ToRegister(instr->result());
2844 Assembler::BlockTrampolinePoolScope block_trampoline_pool(masm_);
2845 Label done;
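  // The mov below executes in the branch delay slot, so result is set to input
  // unconditionally; the fall-through (negative input) path then negates it.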
2846 __ Branch(USE_DELAY_SLOT, &done, ge, input, Operand(zero_reg));
2847 __ mov(result, input);
2848 ASSERT_EQ(2, masm()->InstructionsGeneratedSince(&done));
2849 __ subu(result, zero_reg, input);
2850   // Overflow if result is still negative, i.e. 0x80000000.
2851 DeoptimizeIf(lt, instr->environment(), result, Operand(zero_reg));
2852 __ bind(&done);
2853 }
2854
2855
2856 void LCodeGen::DoMathAbs(LUnaryMathOperation* instr) {
2857 // Class for deferred case.
2858 class DeferredMathAbsTaggedHeapNumber: public LDeferredCode {
2859 public:
2860 DeferredMathAbsTaggedHeapNumber(LCodeGen* codegen,
2861 LUnaryMathOperation* instr)
2862 : LDeferredCode(codegen), instr_(instr) { }
2863 virtual void Generate() {
2864 codegen()->DoDeferredMathAbsTaggedHeapNumber(instr_);
2865 }
2866 virtual LInstruction* instr() { return instr_; }
2867 private:
2868 LUnaryMathOperation* instr_;
2869 };
2870
2871 Representation r = instr->hydrogen()->value()->representation();
2872 if (r.IsDouble()) {
2873 FPURegister input = ToDoubleRegister(instr->InputAt(0));
2874 FPURegister result = ToDoubleRegister(instr->result());
2875 __ abs_d(result, input);
2876 } else if (r.IsInteger32()) {
2877 EmitIntegerMathAbs(instr);
2878 } else {
2879 // Representation is tagged.
2880 DeferredMathAbsTaggedHeapNumber* deferred =
2881 new DeferredMathAbsTaggedHeapNumber(this, instr);
2882 Register input = ToRegister(instr->InputAt(0));
2883 // Smi check.
2884 __ JumpIfNotSmi(input, deferred->entry());
2885 // If smi, handle it directly.
2886 EmitIntegerMathAbs(instr);
2887 __ bind(deferred->exit());
2888 }
2889 }
2890
2891
2892 void LCodeGen::DoMathFloor(LUnaryMathOperation* instr) {
2893 DoubleRegister input = ToDoubleRegister(instr->InputAt(0));
2894 Register result = ToRegister(instr->result());
2895 FPURegister single_scratch = double_scratch0().low();
2896 Register scratch1 = scratch0();
2897 Register except_flag = ToRegister(instr->TempAt(0));
2898
2899 __ EmitFPUTruncate(kRoundToMinusInf,
2900 single_scratch,
2901 input,
2902 scratch1,
2903 except_flag);
2904
2905 // Deopt if the operation did not succeed.
2906 DeoptimizeIf(ne, instr->environment(), except_flag, Operand(zero_reg));
2907
2908 // Load the result.
2909 __ mfc1(result, single_scratch);
2910
2911 if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) {
2912 // Test for -0.
2913 Label done;
2914 __ Branch(&done, ne, result, Operand(zero_reg));
2915 __ mfc1(scratch1, input.high());
2916 __ And(scratch1, scratch1, Operand(HeapNumber::kSignMask));
2917 DeoptimizeIf(ne, instr->environment(), scratch1, Operand(zero_reg));
2918 __ bind(&done);
2919 }
2920 }
2921
2922
2923 void LCodeGen::DoMathRound(LUnaryMathOperation* instr) {
2924 DoubleRegister input = ToDoubleRegister(instr->InputAt(0));
2925 Register result = ToRegister(instr->result());
2926 Register scratch = scratch0();
2927 Label done, check_sign_on_zero;
2928
2929 // Extract exponent bits.
2930 __ mfc1(result, input.high());
2931 __ Ext(scratch,
2932 result,
2933 HeapNumber::kExponentShift,
2934 HeapNumber::kExponentBits);
2935
2936 // If the number is in ]-0.5, +0.5[, the result is +/- 0.
2937 Label skip1;
2938 __ Branch(&skip1, gt, scratch, Operand(HeapNumber::kExponentBias - 2));
2939 __ mov(result, zero_reg);
2940 if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) {
2941 __ Branch(&check_sign_on_zero);
2942 } else {
2943 __ Branch(&done);
2944 }
2945 __ bind(&skip1);
2946
2947 // The following conversion will not work with numbers
2948 // outside of ]-2^32, 2^32[.
2949 DeoptimizeIf(ge, instr->environment(), scratch,
2950 Operand(HeapNumber::kExponentBias + 32));
2951
2952 // Save the original sign for later comparison.
2953 __ And(scratch, result, Operand(HeapNumber::kSignMask));
2954
2955 __ Move(double_scratch0(), 0.5);
2956 __ add_d(input, input, double_scratch0());
2957
2958   // Check the sign of the result: if the sign changed, the input
2959   // value was in [-0.5, 0[ and the result should be -0.
2960 __ mfc1(result, input.high());
2961 __ Xor(result, result, Operand(scratch));
2962 if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) {
2963 // ARM uses 'mi' here, which is 'lt'
2964 DeoptimizeIf(lt, instr->environment(), result,
2965 Operand(zero_reg));
2966 } else {
2967 Label skip2;
2968 // ARM uses 'mi' here, which is 'lt'
2969 // Negating it results in 'ge'
2970 __ Branch(&skip2, ge, result, Operand(zero_reg));
2971 __ mov(result, zero_reg);
2972 __ Branch(&done);
2973 __ bind(&skip2);
2974 }
2975
2976 Register except_flag = scratch;
2977
2978 __ EmitFPUTruncate(kRoundToMinusInf,
2979 double_scratch0().low(),
2980 input,
2981 result,
2982 except_flag);
2983
2984 DeoptimizeIf(ne, instr->environment(), except_flag, Operand(zero_reg));
2985
2986 __ mfc1(result, double_scratch0().low());
2987
2988 if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) {
2989 // Test for -0.
2990 __ Branch(&done, ne, result, Operand(zero_reg));
2991 __ bind(&check_sign_on_zero);
2992 __ mfc1(scratch, input.high());
2993 __ And(scratch, scratch, Operand(HeapNumber::kSignMask));
2994 DeoptimizeIf(ne, instr->environment(), scratch, Operand(zero_reg));
2995 }
2996 __ bind(&done);
2997 }
2998
2999
3000 void LCodeGen::DoMathSqrt(LUnaryMathOperation* instr) {
3001 DoubleRegister input = ToDoubleRegister(instr->InputAt(0));
3002 DoubleRegister result = ToDoubleRegister(instr->result());
3003 __ sqrt_d(result, input);
3004 }
3005
3006
3007 void LCodeGen::DoMathPowHalf(LUnaryMathOperation* instr) {
3008 DoubleRegister input = ToDoubleRegister(instr->InputAt(0));
3009 DoubleRegister result = ToDoubleRegister(instr->result());
3010 DoubleRegister double_scratch = double_scratch0();
3011
3012 // Add +0 to convert -0 to +0.
3013 __ mtc1(zero_reg, double_scratch.low());
3014 __ mtc1(zero_reg, double_scratch.high());
3015 __ add_d(result, input, double_scratch);
3016 __ sqrt_d(result, result);
3017 }
3018
3019
3020 void LCodeGen::DoPower(LPower* instr) {
3021 LOperand* left = instr->InputAt(0);
3022 LOperand* right = instr->InputAt(1);
3023 Register scratch = scratch0();
3024 DoubleRegister result_reg = ToDoubleRegister(instr->result());
3025 Representation exponent_type = instr->hydrogen()->right()->representation();
3026 if (exponent_type.IsDouble()) {
3027 // Prepare arguments and call C function.
3028 __ PrepareCallCFunction(0, 2, scratch);
3029 __ SetCallCDoubleArguments(ToDoubleRegister(left),
3030 ToDoubleRegister(right));
3031 __ CallCFunction(
3032 ExternalReference::power_double_double_function(isolate()), 0, 2);
3033 } else if (exponent_type.IsInteger32()) {
3034 ASSERT(ToRegister(right).is(a0));
3035 // Prepare arguments and call C function.
3036 __ PrepareCallCFunction(1, 1, scratch);
3037 __ SetCallCDoubleArguments(ToDoubleRegister(left), ToRegister(right));
3038 __ CallCFunction(
3039 ExternalReference::power_double_int_function(isolate()), 1, 1);
3040 } else {
3041 ASSERT(exponent_type.IsTagged());
3042 ASSERT(instr->hydrogen()->left()->representation().IsDouble());
3043
3044 Register right_reg = ToRegister(right);
3045
3046 // Check for smi on the right hand side.
3047 Label non_smi, call;
3048 __ JumpIfNotSmi(right_reg, &non_smi);
3049
3050 // Untag smi and convert it to a double.
3051 __ SmiUntag(right_reg);
3052 FPURegister single_scratch = double_scratch0();
3053 __ mtc1(right_reg, single_scratch);
3054 __ cvt_d_w(result_reg, single_scratch);
3055 __ Branch(&call);
3056
3057 // Heap number map check.
3058 __ bind(&non_smi);
3059 __ lw(scratch, FieldMemOperand(right_reg, HeapObject::kMapOffset));
3060 __ LoadRoot(at, Heap::kHeapNumberMapRootIndex);
3061 DeoptimizeIf(ne, instr->environment(), scratch, Operand(at));
3062 __ ldc1(result_reg, FieldMemOperand(right_reg, HeapNumber::kValueOffset));
3063
3064 // Prepare arguments and call C function.
3065 __ bind(&call);
3066 __ PrepareCallCFunction(0, 2, scratch);
3067 __ SetCallCDoubleArguments(ToDoubleRegister(left), result_reg);
3068 __ CallCFunction(
3069 ExternalReference::power_double_double_function(isolate()), 0, 2);
3070 }
3071 // Store the result in the result register.
3072 __ GetCFunctionDoubleResult(result_reg);
3073 }
3074
3075
3076 void LCodeGen::DoMathLog(LUnaryMathOperation* instr) {
3077 ASSERT(ToDoubleRegister(instr->result()).is(f4));
3078 TranscendentalCacheStub stub(TranscendentalCache::LOG,
3079 TranscendentalCacheStub::UNTAGGED);
3080 CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
3081 }
3082
3083
3084 void LCodeGen::DoMathCos(LUnaryMathOperation* instr) {
3085 ASSERT(ToDoubleRegister(instr->result()).is(f4));
3086 TranscendentalCacheStub stub(TranscendentalCache::COS,
3087 TranscendentalCacheStub::UNTAGGED);
3088 CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
3089 }
3090
3091
3092 void LCodeGen::DoMathSin(LUnaryMathOperation* instr) {
3093 ASSERT(ToDoubleRegister(instr->result()).is(f4));
3094 TranscendentalCacheStub stub(TranscendentalCache::SIN,
3095 TranscendentalCacheStub::UNTAGGED);
3096 CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
3097 }
3098
3099
3100 void LCodeGen::DoUnaryMathOperation(LUnaryMathOperation* instr) {
3101 switch (instr->op()) {
3102 case kMathAbs:
3103 DoMathAbs(instr);
3104 break;
3105 case kMathFloor:
3106 DoMathFloor(instr);
3107 break;
3108 case kMathRound:
3109 DoMathRound(instr);
3110 break;
3111 case kMathSqrt:
3112 DoMathSqrt(instr);
3113 break;
3114 case kMathPowHalf:
3115 DoMathPowHalf(instr);
3116 break;
3117 case kMathCos:
3118 DoMathCos(instr);
3119 break;
3120 case kMathSin:
3121 DoMathSin(instr);
3122 break;
3123 case kMathLog:
3124 DoMathLog(instr);
3125 break;
3126 default:
3127 Abort("Unimplemented type of LUnaryMathOperation.");
3128 UNREACHABLE();
3129 }
3130 }
3131
3132
3133 void LCodeGen::DoInvokeFunction(LInvokeFunction* instr) {
3134 ASSERT(ToRegister(instr->function()).is(a1));
3135 ASSERT(instr->HasPointerMap());
3136 ASSERT(instr->HasDeoptimizationEnvironment());
3137 LPointerMap* pointers = instr->pointer_map();
3138 LEnvironment* env = instr->deoptimization_environment();
3139 RecordPosition(pointers->position());
3140 RegisterEnvironmentForDeoptimization(env);
3141 SafepointGenerator generator(this, pointers, env->deoptimization_index());
3142 ParameterCount count(instr->arity());
3143 __ InvokeFunction(a1, count, CALL_FUNCTION, generator, CALL_AS_METHOD);
3144 __ lw(cp, MemOperand(fp, StandardFrameConstants::kContextOffset));
3145 }
3146
3147
3148 void LCodeGen::DoCallKeyed(LCallKeyed* instr) {
3149 ASSERT(ToRegister(instr->result()).is(v0));
3150
3151 int arity = instr->arity();
3152 Handle<Code> ic =
3153 isolate()->stub_cache()->ComputeKeyedCallInitialize(arity);
3154 CallCode(ic, RelocInfo::CODE_TARGET, instr);
3155 __ lw(cp, MemOperand(fp, StandardFrameConstants::kContextOffset));
3156 }
3157
3158
3159 void LCodeGen::DoCallNamed(LCallNamed* instr) {
3160 ASSERT(ToRegister(instr->result()).is(v0));
3161
3162 int arity = instr->arity();
3163 RelocInfo::Mode mode = RelocInfo::CODE_TARGET;
3164 Handle<Code> ic =
3165 isolate()->stub_cache()->ComputeCallInitialize(arity, mode);
3166 __ li(a2, Operand(instr->name()));
3167 CallCode(ic, mode, instr);
3168 // Restore context register.
3169 __ lw(cp, MemOperand(fp, StandardFrameConstants::kContextOffset));
3170 }
3171
3172
3173 void LCodeGen::DoCallFunction(LCallFunction* instr) {
3174 ASSERT(ToRegister(instr->result()).is(v0));
3175
3176 int arity = instr->arity();
3177 CallFunctionStub stub(arity, NO_CALL_FUNCTION_FLAGS);
3178 CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
3179 __ Drop(1);
3180 __ lw(cp, MemOperand(fp, StandardFrameConstants::kContextOffset));
3181 }
3182
3183
3184 void LCodeGen::DoCallGlobal(LCallGlobal* instr) {
3185 ASSERT(ToRegister(instr->result()).is(v0));
3186
3187 int arity = instr->arity();
3188 RelocInfo::Mode mode = RelocInfo::CODE_TARGET_CONTEXT;
3189 Handle<Code> ic =
3190 isolate()->stub_cache()->ComputeCallInitialize(arity, mode);
3191 __ li(a2, Operand(instr->name()));
3192 CallCode(ic, mode, instr);
3193 __ lw(cp, MemOperand(fp, StandardFrameConstants::kContextOffset));
3194 }
3195
3196
3197 void LCodeGen::DoCallKnownGlobal(LCallKnownGlobal* instr) {
3198 ASSERT(ToRegister(instr->result()).is(v0));
3199 __ li(a1, Operand(instr->target()));
3200 CallKnownFunction(instr->target(), instr->arity(), instr, CALL_AS_FUNCTION);
3201 }
3202
3203
3204 void LCodeGen::DoCallNew(LCallNew* instr) {
3205 ASSERT(ToRegister(instr->InputAt(0)).is(a1));
3206 ASSERT(ToRegister(instr->result()).is(v0));
3207
3208 Handle<Code> builtin = isolate()->builtins()->JSConstructCall();
3209 __ li(a0, Operand(instr->arity()));
3210 CallCode(builtin, RelocInfo::CONSTRUCT_CALL, instr);
3211 }
3212
3213
3214 void LCodeGen::DoCallRuntime(LCallRuntime* instr) {
3215 CallRuntime(instr->function(), instr->arity(), instr);
3216 }
3217
3218
3219 void LCodeGen::DoStoreNamedField(LStoreNamedField* instr) {
3220 Register object = ToRegister(instr->object());
3221 Register value = ToRegister(instr->value());
3222 Register scratch = scratch0();
3223 int offset = instr->offset();
3224
3225 ASSERT(!object.is(value));
3226
3227 if (!instr->transition().is_null()) {
3228 __ li(scratch, Operand(instr->transition()));
3229 __ sw(scratch, FieldMemOperand(object, HeapObject::kMapOffset));
3230 }
3231
3232 // Do the store.
3233 HType type = instr->hydrogen()->value()->type();
3234 SmiCheck check_needed =
3235 type.IsHeapObject() ? OMIT_SMI_CHECK : INLINE_SMI_CHECK;
3236 if (instr->is_in_object()) {
3237 __ sw(value, FieldMemOperand(object, offset));
3238 if (instr->hydrogen()->NeedsWriteBarrier()) {
3239 // Update the write barrier for the object for in-object properties.
3240 __ RecordWriteField(object,
3241 offset,
3242 value,
3243 scratch,
3244 kRAHasBeenSaved,
3245 kSaveFPRegs,
3246 EMIT_REMEMBERED_SET,
3247 check_needed);
3248 }
3249 } else {
3250 __ lw(scratch, FieldMemOperand(object, JSObject::kPropertiesOffset));
3251 __ sw(value, FieldMemOperand(scratch, offset));
3252 if (instr->hydrogen()->NeedsWriteBarrier()) {
3253 // Update the write barrier for the properties array.
3254 // object is used as a scratch register.
3255 __ RecordWriteField(scratch,
3256 offset,
3257 value,
3258 object,
3259 kRAHasBeenSaved,
3260 kSaveFPRegs,
3261 EMIT_REMEMBERED_SET,
3262 check_needed);
3263 }
3264 }
3265 }
3266
3267
3268 void LCodeGen::DoStoreNamedGeneric(LStoreNamedGeneric* instr) {
3269 ASSERT(ToRegister(instr->object()).is(a1));
3270 ASSERT(ToRegister(instr->value()).is(a0));
3271
3272 // Name is always in a2.
3273 __ li(a2, Operand(instr->name()));
3274 Handle<Code> ic = instr->strict_mode()
3275 ? isolate()->builtins()->StoreIC_Initialize_Strict()
3276 : isolate()->builtins()->StoreIC_Initialize();
3277 CallCode(ic, RelocInfo::CODE_TARGET, instr);
3278 }
3279
3280
3281 void LCodeGen::DoBoundsCheck(LBoundsCheck* instr) {
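// Deoptimize if index >= length. The unsigned comparison also catches
// negative indices.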
3282 DeoptimizeIf(hs,
3283 instr->environment(),
3284 ToRegister(instr->index()),
3285 Operand(ToRegister(instr->length())));
3286 }
3287
3288
3289 void LCodeGen::DoStoreKeyedFastElement(LStoreKeyedFastElement* instr) {
3290 Register value = ToRegister(instr->value());
3291 Register elements = ToRegister(instr->object());
3292 Register key = instr->key()->IsRegister() ? ToRegister(instr->key()) : no_reg;
3293 Register scratch = scratch0();
3294
3295 // This instruction cannot handle the FAST_SMI_ONLY_ELEMENTS -> FAST_ELEMENTS
3296 // conversion, so it deopts in that case.
3297 if (instr->hydrogen()->ValueNeedsSmiCheck()) {
3298 __ And(at, value, Operand(kSmiTagMask));
3299 DeoptimizeIf(ne, instr->environment(), at, Operand(zero_reg));
3300 }
3301
3302 // Do the store.
3303 if (instr->key()->IsConstantOperand()) {
3304 ASSERT(!instr->hydrogen()->NeedsWriteBarrier());
3305 LConstantOperand* const_operand = LConstantOperand::cast(instr->key());
3306 int offset =
3307 ToInteger32(const_operand) * kPointerSize + FixedArray::kHeaderSize;
3308 __ sw(value, FieldMemOperand(elements, offset));
3309 } else {
3310 __ sll(scratch, key, kPointerSizeLog2);
3311 __ addu(scratch, elements, scratch);
3312 __ sw(value, FieldMemOperand(scratch, FixedArray::kHeaderSize));
3313 }
3314
3315 if (instr->hydrogen()->NeedsWriteBarrier()) {
3316 HType type = instr->hydrogen()->value()->type();
3317 SmiCheck check_needed =
3318 type.IsHeapObject() ? OMIT_SMI_CHECK : INLINE_SMI_CHECK;
3319 // Compute address of modified element and store it into key register.
3320 __ Addu(key, scratch, Operand(FixedArray::kHeaderSize - kHeapObjectTag));
3321 __ RecordWrite(elements,
3322 key,
3323 value,
3324 kRAHasBeenSaved,
3325 kSaveFPRegs,
3326 EMIT_REMEMBERED_SET,
3327 check_needed);
3328 }
3329 }
3330
3331
3332 void LCodeGen::DoStoreKeyedFastDoubleElement(
3333 LStoreKeyedFastDoubleElement* instr) {
3334 DoubleRegister value = ToDoubleRegister(instr->value());
3335 Register elements = ToRegister(instr->elements());
3336 Register key = no_reg;
3337 Register scratch = scratch0();
3338 bool key_is_constant = instr->key()->IsConstantOperand();
3339 int constant_key = 0;
3340 Label not_nan;
3341
3342 // Calculate the effective address of the slot in the array to store the
3343 // double value.
3344 if (key_is_constant) {
3345 constant_key = ToInteger32(LConstantOperand::cast(instr->key()));
3346 if (constant_key & 0xF0000000) {
3347 Abort("array index constant value too big.");
3348 }
3349 } else {
3350 key = ToRegister(instr->key());
3351 }
3352 int shift_size = ElementsKindToShiftSize(FAST_DOUBLE_ELEMENTS);
3353 if (key_is_constant) {
3354 __ Addu(scratch, elements, Operand(constant_key * (1 << shift_size) +
3355 FixedDoubleArray::kHeaderSize - kHeapObjectTag));
3356 } else {
3357 __ sll(scratch, key, shift_size);
3358 __ Addu(scratch, elements, Operand(scratch));
3359 __ Addu(scratch, scratch,
3360 Operand(FixedDoubleArray::kHeaderSize - kHeapObjectTag));
3361 }
3362
3363 Label is_nan;
3364 // Check for NaN. All NaNs must be canonicalized.
3365 __ BranchF(NULL, &is_nan, eq, value, value);
3366 __ Branch(&not_nan);
3367
3368 // Only load the canonical NaN if the comparison above detected a NaN.
3369 __ bind(&is_nan);
3370 __ Move(value, FixedDoubleArray::canonical_not_the_hole_nan_as_double());
3371
3372 __ bind(&not_nan);
3373 __ sdc1(value, MemOperand(scratch));
3374 }
3375
3376
3377 void LCodeGen::DoStoreKeyedSpecializedArrayElement(
3378 LStoreKeyedSpecializedArrayElement* instr) {
3379
3380 Register external_pointer = ToRegister(instr->external_pointer());
3381 Register key = no_reg;
3382 ElementsKind elements_kind = instr->elements_kind();
3383 bool key_is_constant = instr->key()->IsConstantOperand();
3384 int constant_key = 0;
3385 if (key_is_constant) {
3386 constant_key = ToInteger32(LConstantOperand::cast(instr->key()));
3387 if (constant_key & 0xF0000000) {
3388 Abort("array index constant value too big.");
3389 }
3390 } else {
3391 key = ToRegister(instr->key());
3392 }
3393 int shift_size = ElementsKindToShiftSize(elements_kind);
3394
3395 if (elements_kind == EXTERNAL_FLOAT_ELEMENTS ||
3396 elements_kind == EXTERNAL_DOUBLE_ELEMENTS) {
3397 FPURegister value(ToDoubleRegister(instr->value()));
3398 if (key_is_constant) {
3399 __ Addu(scratch0(), external_pointer, constant_key * (1 << shift_size));
3400 } else {
3401 __ sll(scratch0(), key, shift_size);
3402 __ Addu(scratch0(), scratch0(), external_pointer);
3403 }
3404
3405 if (elements_kind == EXTERNAL_FLOAT_ELEMENTS) {
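// Convert the double value to single precision before storing it in the
// float array.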
3406 __ cvt_s_d(double_scratch0(), value);
3407 __ swc1(double_scratch0(), MemOperand(scratch0()));
3408 } else { // i.e. elements_kind == EXTERNAL_DOUBLE_ELEMENTS
3409 __ sdc1(value, MemOperand(scratch0()));
3410 }
3411 } else {
3412 Register value(ToRegister(instr->value()));
3413 MemOperand mem_operand(zero_reg);
3414 Register scratch = scratch0();
3415 if (key_is_constant) {
3416 mem_operand = MemOperand(external_pointer,
3417 constant_key * (1 << shift_size));
3418 } else {
3419 __ sll(scratch, key, shift_size);
3420 __ Addu(scratch, scratch, external_pointer);
3421 mem_operand = MemOperand(scratch);
3422 }
3423 switch (elements_kind) {
3424 case EXTERNAL_PIXEL_ELEMENTS:
3425 case EXTERNAL_BYTE_ELEMENTS:
3426 case EXTERNAL_UNSIGNED_BYTE_ELEMENTS:
3427 __ sb(value, mem_operand);
3428 break;
3429 case EXTERNAL_SHORT_ELEMENTS:
3430 case EXTERNAL_UNSIGNED_SHORT_ELEMENTS:
3431 __ sh(value, mem_operand);
3432 break;
3433 case EXTERNAL_INT_ELEMENTS:
3434 case EXTERNAL_UNSIGNED_INT_ELEMENTS:
3435 __ sw(value, mem_operand);
3436 break;
3437 case EXTERNAL_FLOAT_ELEMENTS:
3438 case EXTERNAL_DOUBLE_ELEMENTS:
3439 case FAST_DOUBLE_ELEMENTS:
3440 case FAST_ELEMENTS:
3441 case FAST_SMI_ONLY_ELEMENTS:
3442 case DICTIONARY_ELEMENTS:
3443 case NON_STRICT_ARGUMENTS_ELEMENTS:
3444 UNREACHABLE();
3445 break;
3446 }
3447 }
3448 }
3449
3450 void LCodeGen::DoStoreKeyedGeneric(LStoreKeyedGeneric* instr) {
3451 ASSERT(ToRegister(instr->object()).is(a2));
3452 ASSERT(ToRegister(instr->key()).is(a1));
3453 ASSERT(ToRegister(instr->value()).is(a0));
3454
3455 Handle<Code> ic = instr->strict_mode()
3456 ? isolate()->builtins()->KeyedStoreIC_Initialize_Strict()
3457 : isolate()->builtins()->KeyedStoreIC_Initialize();
3458 CallCode(ic, RelocInfo::CODE_TARGET, instr);
3459 }
3460
3461
3462 void LCodeGen::DoTransitionElementsKind(LTransitionElementsKind* instr) {
3463 Register object_reg = ToRegister(instr->object());
3464 Register new_map_reg = ToRegister(instr->new_map_reg());
3465 Register scratch = scratch0();
3466
3467 Handle<Map> from_map = instr->original_map();
3468 Handle<Map> to_map = instr->transitioned_map();
3469 ElementsKind from_kind = from_map->elements_kind();
3470 ElementsKind to_kind = to_map->elements_kind();
3471
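// The result is the input object, whether or not a transition is performed.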
3472 __ mov(ToRegister(instr->result()), object_reg);
3473
3474 Label not_applicable;
3475 __ lw(scratch, FieldMemOperand(object_reg, HeapObject::kMapOffset));
3476 __ Branch(&not_applicable, ne, scratch, Operand(from_map));
3477
3478 __ li(new_map_reg, Operand(to_map));
3479 if (from_kind == FAST_SMI_ONLY_ELEMENTS && to_kind == FAST_ELEMENTS) {
3480 __ sw(new_map_reg, FieldMemOperand(object_reg, HeapObject::kMapOffset));
3481 // Write barrier.
3482 __ RecordWriteField(object_reg, HeapObject::kMapOffset, new_map_reg,
3483 scratch, kRAHasBeenSaved, kDontSaveFPRegs);
3484 } else if (from_kind == FAST_SMI_ONLY_ELEMENTS &&
3485 to_kind == FAST_DOUBLE_ELEMENTS) {
3486 Register fixed_object_reg = ToRegister(instr->temp_reg());
3487 ASSERT(fixed_object_reg.is(a2));
3488 ASSERT(new_map_reg.is(a3));
3489 __ mov(fixed_object_reg, object_reg);
3490 CallCode(isolate()->builtins()->TransitionElementsSmiToDouble(),
3491 RelocInfo::CODE_TARGET, instr);
3492 } else if (from_kind == FAST_DOUBLE_ELEMENTS && to_kind == FAST_ELEMENTS) {
3493 Register fixed_object_reg = ToRegister(instr->temp_reg());
3494 ASSERT(fixed_object_reg.is(a2));
3495 ASSERT(new_map_reg.is(a3));
3496 __ mov(fixed_object_reg, object_reg);
3497 CallCode(isolate()->builtins()->TransitionElementsDoubleToObject(),
3498 RelocInfo::CODE_TARGET, instr);
3499 } else {
3500 UNREACHABLE();
3501 }
3502 __ bind(&not_applicable);
3503 }
3504
3505
3506 void LCodeGen::DoStringAdd(LStringAdd* instr) {
3507 __ push(ToRegister(instr->left()));
3508 __ push(ToRegister(instr->right()));
3509 StringAddStub stub(NO_STRING_CHECK_IN_STUB);
3510 CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
3511 }
3512
3513
3514 void LCodeGen::DoStringCharCodeAt(LStringCharCodeAt* instr) {
3515 class DeferredStringCharCodeAt: public LDeferredCode {
3516 public:
3517 DeferredStringCharCodeAt(LCodeGen* codegen, LStringCharCodeAt* instr)
3518 : LDeferredCode(codegen), instr_(instr) { }
3519 virtual void Generate() { codegen()->DoDeferredStringCharCodeAt(instr_); }
3520 virtual LInstruction* instr() { return instr_; }
3521 private:
3522 LStringCharCodeAt* instr_;
3523 };
3524
3525 Register temp = scratch1();
3526 Register string = ToRegister(instr->string());
3527 Register index = ToRegister(instr->index());
3528 Register result = ToRegister(instr->result());
3529 DeferredStringCharCodeAt* deferred =
3530 new DeferredStringCharCodeAt(this, instr);
3531
3532 // Fetch the instance type of the receiver into result register.
3533 __ lw(result, FieldMemOperand(string, HeapObject::kMapOffset));
3534 __ lbu(result, FieldMemOperand(result, Map::kInstanceTypeOffset));
3535
3536 // We need special handling for indirect strings.
3537 Label check_sequential;
3538 __ And(temp, result, kIsIndirectStringMask);
3539 __ Branch(&check_sequential, eq, temp, Operand(zero_reg));
3540
3541 // Dispatch on the indirect string shape: slice or cons.
3542 Label cons_string;
3543 __ And(temp, result, kSlicedNotConsMask);
3544 __ Branch(&cons_string, eq, temp, Operand(zero_reg));
3545
3546 // Handle slices.
3547 Label indirect_string_loaded;
3548 __ lw(result, FieldMemOperand(string, SlicedString::kOffsetOffset));
3549 __ sra(temp, result, kSmiTagSize);
3550 __ addu(index, index, temp);
3551 __ lw(string, FieldMemOperand(string, SlicedString::kParentOffset));
3552 __ jmp(&indirect_string_loaded);
3553
3554 // Handle conses.
3555 // Check whether the right hand side is the empty string (i.e. if
3556 // this is really a flat string in a cons string). If that is not
3557 // the case we would rather go to the runtime system now to flatten
3558 // the string.
3559 __ bind(&cons_string);
3560 __ lw(result, FieldMemOperand(string, ConsString::kSecondOffset));
3561 __ LoadRoot(temp, Heap::kEmptyStringRootIndex);
3562 __ Branch(deferred->entry(), ne, result, Operand(temp));
3563 // Get the first of the two strings and load its instance type.
3564 __ lw(string, FieldMemOperand(string, ConsString::kFirstOffset));
3565
3566 __ bind(&indirect_string_loaded);
3567 __ lw(result, FieldMemOperand(string, HeapObject::kMapOffset));
3568 __ lbu(result, FieldMemOperand(result, Map::kInstanceTypeOffset));
3569
3570 // Check whether the string is sequential. The only non-sequential
3571 // shapes we support have just been unwrapped above.
3572 __ bind(&check_sequential);
3573 STATIC_ASSERT(kSeqStringTag == 0);
3574 __ And(temp, result, Operand(kStringRepresentationMask));
3575 __ Branch(deferred->entry(), ne, temp, Operand(zero_reg));
3576
3577 // Dispatch on the encoding: ASCII or two-byte.
3578 Label ascii_string;
3579 STATIC_ASSERT((kStringEncodingMask & kAsciiStringTag) != 0);
3580 STATIC_ASSERT((kStringEncodingMask & kTwoByteStringTag) == 0);
3581 __ And(temp, result, Operand(kStringEncodingMask));
3582 __ Branch(&ascii_string, ne, temp, Operand(zero_reg));
3583
3584 // Two-byte string.
3585 // Load the two-byte character code into the result register.
3586 Label done;
3587 __ Addu(result,
3588 string,
3589 Operand(SeqTwoByteString::kHeaderSize - kHeapObjectTag));
3590 __ sll(temp, index, 1);
3591 __ Addu(result, result, temp);
3592 __ lhu(result, MemOperand(result, 0));
3593 __ Branch(&done);
3594
3595 // ASCII string.
3596 // Load the byte into the result register.
3597 __ bind(&ascii_string);
3598 __ Addu(result,
3599 string,
3600 Operand(SeqAsciiString::kHeaderSize - kHeapObjectTag));
3601 __ Addu(result, result, index);
3602 __ lbu(result, MemOperand(result, 0));
3603
3604 __ bind(&done);
3605 __ bind(deferred->exit());
3606 }
3607
3608
3609 void LCodeGen::DoDeferredStringCharCodeAt(LStringCharCodeAt* instr) {
3610 Register string = ToRegister(instr->string());
3611 Register result = ToRegister(instr->result());
3612 Register scratch = scratch0();
3613
3614 // TODO(3095996): Get rid of this. For now, we need to make the
3615 // result register contain a valid pointer because it is already
3616 // contained in the register pointer map.
3617 __ mov(result, zero_reg);
3618
3619 PushSafepointRegistersScope scope(this, Safepoint::kWithRegisters);
3620 __ push(string);
3621 // Push the index as a smi. This is safe because of the checks in
3622 // DoStringCharCodeAt above.
3623 if (instr->index()->IsConstantOperand()) {
3624 int const_index = ToInteger32(LConstantOperand::cast(instr->index()));
3625 __ Addu(scratch, zero_reg, Operand(Smi::FromInt(const_index)));
3626 __ push(scratch);
3627 } else {
3628 Register index = ToRegister(instr->index());
3629 __ SmiTag(index);
3630 __ push(index);
3631 }
3632 CallRuntimeFromDeferred(Runtime::kStringCharCodeAt, 2, instr);
3633 if (FLAG_debug_code) {
3634 __ AbortIfNotSmi(v0);
3635 }
3636 __ SmiUntag(v0);
3637 __ StoreToSafepointRegisterSlot(v0, result);
3638 }
3639
3640
3641 void LCodeGen::DoStringCharFromCode(LStringCharFromCode* instr) {
3642 class DeferredStringCharFromCode: public LDeferredCode {
3643 public:
3644 DeferredStringCharFromCode(LCodeGen* codegen, LStringCharFromCode* instr)
3645 : LDeferredCode(codegen), instr_(instr) { }
3646 virtual void Generate() { codegen()->DoDeferredStringCharFromCode(instr_); }
3647 virtual LInstruction* instr() { return instr_; }
3648 private:
3649 LStringCharFromCode* instr_;
3650 };
3651
3652 DeferredStringCharFromCode* deferred =
3653 new DeferredStringCharFromCode(this, instr);
3654
3655 ASSERT(instr->hydrogen()->value()->representation().IsInteger32());
3656 Register char_code = ToRegister(instr->char_code());
3657 Register result = ToRegister(instr->result());
3658 Register scratch = scratch0();
3659 ASSERT(!char_code.is(result));
3660
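// Fast case: look up the character in the single character string cache.
// Non-ASCII char codes and cache misses are handled in the deferred code.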
3661 __ Branch(deferred->entry(), hi,
3662 char_code, Operand(String::kMaxAsciiCharCode));
3663 __ LoadRoot(result, Heap::kSingleCharacterStringCacheRootIndex);
3664 __ sll(scratch, char_code, kPointerSizeLog2);
3665 __ Addu(result, result, scratch);
3666 __ lw(result, FieldMemOperand(result, FixedArray::kHeaderSize));
3667 __ LoadRoot(scratch, Heap::kUndefinedValueRootIndex);
3668 __ Branch(deferred->entry(), eq, result, Operand(scratch));
3669 __ bind(deferred->exit());
3670 }
3671
3672
3673 void LCodeGen::DoDeferredStringCharFromCode(LStringCharFromCode* instr) {
3674 Register char_code = ToRegister(instr->char_code());
3675 Register result = ToRegister(instr->result());
3676
3677 // TODO(3095996): Get rid of this. For now, we need to make the
3678 // result register contain a valid pointer because it is already
3679 // contained in the register pointer map.
3680 __ mov(result, zero_reg);
3681
3682 PushSafepointRegistersScope scope(this, Safepoint::kWithRegisters);
3683 __ SmiTag(char_code);
3684 __ push(char_code);
3685 CallRuntimeFromDeferred(Runtime::kCharFromCode, 1, instr);
3686 __ StoreToSafepointRegisterSlot(v0, result);
3687 }
3688
3689
3690 void LCodeGen::DoStringLength(LStringLength* instr) {
3691 Register string = ToRegister(instr->InputAt(0));
3692 Register result = ToRegister(instr->result());
3693 __ lw(result, FieldMemOperand(string, String::kLengthOffset));
3694 }
3695
3696
3697 void LCodeGen::DoInteger32ToDouble(LInteger32ToDouble* instr) {
3698 LOperand* input = instr->InputAt(0);
3699 ASSERT(input->IsRegister() || input->IsStackSlot());
3700 LOperand* output = instr->result();
3701 ASSERT(output->IsDoubleRegister());
3702 FPURegister single_scratch = double_scratch0().low();
3703 if (input->IsStackSlot()) {
3704 Register scratch = scratch0();
3705 __ lw(scratch, ToMemOperand(input));
3706 __ mtc1(scratch, single_scratch);
3707 } else {
3708 __ mtc1(ToRegister(input), single_scratch);
3709 }
3710 __ cvt_d_w(ToDoubleRegister(output), single_scratch);
3711 }
3712
3713
3714 void LCodeGen::DoNumberTagI(LNumberTagI* instr) {
3715 class DeferredNumberTagI: public LDeferredCode {
3716 public:
3717 DeferredNumberTagI(LCodeGen* codegen, LNumberTagI* instr)
3718 : LDeferredCode(codegen), instr_(instr) { }
3719 virtual void Generate() { codegen()->DoDeferredNumberTagI(instr_); }
3720 virtual LInstruction* instr() { return instr_; }
3721 private:
3722 LNumberTagI* instr_;
3723 };
3724
3725 LOperand* input = instr->InputAt(0);
3726 ASSERT(input->IsRegister() && input->Equals(instr->result()));
3727 Register reg = ToRegister(input);
3728 Register overflow = scratch0();
3729
3730 DeferredNumberTagI* deferred = new DeferredNumberTagI(this, instr);
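// Tag the value; if it overflows the Smi range, the deferred code allocates
// a heap number.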
3731 __ SmiTagCheckOverflow(reg, overflow);
3732 __ BranchOnOverflow(deferred->entry(), overflow);
3733 __ bind(deferred->exit());
3734 }
3735
3736
3737 void LCodeGen::DoDeferredNumberTagI(LNumberTagI* instr) {
3738 Label slow;
3739 Register reg = ToRegister(instr->InputAt(0));
3740 FPURegister dbl_scratch = double_scratch0();
3741
3742 // Preserve the value of all registers.
3743 PushSafepointRegistersScope scope(this, Safepoint::kWithRegisters);
3744
3745 // There was overflow, so bits 30 and 31 of the original integer
3746 // disagree. Try to allocate a heap number in new space and store
3747 // the value in there. If that fails, call the runtime system.
3748 Label done;
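// Recover the original value: untag, then restore the sign bit lost by the
// overflowing tag.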
3749 __ SmiUntag(reg);
3750 __ Xor(reg, reg, Operand(0x80000000));
3751 __ mtc1(reg, dbl_scratch);
3752 __ cvt_d_w(dbl_scratch, dbl_scratch);
3753 if (FLAG_inline_new) {
3754 __ LoadRoot(t2, Heap::kHeapNumberMapRootIndex);
3755 __ AllocateHeapNumber(t1, a3, t0, t2, &slow);
3756 if (!reg.is(t1)) __ mov(reg, t1);
3757 __ Branch(&done);
3758 }
3759
3760 // Slow case: Call the runtime system to do the number allocation.
3761 __ bind(&slow);
3762
3763 // TODO(3095996): Put a valid pointer value in the stack slot where the result
3764 // register is stored, as this register is in the pointer map, but contains an
3765 // integer value.
3766 __ StoreToSafepointRegisterSlot(zero_reg, reg);
3767 CallRuntimeFromDeferred(Runtime::kAllocateHeapNumber, 0, instr);
3768 if (!reg.is(v0)) __ mov(reg, v0);
3769
3770 // Done. Store the value in dbl_scratch into the value field of the
3771 // allocated heap number.
3772 __ bind(&done);
3773 __ sdc1(dbl_scratch, FieldMemOperand(reg, HeapNumber::kValueOffset));
3774 __ StoreToSafepointRegisterSlot(reg, reg);
3775 }
3776
3777
3778 void LCodeGen::DoNumberTagD(LNumberTagD* instr) {
3779 class DeferredNumberTagD: public LDeferredCode {
3780 public:
3781 DeferredNumberTagD(LCodeGen* codegen, LNumberTagD* instr)
3782 : LDeferredCode(codegen), instr_(instr) { }
3783 virtual void Generate() { codegen()->DoDeferredNumberTagD(instr_); }
3784 virtual LInstruction* instr() { return instr_; }
3785 private:
3786 LNumberTagD* instr_;
3787 };
3788
3789 DoubleRegister input_reg = ToDoubleRegister(instr->InputAt(0));
3790 Register scratch = scratch0();
3791 Register reg = ToRegister(instr->result());
3792 Register temp1 = ToRegister(instr->TempAt(0));
3793 Register temp2 = ToRegister(instr->TempAt(1));
3794
3795 DeferredNumberTagD* deferred = new DeferredNumberTagD(this, instr);
3796 if (FLAG_inline_new) {
3797 __ LoadRoot(scratch, Heap::kHeapNumberMapRootIndex);
3798 __ AllocateHeapNumber(reg, temp1, temp2, scratch, deferred->entry());
3799 } else {
3800 __ Branch(deferred->entry());
3801 }
3802 __ bind(deferred->exit());
3803 __ sdc1(input_reg, FieldMemOperand(reg, HeapNumber::kValueOffset));
3804 }
3805
3806
3807 void LCodeGen::DoDeferredNumberTagD(LNumberTagD* instr) {
3808 // TODO(3095996): Get rid of this. For now, we need to make the
3809 // result register contain a valid pointer because it is already
3810 // contained in the register pointer map.
3811 Register reg = ToRegister(instr->result());
3812 __ mov(reg, zero_reg);
3813
3814 PushSafepointRegistersScope scope(this, Safepoint::kWithRegisters);
3815 CallRuntimeFromDeferred(Runtime::kAllocateHeapNumber, 0, instr);
3816 __ StoreToSafepointRegisterSlot(v0, reg);
3817 }
3818
3819
3820 void LCodeGen::DoSmiTag(LSmiTag* instr) {
3821 LOperand* input = instr->InputAt(0);
3822 ASSERT(input->IsRegister() && input->Equals(instr->result()));
3823 ASSERT(!instr->hydrogen_value()->CheckFlag(HValue::kCanOverflow));
3824 __ SmiTag(ToRegister(input));
3825 }
3826
3827
3828 void LCodeGen::DoSmiUntag(LSmiUntag* instr) {
3829 Register scratch = scratch0();
3830 LOperand* input = instr->InputAt(0);
3831 ASSERT(input->IsRegister() && input->Equals(instr->result()));
3832 if (instr->needs_check()) {
3833 STATIC_ASSERT(kHeapObjectTag == 1);
3834 // If the input is a HeapObject, the value of scratch won't be zero.
3835 __ And(scratch, ToRegister(input), Operand(kHeapObjectTag));
3836 __ SmiUntag(ToRegister(input));
3837 DeoptimizeIf(ne, instr->environment(), scratch, Operand(zero_reg));
3838 } else {
3839 __ SmiUntag(ToRegister(input));
3840 }
3841 }
3842
3843
3844 void LCodeGen::EmitNumberUntagD(Register input_reg,
3845 DoubleRegister result_reg,
3846 bool deoptimize_on_undefined,
3847 LEnvironment* env) {
3848 Register scratch = scratch0();
3849
3850 Label load_smi, done;
3851
3852 // Smi check.
3853 __ JumpIfSmi(input_reg, &load_smi);
3854
3855 // Heap number map check.
3856 __ lw(scratch, FieldMemOperand(input_reg, HeapObject::kMapOffset));
3857 __ LoadRoot(at, Heap::kHeapNumberMapRootIndex);
3858 if (deoptimize_on_undefined) {
3859 DeoptimizeIf(ne, env, scratch, Operand(at));
3860 } else {
3861 Label heap_number;
3862 __ Branch(&heap_number, eq, scratch, Operand(at));
3863
3864 __ LoadRoot(at, Heap::kUndefinedValueRootIndex);
3865 DeoptimizeIf(ne, env, input_reg, Operand(at));
3866
3867 // Convert undefined to NaN.
3868 __ LoadRoot(at, Heap::kNanValueRootIndex);
3869 __ ldc1(result_reg, FieldMemOperand(at, HeapNumber::kValueOffset));
3870 __ Branch(&done);
3871
3872 __ bind(&heap_number);
3873 }
3874 // Heap number to double register conversion.
3875 __ ldc1(result_reg, FieldMemOperand(input_reg, HeapNumber::kValueOffset));
3876 __ Branch(&done);
3877
3878 // Smi to double register conversion.
3879 __ bind(&load_smi);
3880 __ SmiUntag(input_reg); // Untag smi before converting to float.
3881 __ mtc1(input_reg, result_reg);
3882 __ cvt_d_w(result_reg, result_reg);
3883 __ SmiTag(input_reg); // Retag smi.
3884 __ bind(&done);
3885 }
3886
3887
3888 void LCodeGen::DoDeferredTaggedToI(LTaggedToI* instr) {
3889 Register input_reg = ToRegister(instr->InputAt(0));
3890 Register scratch1 = scratch0();
3891 Register scratch2 = ToRegister(instr->TempAt(0));
3892 DoubleRegister double_scratch = double_scratch0();
3893 FPURegister single_scratch = double_scratch.low();
3894
3895 ASSERT(!scratch1.is(input_reg) && !scratch1.is(scratch2));
3896 ASSERT(!scratch2.is(input_reg) && !scratch2.is(scratch1));
3897
3898 Label done;
3899
3900 // The input is a tagged HeapObject.
3901 // Heap number map check.
3902 __ lw(scratch1, FieldMemOperand(input_reg, HeapObject::kMapOffset));
3903 __ LoadRoot(at, Heap::kHeapNumberMapRootIndex);
3904 // Both clauses of the 'if' below compare the map in scratch1 against the
3905 // heap number map held in 'at'.
3906
3907 if (instr->truncating()) {
3908 Register scratch3 = ToRegister(instr->TempAt(1));
3909 DoubleRegister double_scratch2 = ToDoubleRegister(instr->TempAt(2));
3910 ASSERT(!scratch3.is(input_reg) &&
3911 !scratch3.is(scratch1) &&
3912 !scratch3.is(scratch2));
3913 // Performs a truncating conversion of a floating point number as used by
3914 // the JS bitwise operations.
3915 Label heap_number;
3916 __ Branch(&heap_number, eq, scratch1, Operand(at)); // HeapNumber map?
3917 // Check for undefined. Undefined is converted to zero for truncating
3918 // conversions.
3919 __ LoadRoot(at, Heap::kUndefinedValueRootIndex);
3920 DeoptimizeIf(ne, instr->environment(), input_reg, Operand(at));
3921 ASSERT(ToRegister(instr->result()).is(input_reg));
3922 __ mov(input_reg, zero_reg);
3923 __ Branch(&done);
3924
3925 __ bind(&heap_number);
3926 __ ldc1(double_scratch2,
3927 FieldMemOperand(input_reg, HeapNumber::kValueOffset));
3928 __ EmitECMATruncate(input_reg,
3929 double_scratch2,
3930 single_scratch,
3931 scratch1,
3932 scratch2,
3933 scratch3);
3934 } else {
3935 // Deoptimize if we don't have a heap number.
3936 DeoptimizeIf(ne, instr->environment(), scratch1, Operand(at));
3937
3938 // Load the double value.
3939 __ ldc1(double_scratch,
3940 FieldMemOperand(input_reg, HeapNumber::kValueOffset));
3941
3942 Register except_flag = scratch2;
3943 __ EmitFPUTruncate(kRoundToZero,
3944 single_scratch,
3945 double_scratch,
3946 scratch1,
3947 except_flag,
3948 kCheckForInexactConversion);
3949
3950 // Deopt if the operation did not succeed.
3951 DeoptimizeIf(ne, instr->environment(), except_flag, Operand(zero_reg));
3952
3953 // Load the result.
3954 __ mfc1(input_reg, single_scratch);
3955
3956 if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) {
3957 __ Branch(&done, ne, input_reg, Operand(zero_reg));
3958
3959 __ mfc1(scratch1, double_scratch.high());
3960 __ And(scratch1, scratch1, Operand(HeapNumber::kSignMask));
3961 DeoptimizeIf(ne, instr->environment(), scratch1, Operand(zero_reg));
3962 }
3963 }
3964 __ bind(&done);
3965 }
3966
3967
3968 void LCodeGen::DoTaggedToI(LTaggedToI* instr) {
3969 class DeferredTaggedToI: public LDeferredCode {
3970 public:
3971 DeferredTaggedToI(LCodeGen* codegen, LTaggedToI* instr)
3972 : LDeferredCode(codegen), instr_(instr) { }
3973 virtual void Generate() { codegen()->DoDeferredTaggedToI(instr_); }
3974 virtual LInstruction* instr() { return instr_; }
3975 private:
3976 LTaggedToI* instr_;
3977 };
3978
3979 LOperand* input = instr->InputAt(0);
3980 ASSERT(input->IsRegister());
3981 ASSERT(input->Equals(instr->result()));
3982
3983 Register input_reg = ToRegister(input);
3984
3985 DeferredTaggedToI* deferred = new DeferredTaggedToI(this, instr);
3986
3987 // Let the deferred code handle the HeapObject case.
3988 __ JumpIfNotSmi(input_reg, deferred->entry());
3989
3990 // Smi to int32 conversion.
3991 __ SmiUntag(input_reg);
3992 __ bind(deferred->exit());
3993 }
3994
3995
3996 void LCodeGen::DoNumberUntagD(LNumberUntagD* instr) {
3997 LOperand* input = instr->InputAt(0);
3998 ASSERT(input->IsRegister());
3999 LOperand* result = instr->result();
4000 ASSERT(result->IsDoubleRegister());
4001
4002 Register input_reg = ToRegister(input);
4003 DoubleRegister result_reg = ToDoubleRegister(result);
4004
4005 EmitNumberUntagD(input_reg, result_reg,
4006 instr->hydrogen()->deoptimize_on_undefined(),
4007 instr->environment());
4008 }
4009
4010
4011 void LCodeGen::DoDoubleToI(LDoubleToI* instr) {
4012 Register result_reg = ToRegister(instr->result());
4013 Register scratch1 = scratch0();
4014 Register scratch2 = ToRegister(instr->TempAt(0));
4015 DoubleRegister double_input = ToDoubleRegister(instr->InputAt(0));
4016 DoubleRegister double_scratch = double_scratch0();
4017 FPURegister single_scratch = double_scratch0().low();
4018
4019 if (instr->truncating()) {
4020 Register scratch3 = ToRegister(instr->TempAt(1));
4021 __ EmitECMATruncate(result_reg,
4022 double_input,
4023 single_scratch,
4024 scratch1,
4025 scratch2,
4026 scratch3);
4027 } else {
4028 Register except_flag = scratch2;
4029
4030 __ EmitFPUTruncate(kRoundToMinusInf,
4031 single_scratch,
4032 double_input,
4033 scratch1,
4034 except_flag,
4035 kCheckForInexactConversion);
4036
4037 // Deopt if the operation did not succeed (except_flag != 0).
4038 DeoptimizeIf(ne, instr->environment(), except_flag, Operand(zero_reg));
4039
4040 // Load the result.
4041 __ mfc1(result_reg, single_scratch);
4042 }
4043 }
4044
4045
4046 void LCodeGen::DoCheckSmi(LCheckSmi* instr) {
4047 LOperand* input = instr->InputAt(0);
4048 __ And(at, ToRegister(input), Operand(kSmiTagMask));
4049 DeoptimizeIf(ne, instr->environment(), at, Operand(zero_reg));
4050 }
4051
4052
4053 void LCodeGen::DoCheckNonSmi(LCheckNonSmi* instr) {
4054 LOperand* input = instr->InputAt(0);
4055 __ And(at, ToRegister(input), Operand(kSmiTagMask));
4056 DeoptimizeIf(eq, instr->environment(), at, Operand(zero_reg));
4057 }
4058
4059
4060 void LCodeGen::DoCheckInstanceType(LCheckInstanceType* instr) {
4061 Register input = ToRegister(instr->InputAt(0));
4062 Register scratch = scratch0();
4063
4064 __ GetObjectType(input, scratch, scratch);
4065
4066 if (instr->hydrogen()->is_interval_check()) {
4067 InstanceType first;
4068 InstanceType last;
4069 instr->hydrogen()->GetCheckInterval(&first, &last);
4070
4071 // If there is only one type in the interval, check for equality.
4072 if (first == last) {
4073 DeoptimizeIf(ne, instr->environment(), scratch, Operand(first));
4074 } else {
4075 DeoptimizeIf(lo, instr->environment(), scratch, Operand(first));
4076 // Omit check for the last type.
4077 if (last != LAST_TYPE) {
4078 DeoptimizeIf(hi, instr->environment(), scratch, Operand(last));
4079 }
4080 }
4081 } else {
4082 uint8_t mask;
4083 uint8_t tag;
4084 instr->hydrogen()->GetCheckMaskAndTag(&mask, &tag);
4085
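// A single-bit mask lets us test the bit directly instead of masking and
// comparing against the tag.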
4086 if (IsPowerOf2(mask)) {
4087 ASSERT(tag == 0 || IsPowerOf2(tag));
4088 __ And(at, scratch, mask);
4089 DeoptimizeIf(tag == 0 ? ne : eq, instr->environment(),
4090 at, Operand(zero_reg));
4091 } else {
4092 __ And(scratch, scratch, Operand(mask));
4093 DeoptimizeIf(ne, instr->environment(), scratch, Operand(tag));
4094 }
4095 }
4096 }
4097
4098
4099 void LCodeGen::DoCheckFunction(LCheckFunction* instr) {
4100 ASSERT(instr->InputAt(0)->IsRegister());
4101 Register reg = ToRegister(instr->InputAt(0));
4102 DeoptimizeIf(ne, instr->environment(), reg,
4103 Operand(instr->hydrogen()->target()));
4104 }
4105
4106
4107 void LCodeGen::DoCheckMap(LCheckMap* instr) {
4108 Register scratch = scratch0();
4109 LOperand* input = instr->InputAt(0);
4110 ASSERT(input->IsRegister());
4111 Register reg = ToRegister(input);
4112 __ lw(scratch, FieldMemOperand(reg, HeapObject::kMapOffset));
4113 DeoptimizeIf(ne,
4114 instr->environment(),
4115 scratch,
4116 Operand(instr->hydrogen()->map()));
4117 }
4118
4119
4120 void LCodeGen::DoClampDToUint8(LClampDToUint8* instr) {
4121 DoubleRegister value_reg = ToDoubleRegister(instr->unclamped());
4122 Register result_reg = ToRegister(instr->result());
4123 DoubleRegister temp_reg = ToDoubleRegister(instr->TempAt(0));
4124 __ ClampDoubleToUint8(result_reg, value_reg, temp_reg);
4125 }
4126
4127
4128 void LCodeGen::DoClampIToUint8(LClampIToUint8* instr) {
4129 Register unclamped_reg = ToRegister(instr->unclamped());
4130 Register result_reg = ToRegister(instr->result());
4131 __ ClampUint8(result_reg, unclamped_reg);
4132 }
4133
4134
4135 void LCodeGen::DoClampTToUint8(LClampTToUint8* instr) {
4136 Register scratch = scratch0();
4137 Register input_reg = ToRegister(instr->unclamped());
4138 Register result_reg = ToRegister(instr->result());
4139 DoubleRegister temp_reg = ToDoubleRegister(instr->TempAt(0));
4140 Label is_smi, done, heap_number;
4141
4142 // Both smi and heap number cases are handled.
4143 __ JumpIfSmi(input_reg, &is_smi);
4144
4145 // Check for heap number
4146 __ lw(scratch, FieldMemOperand(input_reg, HeapObject::kMapOffset));
4147 __ Branch(&heap_number, eq, scratch, Operand(factory()->heap_number_map()));
4148
4149 // Check for undefined. Undefined is converted to zero for clamping
4150 // conversions.
4151 DeoptimizeIf(ne, instr->environment(), input_reg,
4152 Operand(factory()->undefined_value()));
4153 __ mov(result_reg, zero_reg);
4154 __ jmp(&done);
4155
4156 // Heap number
4157 __ bind(&heap_number);
4158 __ ldc1(double_scratch0(), FieldMemOperand(input_reg,
4159 HeapNumber::kValueOffset));
4160 __ ClampDoubleToUint8(result_reg, double_scratch0(), temp_reg);
4161 __ jmp(&done);
4162
4163 // Smi
4164 __ bind(&is_smi);
4165 __ SmiUntag(scratch, input_reg);
4166 __ ClampUint8(result_reg, scratch);
4167
4168 __ bind(&done);
4169 }
4170
4171
4172 void LCodeGen::LoadHeapObject(Register result,
4173 Handle<HeapObject> object) {
4174 if (heap()->InNewSpace(*object)) {
4175 Handle<JSGlobalPropertyCell> cell =
4176 factory()->NewJSGlobalPropertyCell(object);
4177 __ li(result, Operand(cell));
4178 __ lw(result, FieldMemOperand(result, JSGlobalPropertyCell::kValueOffset));
4179 } else {
4180 __ li(result, Operand(object));
4181 }
4182 }
4183
4184
4185 void LCodeGen::DoCheckPrototypeMaps(LCheckPrototypeMaps* instr) {
4186 Register temp1 = ToRegister(instr->TempAt(0));
4187 Register temp2 = ToRegister(instr->TempAt(1));
4188
4189 Handle<JSObject> holder = instr->holder();
4190 Handle<JSObject> current_prototype = instr->prototype();
4191
4192 // Load prototype object.
4193 LoadHeapObject(temp1, current_prototype);
4194
4195 // Check prototype maps up to the holder.
4196 while (!current_prototype.is_identical_to(holder)) {
4197 __ lw(temp2, FieldMemOperand(temp1, HeapObject::kMapOffset));
4198 DeoptimizeIf(ne,
4199 instr->environment(),
4200 temp2,
4201 Operand(Handle<Map>(current_prototype->map())));
4202 current_prototype =
4203 Handle<JSObject>(JSObject::cast(current_prototype->GetPrototype()));
4204 // Load next prototype object.
4205 LoadHeapObject(temp1, current_prototype);
4206 }
4207
4208 // Check the holder map.
4209 __ lw(temp2, FieldMemOperand(temp1, HeapObject::kMapOffset));
4210 DeoptimizeIf(ne,
4211 instr->environment(),
4212 temp2,
4213 Operand(Handle<Map>(current_prototype->map())));
4214 }
4215
4216
4217 void LCodeGen::DoArrayLiteral(LArrayLiteral* instr) {
4218 Handle<FixedArray> constant_elements = instr->hydrogen()->constant_elements();
4219 ASSERT_EQ(2, constant_elements->length());
4220 ElementsKind constant_elements_kind =
4221 static_cast<ElementsKind>(Smi::cast(constant_elements->get(0))->value());
4222
4223 __ lw(a3, MemOperand(fp, JavaScriptFrameConstants::kFunctionOffset));
4224 __ lw(a3, FieldMemOperand(a3, JSFunction::kLiteralsOffset));
4225 __ li(a2, Operand(Smi::FromInt(instr->hydrogen()->literal_index())));
4226 __ li(a1, Operand(constant_elements));
4227 __ Push(a3, a2, a1);
4228
4229 // Pick the right runtime function or stub to call.
4230 int length = instr->hydrogen()->length();
4231 if (instr->hydrogen()->IsCopyOnWrite()) {
4232 ASSERT(instr->hydrogen()->depth() == 1);
4233 FastCloneShallowArrayStub::Mode mode =
4234 FastCloneShallowArrayStub::COPY_ON_WRITE_ELEMENTS;
4235 FastCloneShallowArrayStub stub(mode, length);
4236 CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
4237 } else if (instr->hydrogen()->depth() > 1) {
4238 CallRuntime(Runtime::kCreateArrayLiteral, 3, instr);
4239 } else if (length > FastCloneShallowArrayStub::kMaximumClonedLength) {
4240 CallRuntime(Runtime::kCreateArrayLiteralShallow, 3, instr);
4241 } else {
4242 FastCloneShallowArrayStub::Mode mode =
4243 constant_elements_kind == FAST_DOUBLE_ELEMENTS
4244 ? FastCloneShallowArrayStub::CLONE_DOUBLE_ELEMENTS
4245 : FastCloneShallowArrayStub::CLONE_ELEMENTS;
4246 FastCloneShallowArrayStub stub(mode, length);
4247 CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
4248 }
4249 }
4250
4251
4252 void LCodeGen::DoObjectLiteral(LObjectLiteral* instr) {
4253 ASSERT(ToRegister(instr->result()).is(v0));
4254 __ lw(t0, MemOperand(fp, JavaScriptFrameConstants::kFunctionOffset));
4255 __ lw(t0, FieldMemOperand(t0, JSFunction::kLiteralsOffset));
4256 __ li(a3, Operand(Smi::FromInt(instr->hydrogen()->literal_index())));
4257 __ li(a2, Operand(instr->hydrogen()->constant_properties()));
4258 __ li(a1, Operand(Smi::FromInt(instr->hydrogen()->fast_elements() ? 1 : 0)));
4259 __ Push(t0, a3, a2, a1);
4260
4261 // Pick the right runtime function to call.
4262 if (instr->hydrogen()->depth() > 1) {
4263 CallRuntime(Runtime::kCreateObjectLiteral, 4, instr);
4264 } else {
4265 CallRuntime(Runtime::kCreateObjectLiteralShallow, 4, instr);
4266 }
4267 }
4268
4269
4270 void LCodeGen::DoToFastProperties(LToFastProperties* instr) {
4271 ASSERT(ToRegister(instr->InputAt(0)).is(a0));
4272 ASSERT(ToRegister(instr->result()).is(v0));
4273 __ push(a0);
4274 CallRuntime(Runtime::kToFastProperties, 1, instr);
4275 }
4276
4277
4278 void LCodeGen::DoRegExpLiteral(LRegExpLiteral* instr) {
4279 Label materialized;
4280 // Registers will be used as follows:
4281 // a3 = JS function.
4282 // t3 = literals array.
4283 // a1 = regexp literal.
4284 // a0 = regexp literal clone.
4285 // a2 and t0-t2 are used as temporaries.
4286 __ lw(a3, MemOperand(fp, JavaScriptFrameConstants::kFunctionOffset));
4287 __ lw(t3, FieldMemOperand(a3, JSFunction::kLiteralsOffset));
4288 int literal_offset = FixedArray::kHeaderSize +
4289 instr->hydrogen()->literal_index() * kPointerSize;
4290 __ lw(a1, FieldMemOperand(t3, literal_offset));
4291 __ LoadRoot(at, Heap::kUndefinedValueRootIndex);
4292 __ Branch(&materialized, ne, a1, Operand(at));
4293
4294 // Create regexp literal using runtime function
4295 // Result will be in v0.
4296 __ li(t2, Operand(Smi::FromInt(instr->hydrogen()->literal_index())));
4297 __ li(t1, Operand(instr->hydrogen()->pattern()));
4298 __ li(t0, Operand(instr->hydrogen()->flags()));
4299 __ Push(t3, t2, t1, t0);
4300 CallRuntime(Runtime::kMaterializeRegExpLiteral, 4, instr);
4301 __ mov(a1, v0);
4302
4303 __ bind(&materialized);
4304 int size = JSRegExp::kSize + JSRegExp::kInObjectFieldCount * kPointerSize;
4305 Label allocated, runtime_allocate;
4306
4307 __ AllocateInNewSpace(size, v0, a2, a3, &runtime_allocate, TAG_OBJECT);
4308 __ jmp(&allocated);
4309
4310 __ bind(&runtime_allocate);
4311 __ li(a0, Operand(Smi::FromInt(size)));
4312 __ Push(a1, a0);
4313 CallRuntime(Runtime::kAllocateInNewSpace, 1, instr);
4314 __ pop(a1);
4315
4316 __ bind(&allocated);
4317 // Copy the content into the newly allocated memory.
4318 // (Unroll copy loop once for better throughput).
4319 for (int i = 0; i < size - kPointerSize; i += 2 * kPointerSize) {
4320 __ lw(a3, FieldMemOperand(a1, i));
4321 __ lw(a2, FieldMemOperand(a1, i + kPointerSize));
4322 __ sw(a3, FieldMemOperand(v0, i));
4323 __ sw(a2, FieldMemOperand(v0, i + kPointerSize));
4324 }
4325 if ((size % (2 * kPointerSize)) != 0) {
4326 __ lw(a3, FieldMemOperand(a1, size - kPointerSize));
4327 __ sw(a3, FieldMemOperand(v0, size - kPointerSize));
4328 }
4329 }
4330
4331
4332 void LCodeGen::DoFunctionLiteral(LFunctionLiteral* instr) {
4333 // Use the fast case closure allocation code that allocates in new
4334 // space for nested functions that don't need literals cloning.
4335 Handle<SharedFunctionInfo> shared_info = instr->shared_info();
4336 bool pretenure = instr->hydrogen()->pretenure();
4337 if (!pretenure && shared_info->num_literals() == 0) {
4338 FastNewClosureStub stub(shared_info->strict_mode_flag());
4339 __ li(a1, Operand(shared_info));
4340 __ push(a1);
4341 CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
4342 } else {
4343 __ li(a2, Operand(shared_info));
4344 __ li(a1, Operand(pretenure
4345 ? factory()->true_value()
4346 : factory()->false_value()));
4347 __ Push(cp, a2, a1);
4348 CallRuntime(Runtime::kNewClosure, 3, instr);
4349 }
4350 }
4351
4352
4353 void LCodeGen::DoTypeof(LTypeof* instr) {
4354 ASSERT(ToRegister(instr->result()).is(v0));
4355 Register input = ToRegister(instr->InputAt(0));
4356 __ push(input);
4357 CallRuntime(Runtime::kTypeof, 1, instr);
4358 }
4359
4360
4361 void LCodeGen::DoTypeofIsAndBranch(LTypeofIsAndBranch* instr) {
4362 Register input = ToRegister(instr->InputAt(0));
4363 int true_block = chunk_->LookupDestination(instr->true_block_id());
4364 int false_block = chunk_->LookupDestination(instr->false_block_id());
4365 Label* true_label = chunk_->GetAssemblyLabel(true_block);
4366 Label* false_label = chunk_->GetAssemblyLabel(false_block);
4367
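// EmitTypeofIs fills in cmp1 and cmp2 with the operands for the final branch.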
4368 Register cmp1 = no_reg;
4369 Operand cmp2 = Operand(no_reg);
4370
4371 Condition final_branch_condition = EmitTypeofIs(true_label,
4372 false_label,
4373 input,
4374 instr->type_literal(),
4375 cmp1,
4376 cmp2);
4377
4378 ASSERT(cmp1.is_valid());
4379 ASSERT(!cmp2.is_reg() || cmp2.rm().is_valid());
4380
4381 if (final_branch_condition != kNoCondition) {
4382 EmitBranch(true_block, false_block, final_branch_condition, cmp1, cmp2);
4383 }
4384 }
4385
4386
4387 Condition LCodeGen::EmitTypeofIs(Label* true_label,
4388 Label* false_label,
4389 Register input,
4390 Handle<String> type_name,
4391 Register& cmp1,
4392 Operand& cmp2) {
4393 // This function makes heavy use of branch delay slots, using them to load
4394 // values that are always safe to load regardless of the type of the input
4395 // register.
4396 Condition final_branch_condition = kNoCondition;
4397 Register scratch = scratch0();
4398 if (type_name->Equals(heap()->number_symbol())) {
4399 __ JumpIfSmi(input, true_label);
4400 __ lw(input, FieldMemOperand(input, HeapObject::kMapOffset));
4401 __ LoadRoot(at, Heap::kHeapNumberMapRootIndex);
4402 cmp1 = input;
4403 cmp2 = Operand(at);
4404 final_branch_condition = eq;
4405
4406 } else if (type_name->Equals(heap()->string_symbol())) {
4407 __ JumpIfSmi(input, false_label);
4408 __ GetObjectType(input, input, scratch);
4409 __ Branch(USE_DELAY_SLOT, false_label,
4410 ge, scratch, Operand(FIRST_NONSTRING_TYPE));
4411 // input now holds the map, so the bit field load below is safe even if the
4412 // branch above is taken (the load sits in the delay slot).
4413 __ lbu(at, FieldMemOperand(input, Map::kBitFieldOffset));
4414 __ And(at, at, 1 << Map::kIsUndetectable);
4415 cmp1 = at;
4416 cmp2 = Operand(zero_reg);
4417 final_branch_condition = eq;
4418
4419 } else if (type_name->Equals(heap()->boolean_symbol())) {
4420 __ LoadRoot(at, Heap::kTrueValueRootIndex);
4421 __ Branch(USE_DELAY_SLOT, true_label, eq, at, Operand(input));
4422 __ LoadRoot(at, Heap::kFalseValueRootIndex);
4423 cmp1 = at;
4424 cmp2 = Operand(input);
4425 final_branch_condition = eq;
4426
4427 } else if (FLAG_harmony_typeof && type_name->Equals(heap()->null_symbol())) {
4428 __ LoadRoot(at, Heap::kNullValueRootIndex);
4429 cmp1 = at;
4430 cmp2 = Operand(input);
4431 final_branch_condition = eq;
4432
4433 } else if (type_name->Equals(heap()->undefined_symbol())) {
4434 __ LoadRoot(at, Heap::kUndefinedValueRootIndex);
4435 __ Branch(USE_DELAY_SLOT, true_label, eq, at, Operand(input));
4436 // The first instruction of JumpIfSmi is an And - it is safe in the delay
4437 // slot.
4438 __ JumpIfSmi(input, false_label);
4439 // Check for undetectable objects => true.
4440 __ lw(input, FieldMemOperand(input, HeapObject::kMapOffset));
4441 __ lbu(at, FieldMemOperand(input, Map::kBitFieldOffset));
4442 __ And(at, at, 1 << Map::kIsUndetectable);
4443 cmp1 = at;
4444 cmp2 = Operand(zero_reg);
4445 final_branch_condition = ne;
4446
4447 } else if (type_name->Equals(heap()->function_symbol())) {
4448 STATIC_ASSERT(NUM_OF_CALLABLE_SPEC_OBJECT_TYPES == 2);
4449 __ JumpIfSmi(input, false_label);
4450 __ GetObjectType(input, scratch, input);
4451 __ Branch(true_label, eq, input, Operand(JS_FUNCTION_TYPE));
4452 cmp1 = input;
4453 cmp2 = Operand(JS_FUNCTION_PROXY_TYPE);
4454 final_branch_condition = eq;
4455
4456 } else if (type_name->Equals(heap()->object_symbol())) {
4457 __ JumpIfSmi(input, false_label);
4458 if (!FLAG_harmony_typeof) {
4459 __ LoadRoot(at, Heap::kNullValueRootIndex);
4460 __ Branch(USE_DELAY_SLOT, true_label, eq, at, Operand(input));
4461 }
4462 // input is an object, so it is safe to use GetObjectType in the delay slot.
4463 __ GetObjectType(input, input, scratch);
4464 __ Branch(USE_DELAY_SLOT, false_label,
4465 lt, scratch, Operand(FIRST_NONCALLABLE_SPEC_OBJECT_TYPE));
4466 // Still an object, so the InstanceType can be loaded.
4467 __ lbu(scratch, FieldMemOperand(input, Map::kInstanceTypeOffset));
4468 __ Branch(USE_DELAY_SLOT, false_label,
4469 gt, scratch, Operand(LAST_NONCALLABLE_SPEC_OBJECT_TYPE));
4470 // Still an object, so the BitField can be loaded.
4471 // Check for undetectable objects => false.
4472 __ lbu(at, FieldMemOperand(input, Map::kBitFieldOffset));
4473 __ And(at, at, 1 << Map::kIsUndetectable);
4474 cmp1 = at;
4475 cmp2 = Operand(zero_reg);
4476 final_branch_condition = eq;
4477
4478 } else {
4479 cmp1 = at;
4480 cmp2 = Operand(zero_reg); // Set to valid regs, to avoid caller assertion.
4481 __ Branch(false_label);
4482 }
4483
4484 return final_branch_condition;
4485 }
4486
4487
4488 void LCodeGen::DoIsConstructCallAndBranch(LIsConstructCallAndBranch* instr) {
4489 Register temp1 = ToRegister(instr->TempAt(0));
4490 int true_block = chunk_->LookupDestination(instr->true_block_id());
4491 int false_block = chunk_->LookupDestination(instr->false_block_id());
4492
4493 EmitIsConstructCall(temp1, scratch0());
4494
4495 EmitBranch(true_block, false_block, eq, temp1,
4496 Operand(Smi::FromInt(StackFrame::CONSTRUCT)));
4497 }
4498
4499
4500 void LCodeGen::EmitIsConstructCall(Register temp1, Register temp2) {
4501 ASSERT(!temp1.is(temp2));
4502 // Get the frame pointer for the calling frame.
4503 __ lw(temp1, MemOperand(fp, StandardFrameConstants::kCallerFPOffset));
4504
4505 // Skip the arguments adaptor frame if it exists.
4506 Label check_frame_marker;
4507 __ lw(temp2, MemOperand(temp1, StandardFrameConstants::kContextOffset));
4508 __ Branch(&check_frame_marker, ne, temp2,
4509 Operand(Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR)));
4510 __ lw(temp1, MemOperand(temp1, StandardFrameConstants::kCallerFPOffset));
4511
4512 // Check the marker in the calling frame.
4513 __ bind(&check_frame_marker);
4514 __ lw(temp1, MemOperand(temp1, StandardFrameConstants::kMarkerOffset));
4515 }
4516
4517
4518 void LCodeGen::DoLazyBailout(LLazyBailout* instr) {
4519 // No code for lazy bailout instruction. Used to capture environment after a
4520 // call for populating the safepoint data with deoptimization data.
4521 }
4522
4523
4524 void LCodeGen::DoDeoptimize(LDeoptimize* instr) {
4525 DeoptimizeIf(al, instr->environment(), zero_reg, Operand(zero_reg));
4526 }
4527
4528
4529 void LCodeGen::DoDeleteProperty(LDeleteProperty* instr) {
4530 Register object = ToRegister(instr->object());
4531 Register key = ToRegister(instr->key());
4532 Register strict = scratch0();
4533 __ li(strict, Operand(Smi::FromInt(strict_mode_flag())));
4534 __ Push(object, key, strict);
4535 ASSERT(instr->HasPointerMap() && instr->HasDeoptimizationEnvironment());
4536 LPointerMap* pointers = instr->pointer_map();
4537 LEnvironment* env = instr->deoptimization_environment();
4538 RecordPosition(pointers->position());
4539 RegisterEnvironmentForDeoptimization(env);
4540 SafepointGenerator safepoint_generator(this,
4541 pointers,
4542 env->deoptimization_index());
4543 __ InvokeBuiltin(Builtins::DELETE, CALL_FUNCTION, safepoint_generator);
4544 }
4545
4546
4547 void LCodeGen::DoIn(LIn* instr) {
4548 Register obj = ToRegister(instr->object());
4549 Register key = ToRegister(instr->key());
4550 __ Push(key, obj);
4551 ASSERT(instr->HasPointerMap() && instr->HasDeoptimizationEnvironment());
4552 LPointerMap* pointers = instr->pointer_map();
4553 LEnvironment* env = instr->deoptimization_environment();
4554 RecordPosition(pointers->position());
4555 RegisterEnvironmentForDeoptimization(env);
4556 SafepointGenerator safepoint_generator(this,
4557 pointers,
4558 env->deoptimization_index());
4559 __ InvokeBuiltin(Builtins::IN, CALL_FUNCTION, safepoint_generator);
4560 }
4561
4562
4563 void LCodeGen::DoDeferredStackCheck(LStackCheck* instr) {
4564 {
4565 PushSafepointRegistersScope scope(this, Safepoint::kWithRegisters);
4566 __ CallRuntimeSaveDoubles(Runtime::kStackGuard);
4567 RegisterLazyDeoptimization(
4568 instr, RECORD_SAFEPOINT_WITH_REGISTERS_AND_NO_ARGUMENTS);
4569 }
4570
4571 // The gap code includes the restoring of the safepoint registers.
4572 int pc = masm()->pc_offset();
4573 safepoints_.SetPcAfterGap(pc);
4574 }
4575
4576
4577 void LCodeGen::DoStackCheck(LStackCheck* instr) {
4578 class DeferredStackCheck: public LDeferredCode {
4579 public:
4580 DeferredStackCheck(LCodeGen* codegen, LStackCheck* instr)
4581 : LDeferredCode(codegen), instr_(instr) { }
4582 virtual void Generate() { codegen()->DoDeferredStackCheck(instr_); }
4583 virtual LInstruction* instr() { return instr_; }
4584 private:
4585 LStackCheck* instr_;
4586 };
4587
4588 if (instr->hydrogen()->is_function_entry()) {
4589 // Perform stack overflow check.
4590 Label done;
4591 __ LoadRoot(at, Heap::kStackLimitRootIndex);
4592 __ Branch(&done, hs, sp, Operand(at));
4593 StackCheckStub stub;
4594 CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
4595 __ bind(&done);
4596 } else {
4597 ASSERT(instr->hydrogen()->is_backwards_branch());
4598 // Perform stack overflow check if this goto needs it before jumping.
4599 DeferredStackCheck* deferred_stack_check =
4600 new DeferredStackCheck(this, instr);
4601 __ LoadRoot(at, Heap::kStackLimitRootIndex);
4602 __ Branch(deferred_stack_check->entry(), lo, sp, Operand(at));
4603 __ bind(instr->done_label());
4604 deferred_stack_check->SetExit(instr->done_label());
4605 }
4606 }
4607
4608
4609 void LCodeGen::DoOsrEntry(LOsrEntry* instr) {
4610 // This is a pseudo-instruction that ensures that the environment here is
4611 // properly registered for deoptimization and records the assembler's PC
4612 // offset.
4613 LEnvironment* environment = instr->environment();
4614 environment->SetSpilledRegisters(instr->SpilledRegisterArray(),
4615 instr->SpilledDoubleRegisterArray());
4616
4617 // If the environment were already registered, we would have no way of
4618 // backpatching it with the spill slot operands.
4619 ASSERT(!environment->HasBeenRegistered());
4620 RegisterEnvironmentForDeoptimization(environment);
4621 ASSERT(osr_pc_offset_ == -1);
4622 osr_pc_offset_ = masm()->pc_offset();
4623 }
4624
4625
4626 #undef __
4627
4628 } } // namespace v8::internal