// Copyright 2012 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#include "src/code-factory.h"
#include "src/code-stubs.h"
#include "src/hydrogen-osr.h"
#include "src/ic/ic.h"
#include "src/ic/stub-cache.h"
#include "src/mips64/lithium-codegen-mips64.h"
#include "src/mips64/lithium-gap-resolver-mips64.h"
#include "src/profiler/cpu-profiler.h"

namespace v8 {
namespace internal {


class SafepointGenerator final : public CallWrapper {
 public:
  SafepointGenerator(LCodeGen* codegen,
                     LPointerMap* pointers,
                     Safepoint::DeoptMode mode)
      : codegen_(codegen),
        pointers_(pointers),
        deopt_mode_(mode) { }
  virtual ~SafepointGenerator() {}

  void BeforeCall(int call_size) const override {}

  void AfterCall() const override {
    codegen_->RecordSafepoint(pointers_, deopt_mode_);
  }

 private:
  LCodeGen* codegen_;
  LPointerMap* pointers_;
  Safepoint::DeoptMode deopt_mode_;
};
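
// A SafepointGenerator is handed to macro-assembler call helpers that take a
// CallWrapper, so that AfterCall() records a safepoint immediately after the
// call instruction. Illustrative sketch (names as used elsewhere in this
// codegen, not code from this file):
//
//   SafepointGenerator generator(this, instr->pointer_map(),
//                                Safepoint::kLazyDeopt);
//   __ InvokeFunction(a1, actual_count, CALL_FUNCTION, generator);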


#define __ masm()->

bool LCodeGen::GenerateCode() {
  LPhase phase("Z_Code generation", chunk());
  DCHECK(is_unused());
  status_ = GENERATING;

  // Open a frame scope to indicate that there is a frame on the stack. The
  // NONE indicates that the scope shouldn't actually generate code to set up
  // the frame (that is done in GeneratePrologue).
  FrameScope frame_scope(masm_, StackFrame::NONE);

  return GeneratePrologue() && GenerateBody() && GenerateDeferredCode() &&
         GenerateJumpTable() && GenerateSafepointTable();
}


void LCodeGen::FinishCode(Handle<Code> code) {
  DCHECK(is_done());
  code->set_stack_slots(GetStackSlotCount());
  code->set_safepoint_table_offset(safepoints_.GetCodeOffset());
  PopulateDeoptimizationData(code);
}


void LCodeGen::SaveCallerDoubles() {
  DCHECK(info()->saves_caller_doubles());
  DCHECK(NeedsEagerFrame());
  Comment(";;; Save clobbered caller double registers");
  int count = 0;
  BitVector* doubles = chunk()->allocated_double_registers();
  BitVector::Iterator save_iterator(doubles);
  while (!save_iterator.Done()) {
    __ sdc1(DoubleRegister::from_code(save_iterator.Current()),
            MemOperand(sp, count * kDoubleSize));
    save_iterator.Advance();
    count++;
  }
}


void LCodeGen::RestoreCallerDoubles() {
  DCHECK(info()->saves_caller_doubles());
  DCHECK(NeedsEagerFrame());
  Comment(";;; Restore clobbered caller double registers");
  BitVector* doubles = chunk()->allocated_double_registers();
  BitVector::Iterator save_iterator(doubles);
  int count = 0;
  while (!save_iterator.Done()) {
    __ ldc1(DoubleRegister::from_code(save_iterator.Current()),
            MemOperand(sp, count * kDoubleSize));
    save_iterator.Advance();
    count++;
  }
}


bool LCodeGen::GeneratePrologue() {
  DCHECK(is_generating());

  if (info()->IsOptimizing()) {
    ProfileEntryHookStub::MaybeCallEntryHook(masm_);

#ifdef DEBUG
    if (strlen(FLAG_stop_at) > 0 &&
        info_->literal()->name()->IsUtf8EqualTo(CStrVector(FLAG_stop_at))) {
      __ stop("stop_at");
    }
#endif

    // a1: Callee's JS function.
    // cp: Callee's context.
    // fp: Caller's frame pointer.
    // ra: Caller's pc.

    // Sloppy mode functions and builtins need to replace the receiver with
    // the global proxy when called as functions (without an explicit receiver
    // object).
    if (info()->MustReplaceUndefinedReceiverWithGlobalProxy()) {
      Label ok;
      int receiver_offset = info_->scope()->num_parameters() * kPointerSize;
      __ LoadRoot(at, Heap::kUndefinedValueRootIndex);
      __ ld(a2, MemOperand(sp, receiver_offset));
      __ Branch(&ok, ne, a2, Operand(at));

      __ ld(a2, GlobalObjectOperand());
      __ ld(a2, FieldMemOperand(a2, GlobalObject::kGlobalProxyOffset));

      __ sd(a2, MemOperand(sp, receiver_offset));

      __ bind(&ok);
    }
  }

  info()->set_prologue_offset(masm_->pc_offset());
  if (NeedsEagerFrame()) {
    if (info()->IsStub()) {
      __ StubPrologue();
    } else {
      __ Prologue(info()->IsCodePreAgingActive());
    }
    frame_is_built_ = true;
  }

  // Reserve space for the stack slots needed by the code.
  int slots = GetStackSlotCount();
  if (slots > 0) {
    if (FLAG_debug_code) {
      __ Dsubu(sp, sp, Operand(slots * kPointerSize));
      __ Push(a0, a1);
      __ Daddu(a0, sp, Operand(slots * kPointerSize));
      __ li(a1, Operand(kSlotsZapValue));
      Label loop;
      __ bind(&loop);
      __ Dsubu(a0, a0, Operand(kPointerSize));
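      // The store below is offset by 2 * kPointerSize to compensate for the
      // two registers pushed above (a0 and a1): a0 was derived from sp after
      // the push, so the zap value lands in the reserved slot area rather
      // than on the saved registers.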
      __ sd(a1, MemOperand(a0, 2 * kPointerSize));
      __ Branch(&loop, ne, a0, Operand(sp));
      __ Pop(a0, a1);
    } else {
      __ Dsubu(sp, sp, Operand(slots * kPointerSize));
    }
  }

  if (info()->saves_caller_doubles()) {
    SaveCallerDoubles();
  }
  return !is_aborted();
}


void LCodeGen::DoPrologue(LPrologue* instr) {
  Comment(";;; Prologue begin");

  // Possibly allocate a local context.
  if (info()->scope()->num_heap_slots() > 0) {
    Comment(";;; Allocate local context");
    bool need_write_barrier = true;
    // Argument to NewContext is the function, which is in a1.
    int slots = info()->scope()->num_heap_slots() - Context::MIN_CONTEXT_SLOTS;
    Safepoint::DeoptMode deopt_mode = Safepoint::kNoLazyDeopt;
    if (info()->scope()->is_script_scope()) {
      __ push(a1);
      __ Push(info()->scope()->GetScopeInfo(info()->isolate()));
      __ CallRuntime(Runtime::kNewScriptContext, 2);
      deopt_mode = Safepoint::kLazyDeopt;
    } else if (slots <= FastNewContextStub::kMaximumSlots) {
      FastNewContextStub stub(isolate(), slots);
      __ CallStub(&stub);
      // Result of FastNewContextStub is always in new space.
      need_write_barrier = false;
    } else {
      __ push(a1);
      __ CallRuntime(Runtime::kNewFunctionContext, 1);
    }
    RecordSafepoint(deopt_mode);

    // Context is returned in v0. It replaces the context passed to us.
    // It's saved in the stack and kept live in cp.
    __ mov(cp, v0);
    __ sd(v0, MemOperand(fp, StandardFrameConstants::kContextOffset));
    // Copy any necessary parameters into the context.
    int num_parameters = scope()->num_parameters();
    int first_parameter = scope()->has_this_declaration() ? -1 : 0;
    for (int i = first_parameter; i < num_parameters; i++) {
      Variable* var = (i == -1) ? scope()->receiver() : scope()->parameter(i);
      if (var->IsContextSlot()) {
        int parameter_offset = StandardFrameConstants::kCallerSPOffset +
            (num_parameters - 1 - i) * kPointerSize;
        // Load parameter from stack.
        __ ld(a0, MemOperand(fp, parameter_offset));
        // Store it in the context.
        MemOperand target = ContextOperand(cp, var->index());
        __ sd(a0, target);
        // Update the write barrier. This clobbers a3 and a0.
        if (need_write_barrier) {
          __ RecordWriteContextSlot(
              cp, target.offset(), a0, a3, GetRAState(), kSaveFPRegs);
        } else if (FLAG_debug_code) {
          Label done;
          __ JumpIfInNewSpace(cp, a0, &done);
          __ Abort(kExpectedNewSpaceObject);
          __ bind(&done);
        }
      }
    }
    Comment(";;; End allocate local context");
  }

  Comment(";;; Prologue end");
}


void LCodeGen::GenerateOsrPrologue() {
  // Generate the OSR entry prologue at the first unknown OSR value, or if
  // there are none, at the OSR entrypoint instruction.
  if (osr_pc_offset_ >= 0) return;

  osr_pc_offset_ = masm()->pc_offset();

  // Adjust the frame size, subsuming the unoptimized frame into the
  // optimized frame.
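  // For example (illustrative numbers): if the optimized code needs 10 spill
  // slots and the unoptimized frame already occupies 6 of them, only 4
  // additional slots are reserved below.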
  int slots = GetStackSlotCount() - graph()->osr()->UnoptimizedFrameSlots();
  DCHECK(slots >= 0);
  __ Dsubu(sp, sp, Operand(slots * kPointerSize));
}


void LCodeGen::GenerateBodyInstructionPre(LInstruction* instr) {
  if (instr->IsCall()) {
    EnsureSpaceForLazyDeopt(Deoptimizer::patch_size());
  }
  if (!instr->IsLazyBailout() && !instr->IsGap()) {
    safepoints_.BumpLastLazySafepointIndex();
  }
}


bool LCodeGen::GenerateDeferredCode() {
  DCHECK(is_generating());
  if (deferred_.length() > 0) {
    for (int i = 0; !is_aborted() && i < deferred_.length(); i++) {
      LDeferredCode* code = deferred_[i];

      HValue* value =
          instructions_->at(code->instruction_index())->hydrogen_value();
      RecordAndWritePosition(
          chunk()->graph()->SourcePositionToScriptPosition(value->position()));

      Comment(";;; <@%d,#%d> "
              "-------------------- Deferred %s --------------------",
              code->instruction_index(),
              code->instr()->hydrogen_value()->id(),
              code->instr()->Mnemonic());
      __ bind(code->entry());
      if (NeedsDeferredFrame()) {
        Comment(";;; Build frame");
        DCHECK(!frame_is_built_);
        DCHECK(info()->IsStub());
        frame_is_built_ = true;
        __ MultiPush(cp.bit() | fp.bit() | ra.bit());
        __ li(scratch0(), Operand(Smi::FromInt(StackFrame::STUB)));
        __ push(scratch0());
        __ Daddu(fp, sp,
                 Operand(StandardFrameConstants::kFixedFrameSizeFromFp));
        Comment(";;; Deferred code");
      }
      code->Generate();
      if (NeedsDeferredFrame()) {
        Comment(";;; Destroy frame");
        DCHECK(frame_is_built_);
        __ pop(at);
        __ MultiPop(cp.bit() | fp.bit() | ra.bit());
        frame_is_built_ = false;
      }
      __ jmp(code->exit());
    }
  }
  // Deferred code is the last part of the instruction sequence. Mark
  // the generated code as done unless we bailed out.
  if (!is_aborted()) status_ = DONE;
  return !is_aborted();
}


bool LCodeGen::GenerateJumpTable() {
  if (jump_table_.length() > 0) {
    Comment(";;; -------------------- Jump table --------------------");
    Assembler::BlockTrampolinePoolScope block_trampoline_pool(masm_);
    Label table_start, call_deopt_entry;

    __ bind(&table_start);
    Label needs_frame;
    Address base = jump_table_[0]->address;
    for (int i = 0; i < jump_table_.length(); i++) {
      Deoptimizer::JumpTableEntry* table_entry = jump_table_[i];
      __ bind(&table_entry->label);
      Address entry = table_entry->address;
      DeoptComment(table_entry->deopt_info);

      // Second-level deopt table entries are contiguous and small, so instead
      // of loading the full, absolute address of each one, load the base
      // address and add an immediate offset.
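      // (The entries are emitted back to back with a fixed, small stride, so
      // entry - base is expected to fit in a 16-bit immediate for all but
      // unusually large tables; the else branch below handles the rest.)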
      if (is_int16(entry - base)) {
        if (table_entry->needs_frame) {
          DCHECK(!info()->saves_caller_doubles());
          Comment(";;; call deopt with frame");
          __ MultiPush(cp.bit() | fp.bit() | ra.bit());
          __ BranchAndLink(&needs_frame, USE_DELAY_SLOT);
          __ li(t9, Operand(entry - base));
        } else {
          __ BranchAndLink(&call_deopt_entry, USE_DELAY_SLOT);
          __ li(t9, Operand(entry - base));
        }

      } else {
        __ li(t9, Operand(entry - base));
        if (table_entry->needs_frame) {
          DCHECK(!info()->saves_caller_doubles());
          Comment(";;; call deopt with frame");
          __ MultiPush(cp.bit() | fp.bit() | ra.bit());
          __ BranchAndLink(&needs_frame);
        } else {
          __ BranchAndLink(&call_deopt_entry);
        }
      }
      info()->LogDeoptCallPosition(masm()->pc_offset(),
                                   table_entry->deopt_info.inlining_id);
    }
    if (needs_frame.is_linked()) {
      __ bind(&needs_frame);
      // This variant of deopt can only be used with stubs. Since we don't
      // have a function pointer to install in the stack frame that we're
      // building, install a special marker there instead.
      DCHECK(info()->IsStub());
      __ li(at, Operand(Smi::FromInt(StackFrame::STUB)));
      __ push(at);
      __ Daddu(fp, sp, Operand(StandardFrameConstants::kFixedFrameSizeFromFp));
    }

    Comment(";;; call deopt");
    __ bind(&call_deopt_entry);

    if (info()->saves_caller_doubles()) {
      DCHECK(info()->IsStub());
      RestoreCallerDoubles();
    }

    __ li(at,
          Operand(reinterpret_cast<int64_t>(base), RelocInfo::RUNTIME_ENTRY));
    __ Daddu(t9, t9, Operand(at));
    __ Jump(t9);
  }
  // The deoptimization jump table is the last part of the instruction
  // sequence. Mark the generated code as done unless we bailed out.
  if (!is_aborted()) status_ = DONE;
  return !is_aborted();
}


bool LCodeGen::GenerateSafepointTable() {
  DCHECK(is_done());
  safepoints_.Emit(masm(), GetStackSlotCount());
  return !is_aborted();
}


Register LCodeGen::ToRegister(int index) const {
  return Register::from_code(index);
}


DoubleRegister LCodeGen::ToDoubleRegister(int index) const {
  return DoubleRegister::from_code(index);
}


Register LCodeGen::ToRegister(LOperand* op) const {
  DCHECK(op->IsRegister());
  return ToRegister(op->index());
}


Register LCodeGen::EmitLoadRegister(LOperand* op, Register scratch) {
  if (op->IsRegister()) {
    return ToRegister(op->index());
  } else if (op->IsConstantOperand()) {
    LConstantOperand* const_op = LConstantOperand::cast(op);
    HConstant* constant = chunk_->LookupConstant(const_op);
    Handle<Object> literal = constant->handle(isolate());
    Representation r = chunk_->LookupLiteralRepresentation(const_op);
    if (r.IsInteger32()) {
      AllowDeferredHandleDereference get_number;
      DCHECK(literal->IsNumber());
      __ li(scratch, Operand(static_cast<int32_t>(literal->Number())));
    } else if (r.IsSmi()) {
      DCHECK(constant->HasSmiValue());
      __ li(scratch, Operand(Smi::FromInt(constant->Integer32Value())));
    } else if (r.IsDouble()) {
      Abort(kEmitLoadRegisterUnsupportedDoubleImmediate);
    } else {
      DCHECK(r.IsSmiOrTagged());
      __ li(scratch, literal);
    }
    return scratch;
  } else if (op->IsStackSlot()) {
    __ ld(scratch, ToMemOperand(op));
    return scratch;
  }
  UNREACHABLE();
  return scratch;
}


DoubleRegister LCodeGen::ToDoubleRegister(LOperand* op) const {
  DCHECK(op->IsDoubleRegister());
  return ToDoubleRegister(op->index());
}


DoubleRegister LCodeGen::EmitLoadDoubleRegister(LOperand* op,
                                                FloatRegister flt_scratch,
                                                DoubleRegister dbl_scratch) {
  if (op->IsDoubleRegister()) {
    return ToDoubleRegister(op->index());
  } else if (op->IsConstantOperand()) {
    LConstantOperand* const_op = LConstantOperand::cast(op);
    HConstant* constant = chunk_->LookupConstant(const_op);
    Handle<Object> literal = constant->handle(isolate());
    Representation r = chunk_->LookupLiteralRepresentation(const_op);
    if (r.IsInteger32()) {
      DCHECK(literal->IsNumber());
      __ li(at, Operand(static_cast<int32_t>(literal->Number())));
      __ mtc1(at, flt_scratch);
      __ cvt_d_w(dbl_scratch, flt_scratch);
      return dbl_scratch;
    } else if (r.IsDouble()) {
      Abort(kUnsupportedDoubleImmediate);
    } else if (r.IsTagged()) {
      Abort(kUnsupportedTaggedImmediate);
    }
  } else if (op->IsStackSlot()) {
    MemOperand mem_op = ToMemOperand(op);
    __ ldc1(dbl_scratch, mem_op);
    return dbl_scratch;
  }
  UNREACHABLE();
  return dbl_scratch;
}


Handle<Object> LCodeGen::ToHandle(LConstantOperand* op) const {
  HConstant* constant = chunk_->LookupConstant(op);
  DCHECK(chunk_->LookupLiteralRepresentation(op).IsSmiOrTagged());
  return constant->handle(isolate());
}


bool LCodeGen::IsInteger32(LConstantOperand* op) const {
  return chunk_->LookupLiteralRepresentation(op).IsSmiOrInteger32();
}


bool LCodeGen::IsSmi(LConstantOperand* op) const {
  return chunk_->LookupLiteralRepresentation(op).IsSmi();
}


int32_t LCodeGen::ToInteger32(LConstantOperand* op) const {
  HConstant* constant = chunk_->LookupConstant(op);
  return constant->Integer32Value();
}


int64_t LCodeGen::ToRepresentation_donotuse(LConstantOperand* op,
                                            const Representation& r) const {
  HConstant* constant = chunk_->LookupConstant(op);
  int32_t value = constant->Integer32Value();
  if (r.IsInteger32()) return value;
  DCHECK(r.IsSmiOrTagged());
  return reinterpret_cast<int64_t>(Smi::FromInt(value));
}


Smi* LCodeGen::ToSmi(LConstantOperand* op) const {
  HConstant* constant = chunk_->LookupConstant(op);
  return Smi::FromInt(constant->Integer32Value());
}


double LCodeGen::ToDouble(LConstantOperand* op) const {
  HConstant* constant = chunk_->LookupConstant(op);
  DCHECK(constant->HasDoubleValue());
  return constant->DoubleValue();
}


Operand LCodeGen::ToOperand(LOperand* op) {
  if (op->IsConstantOperand()) {
    LConstantOperand* const_op = LConstantOperand::cast(op);
    HConstant* constant = chunk()->LookupConstant(const_op);
    Representation r = chunk_->LookupLiteralRepresentation(const_op);
    if (r.IsSmi()) {
      DCHECK(constant->HasSmiValue());
      return Operand(Smi::FromInt(constant->Integer32Value()));
    } else if (r.IsInteger32()) {
      DCHECK(constant->HasInteger32Value());
      return Operand(constant->Integer32Value());
    } else if (r.IsDouble()) {
      Abort(kToOperandUnsupportedDoubleImmediate);
    }
    DCHECK(r.IsTagged());
    return Operand(constant->handle(isolate()));
  } else if (op->IsRegister()) {
    return Operand(ToRegister(op));
  } else if (op->IsDoubleRegister()) {
    Abort(kToOperandIsDoubleRegisterUnimplemented);
    return Operand(static_cast<int64_t>(0));
  }
  // Stack slots not implemented, use ToMemOperand instead.
  UNREACHABLE();
  return Operand(static_cast<int64_t>(0));
}


static int ArgumentsOffsetWithoutFrame(int index) {
  DCHECK(index < 0);
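  // For example: index -1 (the first argument) maps to offset 0 from sp,
  // index -2 to kPointerSize, and so on.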
  return -(index + 1) * kPointerSize;
}


MemOperand LCodeGen::ToMemOperand(LOperand* op) const {
  DCHECK(!op->IsRegister());
  DCHECK(!op->IsDoubleRegister());
  DCHECK(op->IsStackSlot() || op->IsDoubleStackSlot());
  if (NeedsEagerFrame()) {
    return MemOperand(fp, StackSlotOffset(op->index()));
  } else {
    // There is no eager frame: address the slot relative to the stack
    // pointer.
    return MemOperand(sp, ArgumentsOffsetWithoutFrame(op->index()));
  }
}

MemOperand LCodeGen::ToHighMemOperand(LOperand* op) const {
  DCHECK(op->IsDoubleStackSlot());
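  // A double stack slot is eight bytes wide; offsetting by kIntSize (4)
  // yields the address of its upper 32-bit half.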
  if (NeedsEagerFrame()) {
    return MemOperand(fp, StackSlotOffset(op->index()) + kIntSize);
  } else {
    // There is no eager frame: address the slot relative to the stack
    // pointer.
    return MemOperand(
        sp, ArgumentsOffsetWithoutFrame(op->index()) + kIntSize);
  }
}


void LCodeGen::WriteTranslation(LEnvironment* environment,
                                Translation* translation) {
  if (environment == NULL) return;

  // The translation includes one command per value in the environment.
  int translation_size = environment->translation_size();

  WriteTranslation(environment->outer(), translation);
  WriteTranslationFrame(environment, translation);

  int object_index = 0;
  int dematerialized_index = 0;
  for (int i = 0; i < translation_size; ++i) {
    LOperand* value = environment->values()->at(i);
    AddToTranslation(
        environment, translation, value, environment->HasTaggedValueAt(i),
        environment->HasUint32ValueAt(i), &object_index, &dematerialized_index);
  }
}


void LCodeGen::AddToTranslation(LEnvironment* environment,
                                Translation* translation,
                                LOperand* op,
                                bool is_tagged,
                                bool is_uint32,
                                int* object_index_pointer,
                                int* dematerialized_index_pointer) {
  if (op == LEnvironment::materialization_marker()) {
    int object_index = (*object_index_pointer)++;
    if (environment->ObjectIsDuplicateAt(object_index)) {
      int dupe_of = environment->ObjectDuplicateOfAt(object_index);
      translation->DuplicateObject(dupe_of);
      return;
    }
    int object_length = environment->ObjectLengthAt(object_index);
    if (environment->ObjectIsArgumentsAt(object_index)) {
      translation->BeginArgumentsObject(object_length);
    } else {
      translation->BeginCapturedObject(object_length);
    }
    int dematerialized_index = *dematerialized_index_pointer;
    int env_offset = environment->translation_size() + dematerialized_index;
    *dematerialized_index_pointer += object_length;
    for (int i = 0; i < object_length; ++i) {
      LOperand* value = environment->values()->at(env_offset + i);
      AddToTranslation(environment,
                       translation,
                       value,
                       environment->HasTaggedValueAt(env_offset + i),
                       environment->HasUint32ValueAt(env_offset + i),
                       object_index_pointer,
                       dematerialized_index_pointer);
    }
    return;
  }

  if (op->IsStackSlot()) {
    int index = op->index();
    if (index >= 0) {
      index += StandardFrameConstants::kFixedFrameSize / kPointerSize;
    }
    if (is_tagged) {
      translation->StoreStackSlot(index);
    } else if (is_uint32) {
      translation->StoreUint32StackSlot(index);
    } else {
      translation->StoreInt32StackSlot(index);
    }
  } else if (op->IsDoubleStackSlot()) {
    int index = op->index();
    if (index >= 0) {
      index += StandardFrameConstants::kFixedFrameSize / kPointerSize;
    }
    translation->StoreDoubleStackSlot(index);
  } else if (op->IsRegister()) {
    Register reg = ToRegister(op);
    if (is_tagged) {
      translation->StoreRegister(reg);
    } else if (is_uint32) {
      translation->StoreUint32Register(reg);
    } else {
      translation->StoreInt32Register(reg);
    }
  } else if (op->IsDoubleRegister()) {
    DoubleRegister reg = ToDoubleRegister(op);
    translation->StoreDoubleRegister(reg);
  } else if (op->IsConstantOperand()) {
    HConstant* constant = chunk()->LookupConstant(LConstantOperand::cast(op));
    int src_index = DefineDeoptimizationLiteral(constant->handle(isolate()));
    translation->StoreLiteral(src_index);
  } else {
    UNREACHABLE();
  }
}


void LCodeGen::CallCode(Handle<Code> code,
                        RelocInfo::Mode mode,
                        LInstruction* instr) {
  CallCodeGeneric(code, mode, instr, RECORD_SIMPLE_SAFEPOINT);
}


void LCodeGen::CallCodeGeneric(Handle<Code> code,
                               RelocInfo::Mode mode,
                               LInstruction* instr,
                               SafepointMode safepoint_mode) {
  DCHECK(instr != NULL);
  __ Call(code, mode);
  RecordSafepointWithLazyDeopt(instr, safepoint_mode);
}


void LCodeGen::CallRuntime(const Runtime::Function* function,
                           int num_arguments,
                           LInstruction* instr,
                           SaveFPRegsMode save_doubles) {
  DCHECK(instr != NULL);

  __ CallRuntime(function, num_arguments, save_doubles);

  RecordSafepointWithLazyDeopt(instr, RECORD_SIMPLE_SAFEPOINT);
}


void LCodeGen::LoadContextFromDeferred(LOperand* context) {
  if (context->IsRegister()) {
    __ Move(cp, ToRegister(context));
  } else if (context->IsStackSlot()) {
    __ ld(cp, ToMemOperand(context));
  } else if (context->IsConstantOperand()) {
    HConstant* constant =
        chunk_->LookupConstant(LConstantOperand::cast(context));
    __ li(cp, Handle<Object>::cast(constant->handle(isolate())));
  } else {
    UNREACHABLE();
  }
}


void LCodeGen::CallRuntimeFromDeferred(Runtime::FunctionId id,
                                       int argc,
                                       LInstruction* instr,
                                       LOperand* context) {
  LoadContextFromDeferred(context);
  __ CallRuntimeSaveDoubles(id);
  RecordSafepointWithRegisters(
      instr->pointer_map(), argc, Safepoint::kNoLazyDeopt);
}


void LCodeGen::RegisterEnvironmentForDeoptimization(LEnvironment* environment,
                                                    Safepoint::DeoptMode mode) {
  environment->set_has_been_used();
  if (!environment->HasBeenRegistered()) {
    // Physical stack frame layout:
    // -x ............. -4  0 ..................................... y
    // [incoming arguments] [spill slots] [pushed outgoing arguments]

    // Layout of the environment:
    // 0 ..................................................... size-1
    // [parameters] [locals] [expression stack including arguments]

    // Layout of the translation:
    // 0 ........................................................ size - 1 + 4
    // [expression stack including arguments] [locals] [4 words] [parameters]
    // |>------------  translation_size ------------<|

    int frame_count = 0;
    int jsframe_count = 0;
    for (LEnvironment* e = environment; e != NULL; e = e->outer()) {
      ++frame_count;
      if (e->frame_type() == JS_FUNCTION) {
        ++jsframe_count;
      }
    }
    Translation translation(&translations_, frame_count, jsframe_count, zone());
    WriteTranslation(environment, &translation);
    int deoptimization_index = deoptimizations_.length();
    int pc_offset = masm()->pc_offset();
    environment->Register(deoptimization_index,
                          translation.index(),
                          (mode == Safepoint::kLazyDeopt) ? pc_offset : -1);
    deoptimizations_.Add(environment, zone());
  }
}


void LCodeGen::DeoptimizeIf(Condition condition, LInstruction* instr,
                            Deoptimizer::DeoptReason deopt_reason,
                            Deoptimizer::BailoutType bailout_type,
                            Register src1, const Operand& src2) {
  LEnvironment* environment = instr->environment();
  RegisterEnvironmentForDeoptimization(environment, Safepoint::kNoLazyDeopt);
  DCHECK(environment->HasBeenRegistered());
  int id = environment->deoptimization_index();
  Address entry =
      Deoptimizer::GetDeoptimizationEntry(isolate(), id, bailout_type);
  if (entry == NULL) {
    Abort(kBailoutWasNotPrepared);
    return;
  }

  if (FLAG_deopt_every_n_times != 0 && !info()->IsStub()) {
    Register scratch = scratch0();
    ExternalReference count = ExternalReference::stress_deopt_count(isolate());
    Label no_deopt;
    __ Push(a1, scratch);
    __ li(scratch, Operand(count));
    __ lw(a1, MemOperand(scratch));
    __ Subu(a1, a1, Operand(1));
    __ Branch(&no_deopt, ne, a1, Operand(zero_reg));
    __ li(a1, Operand(FLAG_deopt_every_n_times));
    __ sw(a1, MemOperand(scratch));
    __ Pop(a1, scratch);

    __ Call(entry, RelocInfo::RUNTIME_ENTRY);
    __ bind(&no_deopt);
    __ sw(a1, MemOperand(scratch));
    __ Pop(a1, scratch);
  }

  if (info()->ShouldTrapOnDeopt()) {
    Label skip;
    if (condition != al) {
      __ Branch(&skip, NegateCondition(condition), src1, src2);
    }
    __ stop("trap_on_deopt");
    __ bind(&skip);
  }

  Deoptimizer::DeoptInfo deopt_info = MakeDeoptInfo(instr, deopt_reason);

  DCHECK(info()->IsStub() || frame_is_built_);
  // Go through the jump table if the call is conditional, a frame must be
  // built, or caller doubles must be restored.
  if (condition == al && frame_is_built_ &&
      !info()->saves_caller_doubles()) {
    DeoptComment(deopt_info);
    __ Call(entry, RelocInfo::RUNTIME_ENTRY, condition, src1, src2);
    info()->LogDeoptCallPosition(masm()->pc_offset(), deopt_info.inlining_id);
  } else {
    Deoptimizer::JumpTableEntry* table_entry =
        new (zone()) Deoptimizer::JumpTableEntry(
            entry, deopt_info, bailout_type, !frame_is_built_);
    // We often have several deopts to the same entry, reuse the last
    // jump entry if this is the case.
    if (FLAG_trace_deopt || isolate()->cpu_profiler()->is_profiling() ||
        jump_table_.is_empty() ||
        !table_entry->IsEquivalentTo(*jump_table_.last())) {
      jump_table_.Add(table_entry, zone());
    }
    __ Branch(&jump_table_.last()->label, condition, src1, src2);
  }
}


void LCodeGen::DeoptimizeIf(Condition condition, LInstruction* instr,
                            Deoptimizer::DeoptReason deopt_reason,
                            Register src1, const Operand& src2) {
  Deoptimizer::BailoutType bailout_type = info()->IsStub()
      ? Deoptimizer::LAZY
      : Deoptimizer::EAGER;
  DeoptimizeIf(condition, instr, deopt_reason, bailout_type, src1, src2);
}


void LCodeGen::PopulateDeoptimizationData(Handle<Code> code) {
  int length = deoptimizations_.length();
  if (length == 0) return;
  Handle<DeoptimizationInputData> data =
      DeoptimizationInputData::New(isolate(), length, TENURED);

  Handle<ByteArray> translations =
      translations_.CreateByteArray(isolate()->factory());
  data->SetTranslationByteArray(*translations);
  data->SetInlinedFunctionCount(Smi::FromInt(inlined_function_count_));
  data->SetOptimizationId(Smi::FromInt(info_->optimization_id()));
  if (info_->IsOptimizing()) {
    // Reference to shared function info does not change between phases.
    AllowDeferredHandleDereference allow_handle_dereference;
    data->SetSharedFunctionInfo(*info_->shared_info());
  } else {
    data->SetSharedFunctionInfo(Smi::FromInt(0));
  }
  data->SetWeakCellCache(Smi::FromInt(0));

  Handle<FixedArray> literals =
      factory()->NewFixedArray(deoptimization_literals_.length(), TENURED);
  { AllowDeferredHandleDereference copy_handles;
    for (int i = 0; i < deoptimization_literals_.length(); i++) {
      literals->set(i, *deoptimization_literals_[i]);
    }
    data->SetLiteralArray(*literals);
  }

  data->SetOsrAstId(Smi::FromInt(info_->osr_ast_id().ToInt()));
  data->SetOsrPcOffset(Smi::FromInt(osr_pc_offset_));

  // Populate the deoptimization entries.
  for (int i = 0; i < length; i++) {
    LEnvironment* env = deoptimizations_[i];
    data->SetAstId(i, env->ast_id());
    data->SetTranslationIndex(i, Smi::FromInt(env->translation_index()));
    data->SetArgumentsStackHeight(i,
                                  Smi::FromInt(env->arguments_stack_height()));
    data->SetPc(i, Smi::FromInt(env->pc_offset()));
  }
  code->set_deoptimization_data(*data);
}


void LCodeGen::PopulateDeoptimizationLiteralsWithInlinedFunctions() {
  DCHECK_EQ(0, deoptimization_literals_.length());
  for (auto function : chunk()->inlined_functions()) {
    DefineDeoptimizationLiteral(function);
  }
  inlined_function_count_ = deoptimization_literals_.length();
}


void LCodeGen::RecordSafepointWithLazyDeopt(
    LInstruction* instr, SafepointMode safepoint_mode) {
  if (safepoint_mode == RECORD_SIMPLE_SAFEPOINT) {
    RecordSafepoint(instr->pointer_map(), Safepoint::kLazyDeopt);
  } else {
    DCHECK(safepoint_mode == RECORD_SAFEPOINT_WITH_REGISTERS_AND_NO_ARGUMENTS);
    RecordSafepointWithRegisters(
        instr->pointer_map(), 0, Safepoint::kLazyDeopt);
  }
}


void LCodeGen::RecordSafepoint(
    LPointerMap* pointers,
    Safepoint::Kind kind,
    int arguments,
    Safepoint::DeoptMode deopt_mode) {
  DCHECK(expected_safepoint_kind_ == kind);

  const ZoneList<LOperand*>* operands = pointers->GetNormalizedOperands();
  Safepoint safepoint = safepoints_.DefineSafepoint(masm(),
      kind, arguments, deopt_mode);
  for (int i = 0; i < operands->length(); i++) {
    LOperand* pointer = operands->at(i);
    if (pointer->IsStackSlot()) {
      safepoint.DefinePointerSlot(pointer->index(), zone());
    } else if (pointer->IsRegister() && (kind & Safepoint::kWithRegisters)) {
      safepoint.DefinePointerRegister(ToRegister(pointer), zone());
    }
  }
}


void LCodeGen::RecordSafepoint(LPointerMap* pointers,
                               Safepoint::DeoptMode deopt_mode) {
  RecordSafepoint(pointers, Safepoint::kSimple, 0, deopt_mode);
}


void LCodeGen::RecordSafepoint(Safepoint::DeoptMode deopt_mode) {
  LPointerMap empty_pointers(zone());
  RecordSafepoint(&empty_pointers, deopt_mode);
}


void LCodeGen::RecordSafepointWithRegisters(LPointerMap* pointers,
                                            int arguments,
                                            Safepoint::DeoptMode deopt_mode) {
  RecordSafepoint(
      pointers, Safepoint::kWithRegisters, arguments, deopt_mode);
}


void LCodeGen::RecordAndWritePosition(int position) {
  if (position == RelocInfo::kNoPosition) return;
  masm()->positions_recorder()->RecordPosition(position);
  masm()->positions_recorder()->WriteRecordedPositions();
}


static const char* LabelType(LLabel* label) {
  if (label->is_loop_header()) return " (loop header)";
  if (label->is_osr_entry()) return " (OSR entry)";
  return "";
}


void LCodeGen::DoLabel(LLabel* label) {
  Comment(";;; <@%d,#%d> -------------------- B%d%s --------------------",
          current_instruction_,
          label->hydrogen_value()->id(),
          label->block_id(),
          LabelType(label));
  __ bind(label->label());
  current_block_ = label->block_id();
  DoGap(label);
}


void LCodeGen::DoParallelMove(LParallelMove* move) {
  resolver_.Resolve(move);
}


void LCodeGen::DoGap(LGap* gap) {
  for (int i = LGap::FIRST_INNER_POSITION;
       i <= LGap::LAST_INNER_POSITION;
       i++) {
    LGap::InnerPosition inner_pos = static_cast<LGap::InnerPosition>(i);
    LParallelMove* move = gap->GetParallelMove(inner_pos);
    if (move != NULL) DoParallelMove(move);
  }
}


void LCodeGen::DoInstructionGap(LInstructionGap* instr) {
  DoGap(instr);
}


void LCodeGen::DoParameter(LParameter* instr) {
  // Nothing to do.
}


void LCodeGen::DoCallStub(LCallStub* instr) {
  DCHECK(ToRegister(instr->context()).is(cp));
  DCHECK(ToRegister(instr->result()).is(v0));
  switch (instr->hydrogen()->major_key()) {
    case CodeStub::RegExpExec: {
      RegExpExecStub stub(isolate());
      CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
      break;
    }
    case CodeStub::SubString: {
      SubStringStub stub(isolate());
      CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
      break;
    }
    default:
      UNREACHABLE();
  }
}


void LCodeGen::DoUnknownOSRValue(LUnknownOSRValue* instr) {
  GenerateOsrPrologue();
}


void LCodeGen::DoModByPowerOf2I(LModByPowerOf2I* instr) {
  Register dividend = ToRegister(instr->dividend());
  int32_t divisor = instr->divisor();
  DCHECK(dividend.is(ToRegister(instr->result())));

  // Theoretically, a variation of the branch-free code for integer division by
  // a power of 2 (calculating the remainder via an additional multiplication
  // (which gets simplified to an 'and') and subtraction) should be faster, and
  // this is exactly what GCC and clang emit. Nevertheless, benchmarks seem to
  // indicate that positive dividends are heavily favored, so the branching
  // version performs better.
  HMod* hmod = instr->hydrogen();
  int32_t mask = divisor < 0 ? -(divisor + 1) : (divisor - 1);
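  // Worked example (illustrative): divisor == -8 gives mask == 7; for a
  // dividend of -13 the code below negates (13), masks (13 & 7 == 5) and
  // negates again, yielding -13 % -8 == -5, a remainder with the sign of
  // the dividend.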
  Label dividend_is_not_negative, done;

  if (hmod->CheckFlag(HValue::kLeftCanBeNegative)) {
    __ Branch(&dividend_is_not_negative, ge, dividend, Operand(zero_reg));
    // Note: The code below even works when right contains kMinInt.
    __ dsubu(dividend, zero_reg, dividend);
    __ And(dividend, dividend, Operand(mask));
    if (hmod->CheckFlag(HValue::kBailoutOnMinusZero)) {
      DeoptimizeIf(eq, instr, Deoptimizer::kMinusZero, dividend,
                   Operand(zero_reg));
    }
    __ Branch(USE_DELAY_SLOT, &done);
    __ dsubu(dividend, zero_reg, dividend);
  }

  __ bind(&dividend_is_not_negative);
  __ And(dividend, dividend, Operand(mask));
  __ bind(&done);
}


void LCodeGen::DoModByConstI(LModByConstI* instr) {
  Register dividend = ToRegister(instr->dividend());
  int32_t divisor = instr->divisor();
  Register result = ToRegister(instr->result());
  DCHECK(!dividend.is(result));

  if (divisor == 0) {
    DeoptimizeIf(al, instr, Deoptimizer::kDivisionByZero);
    return;
  }

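  // TruncatingDiv computes dividend / Abs(divisor) with a magic-number
  // multiply-high sequence rather than a division instruction; the remainder
  // is then recovered as dividend - quotient * Abs(divisor), which carries
  // the sign of the dividend.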
  __ TruncatingDiv(result, dividend, Abs(divisor));
  __ Dmul(result, result, Operand(Abs(divisor)));
  __ Dsubu(result, dividend, Operand(result));

  // Check for negative zero.
  HMod* hmod = instr->hydrogen();
  if (hmod->CheckFlag(HValue::kBailoutOnMinusZero)) {
    Label remainder_not_zero;
    __ Branch(&remainder_not_zero, ne, result, Operand(zero_reg));
    DeoptimizeIf(lt, instr, Deoptimizer::kMinusZero, dividend,
                 Operand(zero_reg));
    __ bind(&remainder_not_zero);
  }
}


void LCodeGen::DoModI(LModI* instr) {
  HMod* hmod = instr->hydrogen();
  const Register left_reg = ToRegister(instr->left());
  const Register right_reg = ToRegister(instr->right());
  const Register result_reg = ToRegister(instr->result());

  // div runs in the background while we check for special cases.
  __ Dmod(result_reg, left_reg, right_reg);

  Label done;
  // Check for x % 0, we have to deopt in this case because we can't return a
  // NaN.
  if (hmod->CheckFlag(HValue::kCanBeDivByZero)) {
    DeoptimizeIf(eq, instr, Deoptimizer::kDivisionByZero, right_reg,
                 Operand(zero_reg));
  }

  // Check for kMinInt % -1, div will return kMinInt, which is not what we
  // want. We have to deopt if we care about -0, because we can't return that.
  if (hmod->CheckFlag(HValue::kCanOverflow)) {
    Label no_overflow_possible;
    __ Branch(&no_overflow_possible, ne, left_reg, Operand(kMinInt));
    if (hmod->CheckFlag(HValue::kBailoutOnMinusZero)) {
      DeoptimizeIf(eq, instr, Deoptimizer::kMinusZero, right_reg, Operand(-1));
    } else {
      __ Branch(&no_overflow_possible, ne, right_reg, Operand(-1));
      __ Branch(USE_DELAY_SLOT, &done);
      __ mov(result_reg, zero_reg);
    }
    __ bind(&no_overflow_possible);
  }

  // If we care about -0, test if the dividend is <0 and the result is 0.
  __ Branch(&done, ge, left_reg, Operand(zero_reg));

  if (hmod->CheckFlag(HValue::kBailoutOnMinusZero)) {
    DeoptimizeIf(eq, instr, Deoptimizer::kMinusZero, result_reg,
                 Operand(zero_reg));
  }
  __ bind(&done);
}


void LCodeGen::DoDivByPowerOf2I(LDivByPowerOf2I* instr) {
  Register dividend = ToRegister(instr->dividend());
  int32_t divisor = instr->divisor();
  Register result = ToRegister(instr->result());
  DCHECK(divisor == kMinInt || base::bits::IsPowerOfTwo32(Abs(divisor)));
  DCHECK(!result.is(dividend));

  // Check for (0 / -x) that will produce negative zero.
  HDiv* hdiv = instr->hydrogen();
  if (hdiv->CheckFlag(HValue::kBailoutOnMinusZero) && divisor < 0) {
    DeoptimizeIf(eq, instr, Deoptimizer::kMinusZero, dividend,
                 Operand(zero_reg));
  }
  // Check for (kMinInt / -1).
  if (hdiv->CheckFlag(HValue::kCanOverflow) && divisor == -1) {
    DeoptimizeIf(eq, instr, Deoptimizer::kOverflow, dividend, Operand(kMinInt));
  }
  // Deoptimize if remainder will not be 0.
  if (!hdiv->CheckFlag(HInstruction::kAllUsesTruncatingToInt32) &&
      divisor != 1 && divisor != -1) {
    int32_t mask = divisor < 0 ? -(divisor + 1) : (divisor - 1);
    __ And(at, dividend, Operand(mask));
    DeoptimizeIf(ne, instr, Deoptimizer::kLostPrecision, at, Operand(zero_reg));
  }

  if (divisor == -1) {  // Nice shortcut, not needed for correctness.
    __ Dsubu(result, zero_reg, dividend);
    return;
  }
  uint16_t shift = WhichPowerOf2Abs(divisor);
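  // The shift sequences below add 2^shift - 1 to negative dividends before
  // the arithmetic shift, turning the shift's round-toward-negative-infinity
  // behaviour into the round-toward-zero behaviour of integer division.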
  if (shift == 0) {
    __ Move(result, dividend);
  } else if (shift == 1) {
    __ dsrl32(result, dividend, 31);
    __ Daddu(result, dividend, Operand(result));
  } else {
    __ dsra32(result, dividend, 31);
    __ dsrl32(result, result, 32 - shift);
    __ Daddu(result, dividend, Operand(result));
  }
  if (shift > 0) __ dsra(result, result, shift);
  if (divisor < 0) __ Dsubu(result, zero_reg, result);
}


void LCodeGen::DoDivByConstI(LDivByConstI* instr) {
  Register dividend = ToRegister(instr->dividend());
  int32_t divisor = instr->divisor();
  Register result = ToRegister(instr->result());
  DCHECK(!dividend.is(result));

  if (divisor == 0) {
    DeoptimizeIf(al, instr, Deoptimizer::kDivisionByZero);
    return;
  }

  // Check for (0 / -x) that will produce negative zero.
  HDiv* hdiv = instr->hydrogen();
  if (hdiv->CheckFlag(HValue::kBailoutOnMinusZero) && divisor < 0) {
    DeoptimizeIf(eq, instr, Deoptimizer::kMinusZero, dividend,
                 Operand(zero_reg));
  }

  __ TruncatingDiv(result, dividend, Abs(divisor));
  if (divisor < 0) __ Subu(result, zero_reg, result);

  if (!hdiv->CheckFlag(HInstruction::kAllUsesTruncatingToInt32)) {
    __ Dmul(scratch0(), result, Operand(divisor));
    __ Dsubu(scratch0(), scratch0(), dividend);
    DeoptimizeIf(ne, instr, Deoptimizer::kLostPrecision, scratch0(),
                 Operand(zero_reg));
  }
}


// TODO(svenpanne) Refactor this to avoid code duplication with DoFlooringDivI.
void LCodeGen::DoDivI(LDivI* instr) {
  HBinaryOperation* hdiv = instr->hydrogen();
  Register dividend = ToRegister(instr->dividend());
  Register divisor = ToRegister(instr->divisor());
  const Register result = ToRegister(instr->result());

  // On MIPS div is asynchronous - it will run in the background while we
  // check for special cases.
  __ Div(result, dividend, divisor);

  // Check for x / 0.
  if (hdiv->CheckFlag(HValue::kCanBeDivByZero)) {
    DeoptimizeIf(eq, instr, Deoptimizer::kDivisionByZero, divisor,
                 Operand(zero_reg));
  }

  // Check for (0 / -x) that will produce negative zero.
  if (hdiv->CheckFlag(HValue::kBailoutOnMinusZero)) {
    Label left_not_zero;
    __ Branch(&left_not_zero, ne, dividend, Operand(zero_reg));
    DeoptimizeIf(lt, instr, Deoptimizer::kMinusZero, divisor,
                 Operand(zero_reg));
    __ bind(&left_not_zero);
  }

  // Check for (kMinInt / -1).
  if (hdiv->CheckFlag(HValue::kCanOverflow) &&
      !hdiv->CheckFlag(HValue::kAllUsesTruncatingToInt32)) {
    Label left_not_min_int;
    __ Branch(&left_not_min_int, ne, dividend, Operand(kMinInt));
    DeoptimizeIf(eq, instr, Deoptimizer::kOverflow, divisor, Operand(-1));
    __ bind(&left_not_min_int);
  }

  if (!hdiv->CheckFlag(HValue::kAllUsesTruncatingToInt32)) {
    // Calculate remainder.
    Register remainder = ToRegister(instr->temp());
    if (kArchVariant != kMips64r6) {
      __ mfhi(remainder);
    } else {
      __ dmod(remainder, dividend, divisor);
    }
    DeoptimizeIf(ne, instr, Deoptimizer::kLostPrecision, remainder,
                 Operand(zero_reg));
  }
}


void LCodeGen::DoMultiplyAddD(LMultiplyAddD* instr) {
  DoubleRegister addend = ToDoubleRegister(instr->addend());
  DoubleRegister multiplier = ToDoubleRegister(instr->multiplier());
  DoubleRegister multiplicand = ToDoubleRegister(instr->multiplicand());

  // This is computed in-place.
  DCHECK(addend.is(ToDoubleRegister(instr->result())));

  __ Madd_d(addend, addend, multiplier, multiplicand, double_scratch0());
}


void LCodeGen::DoFlooringDivByPowerOf2I(LFlooringDivByPowerOf2I* instr) {
  Register dividend = ToRegister(instr->dividend());
  Register result = ToRegister(instr->result());
  int32_t divisor = instr->divisor();
  Register scratch = result.is(dividend) ? scratch0() : dividend;
  DCHECK(!result.is(dividend) || !scratch.is(dividend));

  // If the divisor is 1, return the dividend.
  if (divisor == 1) {
    __ Move(result, dividend);
    return;
  }

  // If the divisor is positive, things are easy: There can be no deopts and we
  // can simply do an arithmetic right shift.
  uint16_t shift = WhichPowerOf2Abs(divisor);
  if (divisor > 1) {
    __ dsra(result, dividend, shift);
    return;
  }

  // If the divisor is negative, we have to negate and handle edge cases.
  // Dividend can be the same register as result so save the value of it
  // for checking overflow.
  __ Move(scratch, dividend);

  __ Dsubu(result, zero_reg, dividend);
  if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) {
    DeoptimizeIf(eq, instr, Deoptimizer::kMinusZero, result, Operand(zero_reg));
  }

  __ Xor(scratch, scratch, result);
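  // scratch now holds dividend ^ -dividend, whose sign bit is set whenever
  // negation changed the sign, i.e. for every dividend except 0 and kMinInt
  // (whose negation overflows back to itself).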
  // Dividing by -1 is basically negation, unless we overflow.
  if (divisor == -1) {
    if (instr->hydrogen()->CheckFlag(HValue::kLeftCanBeMinInt)) {
      DeoptimizeIf(gt, instr, Deoptimizer::kOverflow, result, Operand(kMaxInt));
    }
    return;
  }

  // If the negation could not overflow, simply shifting is OK.
  if (!instr->hydrogen()->CheckFlag(HValue::kLeftCanBeMinInt)) {
    __ dsra(result, result, shift);
    return;
  }

  Label no_overflow, done;
  __ Branch(&no_overflow, lt, scratch, Operand(zero_reg));
  __ li(result, Operand(kMinInt / divisor), CONSTANT_SIZE);
  __ Branch(&done);
  __ bind(&no_overflow);
  __ dsra(result, result, shift);
  __ bind(&done);
}


void LCodeGen::DoFlooringDivByConstI(LFlooringDivByConstI* instr) {
  Register dividend = ToRegister(instr->dividend());
  int32_t divisor = instr->divisor();
  Register result = ToRegister(instr->result());
  DCHECK(!dividend.is(result));

  if (divisor == 0) {
    DeoptimizeIf(al, instr, Deoptimizer::kDivisionByZero);
    return;
  }

  // Check for (0 / -x) that will produce negative zero.
  HMathFloorOfDiv* hdiv = instr->hydrogen();
  if (hdiv->CheckFlag(HValue::kBailoutOnMinusZero) && divisor < 0) {
    DeoptimizeIf(eq, instr, Deoptimizer::kMinusZero, dividend,
                 Operand(zero_reg));
  }

  // Easy case: We need no dynamic check for the dividend and the flooring
  // division is the same as the truncating division.
  if ((divisor > 0 && !hdiv->CheckFlag(HValue::kLeftCanBeNegative)) ||
      (divisor < 0 && !hdiv->CheckFlag(HValue::kLeftCanBePositive))) {
    __ TruncatingDiv(result, dividend, Abs(divisor));
    if (divisor < 0) __ Dsubu(result, zero_reg, result);
    return;
  }

  // In the general case we may need to adjust before and after the truncating
  // division to get a flooring division.
  Register temp = ToRegister(instr->temp());
  DCHECK(!temp.is(dividend) && !temp.is(result));
  Label needs_adjustment, done;
  __ Branch(&needs_adjustment, divisor > 0 ? lt : gt,
            dividend, Operand(zero_reg));
  __ TruncatingDiv(result, dividend, Abs(divisor));
  if (divisor < 0) __ Dsubu(result, zero_reg, result);
  __ jmp(&done);
  __ bind(&needs_adjustment);
  __ Daddu(temp, dividend, Operand(divisor > 0 ? 1 : -1));
  __ TruncatingDiv(result, temp, Abs(divisor));
  if (divisor < 0) __ Dsubu(result, zero_reg, result);
  __ Dsubu(result, result, Operand(1));
  __ bind(&done);
}


// TODO(svenpanne) Refactor this to avoid code duplication with DoDivI.
void LCodeGen::DoFlooringDivI(LFlooringDivI* instr) {
  HBinaryOperation* hdiv = instr->hydrogen();
  Register dividend = ToRegister(instr->dividend());
  Register divisor = ToRegister(instr->divisor());
  const Register result = ToRegister(instr->result());

  // On MIPS div is asynchronous - it will run in the background while we
  // check for special cases.
  __ Ddiv(result, dividend, divisor);

  // Check for x / 0.
  if (hdiv->CheckFlag(HValue::kCanBeDivByZero)) {
    DeoptimizeIf(eq, instr, Deoptimizer::kDivisionByZero, divisor,
                 Operand(zero_reg));
  }

  // Check for (0 / -x) that will produce negative zero.
  if (hdiv->CheckFlag(HValue::kBailoutOnMinusZero)) {
    Label left_not_zero;
    __ Branch(&left_not_zero, ne, dividend, Operand(zero_reg));
    DeoptimizeIf(lt, instr, Deoptimizer::kMinusZero, divisor,
                 Operand(zero_reg));
    __ bind(&left_not_zero);
  }

  // Check for (kMinInt / -1).
  if (hdiv->CheckFlag(HValue::kCanOverflow) &&
      !hdiv->CheckFlag(HValue::kAllUsesTruncatingToInt32)) {
    Label left_not_min_int;
    __ Branch(&left_not_min_int, ne, dividend, Operand(kMinInt));
    DeoptimizeIf(eq, instr, Deoptimizer::kOverflow, divisor, Operand(-1));
    __ bind(&left_not_min_int);
  }

  // We performed a truncating division. Correct the result if necessary.
  Label done;
  Register remainder = scratch0();
  if (kArchVariant != kMips64r6) {
    __ mfhi(remainder);
  } else {
    __ dmod(remainder, dividend, divisor);
  }
  __ Branch(&done, eq, remainder, Operand(zero_reg), USE_DELAY_SLOT);
  __ Xor(remainder, remainder, Operand(divisor));
  __ Branch(&done, ge, remainder, Operand(zero_reg));
  __ Dsubu(result, result, Operand(1));
  __ bind(&done);
}


void LCodeGen::DoMulS(LMulS* instr) {
  Register scratch = scratch0();
  Register result = ToRegister(instr->result());
  // Note that result may alias left.
  Register left = ToRegister(instr->left());
  LOperand* right_op = instr->right();

  bool bailout_on_minus_zero =
      instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero);
  bool overflow = instr->hydrogen()->CheckFlag(HValue::kCanOverflow);

  if (right_op->IsConstantOperand()) {
    int32_t constant = ToInteger32(LConstantOperand::cast(right_op));

    if (bailout_on_minus_zero && (constant < 0)) {
      // The case of a zero constant is handled separately below.
      // If the constant is negative and left is zero, the result should be -0.
      DeoptimizeIf(eq, instr, Deoptimizer::kMinusZero, left, Operand(zero_reg));
    }

    switch (constant) {
      case -1:
        if (overflow) {
          __ DsubuAndCheckForOverflow(result, zero_reg, left, scratch);
          DeoptimizeIf(lt, instr, Deoptimizer::kOverflow, scratch,
                       Operand(zero_reg));
        } else {
          __ Dsubu(result, zero_reg, left);
        }
        break;
      case 0:
        if (bailout_on_minus_zero) {
          // If left is strictly negative and the constant is zero, the
          // result is -0. Deoptimize if required, otherwise return 0.
          DeoptimizeIf(lt, instr, Deoptimizer::kMinusZero, left,
                       Operand(zero_reg));
        }
        __ mov(result, zero_reg);
        break;
      case 1:
        // Nothing to do.
        __ Move(result, left);
        break;
      default:
        // Multiplying by powers of two and powers of two plus or minus
        // one can be done faster with shifted operands.
        // For other constants we emit standard code.
        int32_t mask = constant >> 31;
        uint32_t constant_abs = (constant + mask) ^ mask;
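        // (constant + mask) ^ mask is a branch-free absolute value: for
        // constant == -6, mask == -1, so (-6 + -1) ^ -1 == -7 ^ -1 == 6.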
1476 | |
1477 if (base::bits::IsPowerOfTwo32(constant_abs)) { | |
1478 int32_t shift = WhichPowerOf2(constant_abs); | |
1479 __ dsll(result, left, shift); | |
1480 // Correct the sign of the result if the constant is negative. | |
1481 if (constant < 0) __ Dsubu(result, zero_reg, result); | |
1482 } else if (base::bits::IsPowerOfTwo32(constant_abs - 1)) { | |
1483 int32_t shift = WhichPowerOf2(constant_abs - 1); | |
1484 __ dsll(scratch, left, shift); | |
1485 __ Daddu(result, scratch, left); | |
1486 // Correct the sign of the result if the constant is negative. | |
1487 if (constant < 0) __ Dsubu(result, zero_reg, result); | |
1488 } else if (base::bits::IsPowerOfTwo32(constant_abs + 1)) { | |
1489 int32_t shift = WhichPowerOf2(constant_abs + 1); | |
1490 __ dsll(scratch, left, shift); | |
1491 __ Dsubu(result, scratch, left); | |
1492 // Correct the sign of the result if the constant is negative. | |
1493 if (constant < 0) __ Dsubu(result, zero_reg, result); | |
1494 } else { | |
1495 // Generate standard code. | |
1496 __ li(at, constant); | |
1497 __ Dmul(result, left, at); | |
1498 } | |
1499 } | |
1500 } else { | |
1501 DCHECK(right_op->IsRegister()); | |
1502 Register right = ToRegister(right_op); | |
1503 | |
1504 if (overflow) { | |
1505 // hi:lo = left * right. | |
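     // Dmulh leaves the 64-bit integer product of the two untagged smi | |
     // values in 'result'. The product fits a 32-bit smi payload iff bits | |
     // 63..32 equal the sign extension of bit 31; otherwise deoptimize. | |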
1506 __ Dmulh(result, left, right); | |
1507 __ dsra32(scratch, result, 0); | |
1508 __ sra(at, result, 31); | |
1509 __ SmiTag(result); | |
1510 DeoptimizeIf(ne, instr, Deoptimizer::kOverflow, scratch, Operand(at)); | |
1511 } else { | |
1512 __ SmiUntag(result, left); | |
1513 __ dmul(result, result, right); | |
1514 } | |
1515 | |
1516 if (bailout_on_minus_zero) { | |
1517 Label done; | |
1518 __ Xor(at, left, right); | |
1519 __ Branch(&done, ge, at, Operand(zero_reg)); | |
1520 // Bail out if the result is minus zero. | |
1521 DeoptimizeIf(eq, instr, Deoptimizer::kMinusZero, result, | |
1522 Operand(zero_reg)); | |
1523 __ bind(&done); | |
1524 } | |
1525 } | |
1526 } | |
1527 | |
1528 | |
1529 void LCodeGen::DoMulI(LMulI* instr) { | |
1530 Register scratch = scratch0(); | |
1531 Register result = ToRegister(instr->result()); | |
1532 // Note that result may alias left. | |
1533 Register left = ToRegister(instr->left()); | |
1534 LOperand* right_op = instr->right(); | |
1535 | |
1536 bool bailout_on_minus_zero = | |
1537 instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero); | |
1538 bool overflow = instr->hydrogen()->CheckFlag(HValue::kCanOverflow); | |
1539 | |
1540 if (right_op->IsConstantOperand()) { | |
1541 int32_t constant = ToInteger32(LConstantOperand::cast(right_op)); | |
1542 | |
1543 if (bailout_on_minus_zero && (constant < 0)) { | |
1544 // The case of a zero constant is handled separately below. | |
1545 // If the constant is negative and left is zero, the result should be -0. | |
1546 DeoptimizeIf(eq, instr, Deoptimizer::kMinusZero, left, Operand(zero_reg)); | |
1547 } | |
1548 | |
1549 switch (constant) { | |
1550 case -1: | |
1551 if (overflow) { | |
1552 __ SubuAndCheckForOverflow(result, zero_reg, left, scratch); | |
1553 DeoptimizeIf(lt, instr, Deoptimizer::kOverflow, scratch, | |
1554 Operand(zero_reg)); | |
1555 } else { | |
1556 __ Subu(result, zero_reg, left); | |
1557 } | |
1558 break; | |
1559 case 0: | |
1560 if (bailout_on_minus_zero) { | |
1561 // If left is strictly negative and the constant is zero, the | |
1562 // result is -0. Deoptimize if required, otherwise return 0. | |
1563 DeoptimizeIf(lt, instr, Deoptimizer::kMinusZero, left, | |
1564 Operand(zero_reg)); | |
1565 } | |
1566 __ mov(result, zero_reg); | |
1567 break; | |
1568 case 1: | |
1569 // Nothing to do beyond moving left into result (elided when they alias). | |
1570 __ Move(result, left); | |
1571 break; | |
1572 default: | |
1573 // Multiplying by powers of two and powers of two plus or minus | |
1574 // one can be done faster with shifted operands. | |
1575 // For other constants we emit standard code. | |
1576 int32_t mask = constant >> 31; | |
1577 uint32_t constant_abs = (constant + mask) ^ mask; | |
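     // Same branch-free |constant| computation as in DoMulS above. | |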
1578 | |
1579 if (base::bits::IsPowerOfTwo32(constant_abs)) { | |
1580 int32_t shift = WhichPowerOf2(constant_abs); | |
1581 __ sll(result, left, shift); | |
1582 // Correct the sign of the result if the constant is negative. | |
1583 if (constant < 0) __ Subu(result, zero_reg, result); | |
1584 } else if (base::bits::IsPowerOfTwo32(constant_abs - 1)) { | |
1585 int32_t shift = WhichPowerOf2(constant_abs - 1); | |
1586 __ sll(scratch, left, shift); | |
1587 __ addu(result, scratch, left); | |
1588 // Correct the sign of the result if the constant is negative. | |
1589 if (constant < 0) __ Subu(result, zero_reg, result); | |
1590 } else if (base::bits::IsPowerOfTwo32(constant_abs + 1)) { | |
1591 int32_t shift = WhichPowerOf2(constant_abs + 1); | |
1592 __ sll(scratch, left, shift); | |
1593 __ Subu(result, scratch, left); | |
1594 // Correct the sign of the result if the constant is negative. | |
1595 if (constant < 0) __ Subu(result, zero_reg, result); | |
1596 } else { | |
1597 // Generate standard code. | |
1598 __ li(at, constant); | |
1599 __ Mul(result, left, at); | |
1600 } | |
1601 } | |
1602 | |
1603 } else { | |
1604 DCHECK(right_op->IsRegister()); | |
1605 Register right = ToRegister(right_op); | |
1606 | |
1607 if (overflow) { | |
1608 // hi:lo = left * right. | |
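     // With the 32-bit operands sign-extended to 64 bits, Dmul yields the | |
     // exact product; it fits in 32 bits iff bits 63..32 equal the sign | |
     // extension of bit 31, so any mismatch below means overflow. | |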
1609 __ Dmul(result, left, right); | |
1610 __ dsra32(scratch, result, 0); | |
1611 __ sra(at, result, 31); | |
1612 | |
1613 DeoptimizeIf(ne, instr, Deoptimizer::kOverflow, scratch, Operand(at)); | |
1614 } else { | |
1615 __ mul(result, left, right); | |
1616 } | |
1617 | |
1618 if (bailout_on_minus_zero) { | |
1619 Label done; | |
1620 __ Xor(at, left, right); | |
1621 __ Branch(&done, ge, at, Operand(zero_reg)); | |
1622 // Bail out if the result is minus zero. | |
1623 DeoptimizeIf(eq, instr, Deoptimizer::kMinusZero, result, | |
1624 Operand(zero_reg)); | |
1625 __ bind(&done); | |
1626 } | |
1627 } | |
1628 } | |
1629 | |
1630 | |
1631 void LCodeGen::DoBitI(LBitI* instr) { | |
1632 LOperand* left_op = instr->left(); | |
1633 LOperand* right_op = instr->right(); | |
1634 DCHECK(left_op->IsRegister()); | |
1635 Register left = ToRegister(left_op); | |
1636 Register result = ToRegister(instr->result()); | |
1637 Operand right(no_reg); | |
1638 | |
1639 if (right_op->IsStackSlot()) { | |
1640 right = Operand(EmitLoadRegister(right_op, at)); | |
1641 } else { | |
1642 DCHECK(right_op->IsRegister() || right_op->IsConstantOperand()); | |
1643 right = ToOperand(right_op); | |
1644 } | |
1645 | |
1646 switch (instr->op()) { | |
1647 case Token::BIT_AND: | |
1648 __ And(result, left, right); | |
1649 break; | |
1650 case Token::BIT_OR: | |
1651 __ Or(result, left, right); | |
1652 break; | |
1653 case Token::BIT_XOR: | |
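     // x ^ ~0 is just ~x, and MIPS has no plain "not" instruction, so emit | |
     // the negation as a nor with zero_reg. | |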
1654 if (right_op->IsConstantOperand() && right.immediate() == int32_t(~0)) { | |
1655 __ Nor(result, zero_reg, left); | |
1656 } else { | |
1657 __ Xor(result, left, right); | |
1658 } | |
1659 break; | |
1660 default: | |
1661 UNREACHABLE(); | |
1662 break; | |
1663 } | |
1664 } | |
1665 | |
1666 | |
1667 void LCodeGen::DoShiftI(LShiftI* instr) { | |
1668 // Both 'left' and 'right' are "used at start" (see LCodeGen::DoShift), so | |
1669 // result may alias either of them. | |
1670 LOperand* right_op = instr->right(); | |
1671 Register left = ToRegister(instr->left()); | |
1672 Register result = ToRegister(instr->result()); | |
1673 | |
1674 if (right_op->IsRegister()) { | |
1675 // No need to mask the right operand on MIPS; masking is built into the | |
1676 // variable shift instructions. | |
1677 switch (instr->op()) { | |
1678 case Token::ROR: | |
1679 __ Ror(result, left, Operand(ToRegister(right_op))); | |
1680 break; | |
1681 case Token::SAR: | |
1682 __ srav(result, left, ToRegister(right_op)); | |
1683 break; | |
1684 case Token::SHR: | |
1685 __ srlv(result, left, ToRegister(right_op)); | |
1686 if (instr->can_deopt()) { | |
1687 // TODO(yy): (-1) >>> 0. anything else? | |
1688 DeoptimizeIf(lt, instr, Deoptimizer::kNegativeValue, result, | |
1689 Operand(zero_reg)); | |
1690 DeoptimizeIf(gt, instr, Deoptimizer::kNegativeValue, result, | |
1691 Operand(kMaxInt)); | |
1692 } | |
1693 break; | |
1694 case Token::SHL: | |
1695 __ sllv(result, left, ToRegister(right_op)); | |
1696 break; | |
1697 default: | |
1698 UNREACHABLE(); | |
1699 break; | |
1700 } | |
1701 } else { | |
1702 // Mask the right_op operand. | |
1703 int value = ToInteger32(LConstantOperand::cast(right_op)); | |
1704 uint8_t shift_count = static_cast<uint8_t>(value & 0x1F); | |
1705 switch (instr->op()) { | |
1706 case Token::ROR: | |
1707 if (shift_count != 0) { | |
1708 __ Ror(result, left, Operand(shift_count)); | |
1709 } else { | |
1710 __ Move(result, left); | |
1711 } | |
1712 break; | |
1713 case Token::SAR: | |
1714 if (shift_count != 0) { | |
1715 __ sra(result, left, shift_count); | |
1716 } else { | |
1717 __ Move(result, left); | |
1718 } | |
1719 break; | |
1720 case Token::SHR: | |
1721 if (shift_count != 0) { | |
1722 __ srl(result, left, shift_count); | |
1723 } else { | |
1724 if (instr->can_deopt()) { | |
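     // A zero shift leaves the bits unchanged, but x >>> 0 must still | |
     // produce a non-negative int32, so deopt when the sign bit is set. | |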
1725 __ And(at, left, Operand(0x80000000)); | |
1726 DeoptimizeIf(ne, instr, Deoptimizer::kNegativeValue, at, | |
1727 Operand(zero_reg)); | |
1728 } | |
1729 __ Move(result, left); | |
1730 } | |
1731 break; | |
1732 case Token::SHL: | |
1733 if (shift_count != 0) { | |
1734 if (instr->hydrogen_value()->representation().IsSmi()) { | |
1735 __ dsll(result, left, shift_count); | |
1736 } else { | |
1737 __ sll(result, left, shift_count); | |
1738 } | |
1739 } else { | |
1740 __ Move(result, left); | |
1741 } | |
1742 break; | |
1743 default: | |
1744 UNREACHABLE(); | |
1745 break; | |
1746 } | |
1747 } | |
1748 } | |
1749 | |
1750 | |
1751 void LCodeGen::DoSubS(LSubS* instr) { | |
1752 LOperand* left = instr->left(); | |
1753 LOperand* right = instr->right(); | |
1754 LOperand* result = instr->result(); | |
1755 bool can_overflow = instr->hydrogen()->CheckFlag(HValue::kCanOverflow); | |
1756 | |
1757 if (!can_overflow) { | |
1758 DCHECK(right->IsRegister() || right->IsConstantOperand()); | |
1759 __ Dsubu(ToRegister(result), ToRegister(left), ToOperand(right)); | |
1760 } else { // can_overflow. | |
1761 Register overflow = scratch0(); | |
1762 Register scratch = scratch1(); | |
1763 DCHECK(right->IsRegister() || right->IsConstantOperand()); | |
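     // DsubuAndCheckForOverflow leaves a negative value in 'overflow' iff | |
     // the subtraction overflowed, hence the lt-zero deopt check below. | |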
1764 __ DsubuAndCheckForOverflow(ToRegister(result), ToRegister(left), | |
1765 ToOperand(right), overflow, scratch); | |
1766 DeoptimizeIf(lt, instr, Deoptimizer::kOverflow, overflow, | |
1767 Operand(zero_reg)); | |
1768 } | |
1769 } | |
1770 | |
1771 | |
1772 void LCodeGen::DoSubI(LSubI* instr) { | |
1773 LOperand* left = instr->left(); | |
1774 LOperand* right = instr->right(); | |
1775 LOperand* result = instr->result(); | |
1776 bool can_overflow = instr->hydrogen()->CheckFlag(HValue::kCanOverflow); | |
1777 | |
1778 if (!can_overflow) { | |
1779 DCHECK(right->IsRegister() || right->IsConstantOperand()); | |
1780 __ Subu(ToRegister(result), ToRegister(left), ToOperand(right)); | |
1781 } else { // can_overflow. | |
1782 Register overflow = scratch0(); | |
1783 Register scratch = scratch1(); | |
1784 DCHECK(right->IsRegister() || right->IsConstantOperand()); | |
1785 __ SubuAndCheckForOverflow(ToRegister(result), ToRegister(left), | |
1786 ToOperand(right), overflow, scratch); | |
1787 DeoptimizeIf(lt, instr, Deoptimizer::kOverflow, overflow, | |
1788 Operand(zero_reg)); | |
1789 } | |
1790 } | |
1791 | |
1792 | |
1793 void LCodeGen::DoConstantI(LConstantI* instr) { | |
1794 __ li(ToRegister(instr->result()), Operand(instr->value())); | |
1795 } | |
1796 | |
1797 | |
1798 void LCodeGen::DoConstantS(LConstantS* instr) { | |
1799 __ li(ToRegister(instr->result()), Operand(instr->value())); | |
1800 } | |
1801 | |
1802 | |
1803 void LCodeGen::DoConstantD(LConstantD* instr) { | |
1804 DCHECK(instr->result()->IsDoubleRegister()); | |
1805 DoubleRegister result = ToDoubleRegister(instr->result()); | |
1806 double v = instr->value(); | |
1807 __ Move(result, v); | |
1808 } | |
1809 | |
1810 | |
1811 void LCodeGen::DoConstantE(LConstantE* instr) { | |
1812 __ li(ToRegister(instr->result()), Operand(instr->value())); | |
1813 } | |
1814 | |
1815 | |
1816 void LCodeGen::DoConstantT(LConstantT* instr) { | |
1817 Handle<Object> object = instr->value(isolate()); | |
1818 AllowDeferredHandleDereference smi_check; | |
1819 __ li(ToRegister(instr->result()), object); | |
1820 } | |
1821 | |
1822 | |
1823 void LCodeGen::DoMapEnumLength(LMapEnumLength* instr) { | |
1824 Register result = ToRegister(instr->result()); | |
1825 Register map = ToRegister(instr->value()); | |
1826 __ EnumLength(result, map); | |
1827 } | |
1828 | |
1829 | |
1830 void LCodeGen::DoDateField(LDateField* instr) { | |
1831 Register object = ToRegister(instr->date()); | |
1832 Register result = ToRegister(instr->result()); | |
1833 Register scratch = ToRegister(instr->temp()); | |
1834 Smi* index = instr->index(); | |
1835 DCHECK(object.is(a0)); | |
1836 DCHECK(result.is(v0)); | |
1837 DCHECK(!scratch.is(scratch0())); | |
1838 DCHECK(!scratch.is(object)); | |
1839 | |
1840 if (index->value() == 0) { | |
1841 __ ld(result, FieldMemOperand(object, JSDate::kValueOffset)); | |
1842 } else { | |
1843 Label runtime, done; | |
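     // Cached date fields are valid only while the object's cache stamp | |
     // matches the isolate's date cache stamp (which is bumped when the | |
     // date/time configuration changes); on a mismatch, call the runtime. | |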
1844 if (index->value() < JSDate::kFirstUncachedField) { | |
1845 ExternalReference stamp = ExternalReference::date_cache_stamp(isolate()); | |
1846 __ li(scratch, Operand(stamp)); | |
1847 __ ld(scratch, MemOperand(scratch)); | |
1848 __ ld(scratch0(), FieldMemOperand(object, JSDate::kCacheStampOffset)); | |
1849 __ Branch(&runtime, ne, scratch, Operand(scratch0())); | |
1850 __ ld(result, FieldMemOperand(object, JSDate::kValueOffset + | |
1851 kPointerSize * index->value())); | |
1852 __ jmp(&done); | |
1853 } | |
1854 __ bind(&runtime); | |
1855 __ PrepareCallCFunction(2, scratch); | |
1856 __ li(a1, Operand(index)); | |
1857 __ CallCFunction(ExternalReference::get_date_field_function(isolate()), 2); | |
1858 __ bind(&done); | |
1859 } | |
1860 } | |
1861 | |
1862 | |
1863 MemOperand LCodeGen::BuildSeqStringOperand(Register string, | |
1864 LOperand* index, | |
1865 String::Encoding encoding) { | |
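     // Computes the address of the character at 'index': a constant index | |
     // is folded into the operand's offset, otherwise the index (scaled by | |
     // the character width) is added to the string register. | |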
1866 if (index->IsConstantOperand()) { | |
1867 int offset = ToInteger32(LConstantOperand::cast(index)); | |
1868 if (encoding == String::TWO_BYTE_ENCODING) { | |
1869 offset *= kUC16Size; | |
1870 } | |
1871 STATIC_ASSERT(kCharSize == 1); | |
1872 return FieldMemOperand(string, SeqString::kHeaderSize + offset); | |
1873 } | |
1874 Register scratch = scratch0(); | |
1875 DCHECK(!scratch.is(string)); | |
1876 DCHECK(!scratch.is(ToRegister(index))); | |
1877 if (encoding == String::ONE_BYTE_ENCODING) { | |
1878 __ Daddu(scratch, string, ToRegister(index)); | |
1879 } else { | |
1880 STATIC_ASSERT(kUC16Size == 2); | |
1881 __ dsll(scratch, ToRegister(index), 1); | |
1882 __ Daddu(scratch, string, scratch); | |
1883 } | |
1884 return FieldMemOperand(scratch, SeqString::kHeaderSize); | |
1885 } | |
1886 | |
1887 | |
1888 void LCodeGen::DoSeqStringGetChar(LSeqStringGetChar* instr) { | |
1889 String::Encoding encoding = instr->hydrogen()->encoding(); | |
1890 Register string = ToRegister(instr->string()); | |
1891 Register result = ToRegister(instr->result()); | |
1892 | |
1893 if (FLAG_debug_code) { | |
1894 Register scratch = scratch0(); | |
1895 __ ld(scratch, FieldMemOperand(string, HeapObject::kMapOffset)); | |
1896 __ lbu(scratch, FieldMemOperand(scratch, Map::kInstanceTypeOffset)); | |
1897 | |
1898 __ And(scratch, scratch, | |
1899 Operand(kStringRepresentationMask | kStringEncodingMask)); | |
1900 static const uint32_t one_byte_seq_type = kSeqStringTag | kOneByteStringTag; | |
1901 static const uint32_t two_byte_seq_type = kSeqStringTag | kTwoByteStringTag; | |
1902 __ Dsubu(at, scratch, Operand(encoding == String::ONE_BYTE_ENCODING | |
1903 ? one_byte_seq_type : two_byte_seq_type)); | |
1904 __ Check(eq, kUnexpectedStringType, at, Operand(zero_reg)); | |
1905 } | |
1906 | |
1907 MemOperand operand = BuildSeqStringOperand(string, instr->index(), encoding); | |
1908 if (encoding == String::ONE_BYTE_ENCODING) { | |
1909 __ lbu(result, operand); | |
1910 } else { | |
1911 __ lhu(result, operand); | |
1912 } | |
1913 } | |
1914 | |
1915 | |
1916 void LCodeGen::DoSeqStringSetChar(LSeqStringSetChar* instr) { | |
1917 String::Encoding encoding = instr->hydrogen()->encoding(); | |
1918 Register string = ToRegister(instr->string()); | |
1919 Register value = ToRegister(instr->value()); | |
1920 | |
1921 if (FLAG_debug_code) { | |
1922 Register scratch = scratch0(); | |
1923 Register index = ToRegister(instr->index()); | |
1924 static const uint32_t one_byte_seq_type = kSeqStringTag | kOneByteStringTag; | |
1925 static const uint32_t two_byte_seq_type = kSeqStringTag | kTwoByteStringTag; | |
1926 int encoding_mask = | |
1927 instr->hydrogen()->encoding() == String::ONE_BYTE_ENCODING | |
1928 ? one_byte_seq_type : two_byte_seq_type; | |
1929 __ EmitSeqStringSetCharCheck(string, index, value, scratch, encoding_mask); | |
1930 } | |
1931 | |
1932 MemOperand operand = BuildSeqStringOperand(string, instr->index(), encoding); | |
1933 if (encoding == String::ONE_BYTE_ENCODING) { | |
1934 __ sb(value, operand); | |
1935 } else { | |
1936 __ sh(value, operand); | |
1937 } | |
1938 } | |
1939 | |
1940 | |
1941 void LCodeGen::DoAddE(LAddE* instr) { | |
1942 LOperand* result = instr->result(); | |
1943 LOperand* left = instr->left(); | |
1944 LOperand* right = instr->right(); | |
1945 | |
1946 DCHECK(!instr->hydrogen()->CheckFlag(HValue::kCanOverflow)); | |
1947 DCHECK(right->IsRegister() || right->IsConstantOperand()); | |
1948 __ Daddu(ToRegister(result), ToRegister(left), ToOperand(right)); | |
1949 } | |
1950 | |
1951 | |
1952 void LCodeGen::DoAddS(LAddS* instr) { | |
1953 LOperand* left = instr->left(); | |
1954 LOperand* right = instr->right(); | |
1955 LOperand* result = instr->result(); | |
1956 bool can_overflow = instr->hydrogen()->CheckFlag(HValue::kCanOverflow); | |
1957 | |
1958 if (!can_overflow) { | |
1959 DCHECK(right->IsRegister() || right->IsConstantOperand()); | |
1960 __ Daddu(ToRegister(result), ToRegister(left), ToOperand(right)); | |
1961 } else { // can_overflow. | |
1962 Register overflow = scratch0(); | |
1963 Register scratch = scratch1(); | |
1964 DCHECK(right->IsRegister() || right->IsConstantOperand()); | |
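     // Same protocol as DoSubS above: 'overflow' goes negative iff the | |
     // addition overflowed. | |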
1965 __ DadduAndCheckForOverflow(ToRegister(result), ToRegister(left), | |
1966 ToOperand(right), overflow, scratch); | |
1967 DeoptimizeIf(lt, instr, Deoptimizer::kOverflow, overflow, | |
1968 Operand(zero_reg)); | |
1969 } | |
1970 } | |
1971 | |
1972 | |
1973 void LCodeGen::DoAddI(LAddI* instr) { | |
1974 LOperand* left = instr->left(); | |
1975 LOperand* right = instr->right(); | |
1976 LOperand* result = instr->result(); | |
1977 bool can_overflow = instr->hydrogen()->CheckFlag(HValue::kCanOverflow); | |
1978 | |
1979 if (!can_overflow) { | |
1980 DCHECK(right->IsRegister() || right->IsConstantOperand()); | |
1981 __ Addu(ToRegister(result), ToRegister(left), ToOperand(right)); | |
1982 } else { // can_overflow. | |
1983 Register overflow = scratch0(); | |
1984 Register scratch = scratch1(); | |
1985 DCHECK(right->IsRegister() || right->IsConstantOperand()); | |
1986 __ AdduAndCheckForOverflow(ToRegister(result), ToRegister(left), | |
1987 ToOperand(right), overflow, scratch); | |
1988 DeoptimizeIf(lt, instr, Deoptimizer::kOverflow, overflow, | |
1989 Operand(zero_reg)); | |
1990 } | |
1991 } | |
1992 | |
1993 | |
1994 void LCodeGen::DoMathMinMax(LMathMinMax* instr) { | |
1995 LOperand* left = instr->left(); | |
1996 LOperand* right = instr->right(); | |
1997 HMathMinMax::Operation operation = instr->hydrogen()->operation(); | |
1998 Condition condition = (operation == HMathMinMax::kMathMin) ? le : ge; | |
1999 if (instr->hydrogen()->representation().IsSmiOrInteger32()) { | |
2000 Register left_reg = ToRegister(left); | |
2001 Register right_reg = EmitLoadRegister(right, scratch0()); | |
2002 Register result_reg = ToRegister(instr->result()); | |
2004 Register scratch = scratch1(); | |
2005 __ Slt(scratch, left_reg, Operand(right_reg)); | |
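     // 'scratch' is 1 iff left < right. Movz/Movn then select the result | |
     // without branching: Movz moves when 'scratch' is zero, Movn when it | |
     // is non-zero. | |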
2006 if (condition == ge) { | |
2007 __ Movz(result_reg, left_reg, scratch); | |
2008 __ Movn(result_reg, right_reg, scratch); | |
2009 } else { | |
2010 DCHECK(condition == le); | |
2011 __ Movn(result_reg, left_reg, scratch); | |
2012 __ Movz(result_reg, right_reg, scratch); | |
2013 } | |
2014 } else { | |
2015 DCHECK(instr->hydrogen()->representation().IsDouble()); | |
2016 FPURegister left_reg = ToDoubleRegister(left); | |
2017 FPURegister right_reg = ToDoubleRegister(right); | |
2018 FPURegister result_reg = ToDoubleRegister(instr->result()); | |
2019 Label check_nan_left, check_zero, return_left, return_right, done; | |
2020 __ BranchF(&check_zero, &check_nan_left, eq, left_reg, right_reg); | |
2021 __ BranchF(&return_left, NULL, condition, left_reg, right_reg); | |
2022 __ Branch(&return_right); | |
2023 | |
2024 __ bind(&check_zero); | |
2025 // left == right != 0. | |
2026 __ BranchF(&return_left, NULL, ne, left_reg, kDoubleRegZero); | |
2027 // At this point, both left and right are either 0 or -0. | |
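     // With both inputs zero, IEEE addition yields +0 unless both are -0, | |
     // which is exactly max over signed zeros. For min, -((-left) - right) | |
     // yields -0 unless both inputs are +0. | |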
2028 if (operation == HMathMinMax::kMathMin) { | |
2029 __ neg_d(left_reg, left_reg); | |
2030 __ sub_d(result_reg, left_reg, right_reg); | |
2031 __ neg_d(result_reg, result_reg); | |
2032 } else { | |
2033 __ add_d(result_reg, left_reg, right_reg); | |
2034 } | |
2035 __ Branch(&done); | |
2036 | |
2037 __ bind(&check_nan_left); | |
2038 // left == NaN. | |
2039 __ BranchF(NULL, &return_left, eq, left_reg, left_reg); | |
2040 __ bind(&return_right); | |
2041 if (!right_reg.is(result_reg)) { | |
2042 __ mov_d(result_reg, right_reg); | |
2043 } | |
2044 __ Branch(&done); | |
2045 | |
2046 __ bind(&return_left); | |
2047 if (!left_reg.is(result_reg)) { | |
2048 __ mov_d(result_reg, left_reg); | |
2049 } | |
2050 __ bind(&done); | |
2051 } | |
2052 } | |
2053 | |
2054 | |
2055 void LCodeGen::DoArithmeticD(LArithmeticD* instr) { | |
2056 DoubleRegister left = ToDoubleRegister(instr->left()); | |
2057 DoubleRegister right = ToDoubleRegister(instr->right()); | |
2058 DoubleRegister result = ToDoubleRegister(instr->result()); | |
2059 switch (instr->op()) { | |
2060 case Token::ADD: | |
2061 __ add_d(result, left, right); | |
2062 break; | |
2063 case Token::SUB: | |
2064 __ sub_d(result, left, right); | |
2065 break; | |
2066 case Token::MUL: | |
2067 __ mul_d(result, left, right); | |
2068 break; | |
2069 case Token::DIV: | |
2070 __ div_d(result, left, right); | |
2071 break; | |
2072 case Token::MOD: { | |
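     // MIPS has no FPU remainder instruction, so the modulus is computed by | |
     // calling a C helper; a0-a3 are preserved around the call since they | |
     // may hold live values. | |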
2073 // Save a0-a3 on the stack. | |
2074 RegList saved_regs = a0.bit() | a1.bit() | a2.bit() | a3.bit(); | |
2075 __ MultiPush(saved_regs); | |
2076 | |
2077 __ PrepareCallCFunction(0, 2, scratch0()); | |
2078 __ MovToFloatParameters(left, right); | |
2079 __ CallCFunction( | |
2080 ExternalReference::mod_two_doubles_operation(isolate()), | |
2081 0, 2); | |
2082 // Move the result into the double result register. | |
2083 __ MovFromFloatResult(result); | |
2084 | |
2085 // Restore the saved registers. | |
2086 __ MultiPop(saved_regs); | |
2087 break; | |
2088 } | |
2089 default: | |
2090 UNREACHABLE(); | |
2091 break; | |
2092 } | |
2093 } | |
2094 | |
2095 | |
2096 void LCodeGen::DoArithmeticT(LArithmeticT* instr) { | |
2097 DCHECK(ToRegister(instr->context()).is(cp)); | |
2098 DCHECK(ToRegister(instr->left()).is(a1)); | |
2099 DCHECK(ToRegister(instr->right()).is(a0)); | |
2100 DCHECK(ToRegister(instr->result()).is(v0)); | |
2101 | |
2102 Handle<Code> code = | |
2103 CodeFactory::BinaryOpIC(isolate(), instr->op(), instr->strength()).code(); | |
2104 CallCode(code, RelocInfo::CODE_TARGET, instr); | |
2105 // Other architectures use a nop here to signal that there is no inlined | |
2106 // patchable code. MIPS does not need the nop, since our marker | |
2107 // instruction (andi zero_reg) will never be used in normal code. | |
2108 } | |
2109 | |
2110 | |
2111 template<class InstrType> | |
2112 void LCodeGen::EmitBranch(InstrType instr, | |
2113 Condition condition, | |
2114 Register src1, | |
2115 const Operand& src2) { | |
2116 int left_block = instr->TrueDestination(chunk_); | |
2117 int right_block = instr->FalseDestination(chunk_); | |
2118 | |
2119 int next_block = GetNextEmittedBlock(); | |
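     // Emit the cheapest control flow: fall through when a successor is the | |
     // next emitted block, branching (with a negated condition if needed) | |
     // only for the other successor. | |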
2120 if (right_block == left_block || condition == al) { | |
2121 EmitGoto(left_block); | |
2122 } else if (left_block == next_block) { | |
2123 __ Branch(chunk_->GetAssemblyLabel(right_block), | |
2124 NegateCondition(condition), src1, src2); | |
2125 } else if (right_block == next_block) { | |
2126 __ Branch(chunk_->GetAssemblyLabel(left_block), condition, src1, src2); | |
2127 } else { | |
2128 __ Branch(chunk_->GetAssemblyLabel(left_block), condition, src1, src2); | |
2129 __ Branch(chunk_->GetAssemblyLabel(right_block)); | |
2130 } | |
2131 } | |
2132 | |
2133 | |
2134 template<class InstrType> | |
2135 void LCodeGen::EmitBranchF(InstrType instr, | |
2136 Condition condition, | |
2137 FPURegister src1, | |
2138 FPURegister src2) { | |
2139 int right_block = instr->FalseDestination(chunk_); | |
2140 int left_block = instr->TrueDestination(chunk_); | |
2141 | |
2142 int next_block = GetNextEmittedBlock(); | |
2143 if (right_block == left_block) { | |
2144 EmitGoto(left_block); | |
2145 } else if (left_block == next_block) { | |
2146 __ BranchF(chunk_->GetAssemblyLabel(right_block), NULL, | |
2147 NegateFpuCondition(condition), src1, src2); | |
2148 } else if (right_block == next_block) { | |
2149 __ BranchF(chunk_->GetAssemblyLabel(left_block), NULL, | |
2150 condition, src1, src2); | |
2151 } else { | |
2152 __ BranchF(chunk_->GetAssemblyLabel(left_block), NULL, | |
2153 condition, src1, src2); | |
2154 __ Branch(chunk_->GetAssemblyLabel(right_block)); | |
2155 } | |
2156 } | |
2157 | |
2158 | |
2159 template <class InstrType> | |
2160 void LCodeGen::EmitTrueBranch(InstrType instr, Condition condition, | |
2161 Register src1, const Operand& src2) { | |
2162 int true_block = instr->TrueDestination(chunk_); | |
2163 __ Branch(chunk_->GetAssemblyLabel(true_block), condition, src1, src2); | |
2164 } | |
2165 | |
2166 | |
2167 template <class InstrType> | |
2168 void LCodeGen::EmitFalseBranch(InstrType instr, Condition condition, | |
2169 Register src1, const Operand& src2) { | |
2170 int false_block = instr->FalseDestination(chunk_); | |
2171 __ Branch(chunk_->GetAssemblyLabel(false_block), condition, src1, src2); | |
2172 } | |
2173 | |
2174 | |
2175 template<class InstrType> | |
2176 void LCodeGen::EmitFalseBranchF(InstrType instr, | |
2177 Condition condition, | |
2178 FPURegister src1, | |
2179 FPURegister src2) { | |
2180 int false_block = instr->FalseDestination(chunk_); | |
2181 __ BranchF(chunk_->GetAssemblyLabel(false_block), NULL, | |
2182 condition, src1, src2); | |
2183 } | |
2184 | |
2185 | |
2186 void LCodeGen::DoDebugBreak(LDebugBreak* instr) { | |
2187 __ stop("LDebugBreak"); | |
2188 } | |
2189 | |
2190 | |
2191 void LCodeGen::DoBranch(LBranch* instr) { | |
2192 Representation r = instr->hydrogen()->value()->representation(); | |
2193 if (r.IsInteger32() || r.IsSmi()) { | |
2194 DCHECK(!info()->IsStub()); | |
2195 Register reg = ToRegister(instr->value()); | |
2196 EmitBranch(instr, ne, reg, Operand(zero_reg)); | |
2197 } else if (r.IsDouble()) { | |
2198 DCHECK(!info()->IsStub()); | |
2199 DoubleRegister reg = ToDoubleRegister(instr->value()); | |
2200 // Test the double value. Zero and NaN are false. | |
2201 EmitBranchF(instr, ogl, reg, kDoubleRegZero); | |
2202 } else { | |
2203 DCHECK(r.IsTagged()); | |
2204 Register reg = ToRegister(instr->value()); | |
2205 HType type = instr->hydrogen()->value()->type(); | |
2206 if (type.IsBoolean()) { | |
2207 DCHECK(!info()->IsStub()); | |
2208 __ LoadRoot(at, Heap::kTrueValueRootIndex); | |
2209 EmitBranch(instr, eq, reg, Operand(at)); | |
2210 } else if (type.IsSmi()) { | |
2211 DCHECK(!info()->IsStub()); | |
2212 EmitBranch(instr, ne, reg, Operand(zero_reg)); | |
2213 } else if (type.IsJSArray()) { | |
2214 DCHECK(!info()->IsStub()); | |
2215 EmitBranch(instr, al, zero_reg, Operand(zero_reg)); | |
2216 } else if (type.IsHeapNumber()) { | |
2217 DCHECK(!info()->IsStub()); | |
2218 DoubleRegister dbl_scratch = double_scratch0(); | |
2219 __ ldc1(dbl_scratch, FieldMemOperand(reg, HeapNumber::kValueOffset)); | |
2220 // Test the double value. Zero and NaN are false. | |
2221 EmitBranchF(instr, ogl, dbl_scratch, kDoubleRegZero); | |
2222 } else if (type.IsString()) { | |
2223 DCHECK(!info()->IsStub()); | |
2224 __ ld(at, FieldMemOperand(reg, String::kLengthOffset)); | |
2225 EmitBranch(instr, ne, at, Operand(zero_reg)); | |
2226 } else { | |
2227 ToBooleanStub::Types expected = instr->hydrogen()->expected_input_types(); | |
2228 // Avoid deopts in the case where we've never executed this path before. | |
2229 if (expected.IsEmpty()) expected = ToBooleanStub::Types::Generic(); | |
2230 | |
2231 if (expected.Contains(ToBooleanStub::UNDEFINED)) { | |
2232 // undefined -> false. | |
2233 __ LoadRoot(at, Heap::kUndefinedValueRootIndex); | |
2234 __ Branch(instr->FalseLabel(chunk_), eq, reg, Operand(at)); | |
2235 } | |
2236 if (expected.Contains(ToBooleanStub::BOOLEAN)) { | |
2237 // Boolean -> its value. | |
2238 __ LoadRoot(at, Heap::kTrueValueRootIndex); | |
2239 __ Branch(instr->TrueLabel(chunk_), eq, reg, Operand(at)); | |
2240 __ LoadRoot(at, Heap::kFalseValueRootIndex); | |
2241 __ Branch(instr->FalseLabel(chunk_), eq, reg, Operand(at)); | |
2242 } | |
2243 if (expected.Contains(ToBooleanStub::NULL_TYPE)) { | |
2244 // 'null' -> false. | |
2245 __ LoadRoot(at, Heap::kNullValueRootIndex); | |
2246 __ Branch(instr->FalseLabel(chunk_), eq, reg, Operand(at)); | |
2247 } | |
2248 | |
2249 if (expected.Contains(ToBooleanStub::SMI)) { | |
2250 // Smis: 0 -> false, all others -> true. | |
2251 __ Branch(instr->FalseLabel(chunk_), eq, reg, Operand(zero_reg)); | |
2252 __ JumpIfSmi(reg, instr->TrueLabel(chunk_)); | |
2253 } else if (expected.NeedsMap()) { | |
2254 // If we need a map later and have a Smi -> deopt. | |
2255 __ SmiTst(reg, at); | |
2256 DeoptimizeIf(eq, instr, Deoptimizer::kSmi, at, Operand(zero_reg)); | |
2257 } | |
2258 | |
2259 const Register map = scratch0(); | |
2260 if (expected.NeedsMap()) { | |
2261 __ ld(map, FieldMemOperand(reg, HeapObject::kMapOffset)); | |
2262 if (expected.CanBeUndetectable()) { | |
2263 // Undetectable -> false. | |
2264 __ lbu(at, FieldMemOperand(map, Map::kBitFieldOffset)); | |
2265 __ And(at, at, Operand(1 << Map::kIsUndetectable)); | |
2266 __ Branch(instr->FalseLabel(chunk_), ne, at, Operand(zero_reg)); | |
2267 } | |
2268 } | |
2269 | |
2270 if (expected.Contains(ToBooleanStub::SPEC_OBJECT)) { | |
2271 // spec object -> true. | |
2272 __ lbu(at, FieldMemOperand(map, Map::kInstanceTypeOffset)); | |
2273 __ Branch(instr->TrueLabel(chunk_), | |
2274 ge, at, Operand(FIRST_SPEC_OBJECT_TYPE)); | |
2275 } | |
2276 | |
2277 if (expected.Contains(ToBooleanStub::STRING)) { | |
2278 // String value -> false iff empty. | |
2279 Label not_string; | |
2280 __ lbu(at, FieldMemOperand(map, Map::kInstanceTypeOffset)); | |
2281 __ Branch(&not_string, ge, at, Operand(FIRST_NONSTRING_TYPE)); | |
2282 __ ld(at, FieldMemOperand(reg, String::kLengthOffset)); | |
2283 __ Branch(instr->TrueLabel(chunk_), ne, at, Operand(zero_reg)); | |
2284 __ Branch(instr->FalseLabel(chunk_)); | |
2285 __ bind(¬_string); | |
2286 } | |
2287 | |
2288 if (expected.Contains(ToBooleanStub::SYMBOL)) { | |
2289 // Symbol value -> true. | |
2290 const Register scratch = scratch1(); | |
2291 __ lbu(scratch, FieldMemOperand(map, Map::kInstanceTypeOffset)); | |
2292 __ Branch(instr->TrueLabel(chunk_), eq, scratch, Operand(SYMBOL_TYPE)); | |
2293 } | |
2294 | |
2295 if (expected.Contains(ToBooleanStub::SIMD_VALUE)) { | |
2296 // SIMD value -> true. | |
2297 const Register scratch = scratch1(); | |
2298 __ lbu(scratch, FieldMemOperand(map, Map::kInstanceTypeOffset)); | |
2299 __ Branch(instr->TrueLabel(chunk_), eq, scratch, | |
2300 Operand(SIMD128_VALUE_TYPE)); | |
2301 } | |
2302 | |
2303 if (expected.Contains(ToBooleanStub::HEAP_NUMBER)) { | |
2304 // heap number -> false iff +0, -0, or NaN. | |
2305 DoubleRegister dbl_scratch = double_scratch0(); | |
2306 Label not_heap_number; | |
2307 __ LoadRoot(at, Heap::kHeapNumberMapRootIndex); | |
2308 __ Branch(¬_heap_number, ne, map, Operand(at)); | |
2309 __ ldc1(dbl_scratch, FieldMemOperand(reg, HeapNumber::kValueOffset)); | |
2310 __ BranchF(instr->TrueLabel(chunk_), instr->FalseLabel(chunk_), | |
2311 ne, dbl_scratch, kDoubleRegZero); | |
2312 // Falls through if dbl_scratch == 0. | |
2313 __ Branch(instr->FalseLabel(chunk_)); | |
2314 __ bind(¬_heap_number); | |
2315 } | |
2316 | |
2317 if (!expected.IsGeneric()) { | |
2318 // We've seen something for the first time -> deopt. | |
2319 // This can only happen if we are not generic already. | |
2320 DeoptimizeIf(al, instr, Deoptimizer::kUnexpectedObject, zero_reg, | |
2321 Operand(zero_reg)); | |
2322 } | |
2323 } | |
2324 } | |
2325 } | |
2326 | |
2327 | |
2328 void LCodeGen::EmitGoto(int block) { | |
2329 if (!IsNextEmittedBlock(block)) { | |
2330 __ jmp(chunk_->GetAssemblyLabel(LookupDestination(block))); | |
2331 } | |
2332 } | |
2333 | |
2334 | |
2335 void LCodeGen::DoGoto(LGoto* instr) { | |
2336 EmitGoto(instr->block_id()); | |
2337 } | |
2338 | |
2339 | |
2340 Condition LCodeGen::TokenToCondition(Token::Value op, bool is_unsigned) { | |
2341 Condition cond = kNoCondition; | |
2342 switch (op) { | |
2343 case Token::EQ: | |
2344 case Token::EQ_STRICT: | |
2345 cond = eq; | |
2346 break; | |
2347 case Token::NE: | |
2348 case Token::NE_STRICT: | |
2349 cond = ne; | |
2350 break; | |
2351 case Token::LT: | |
2352 cond = is_unsigned ? lo : lt; | |
2353 break; | |
2354 case Token::GT: | |
2355 cond = is_unsigned ? hi : gt; | |
2356 break; | |
2357 case Token::LTE: | |
2358 cond = is_unsigned ? ls : le; | |
2359 break; | |
2360 case Token::GTE: | |
2361 cond = is_unsigned ? hs : ge; | |
2362 break; | |
2363 case Token::IN: | |
2364 case Token::INSTANCEOF: | |
2365 default: | |
2366 UNREACHABLE(); | |
2367 } | |
2368 return cond; | |
2369 } | |
2370 | |
2371 | |
2372 void LCodeGen::DoCompareNumericAndBranch(LCompareNumericAndBranch* instr) { | |
2373 LOperand* left = instr->left(); | |
2374 LOperand* right = instr->right(); | |
2375 bool is_unsigned = | |
2376 instr->hydrogen()->left()->CheckFlag(HInstruction::kUint32) || | |
2377 instr->hydrogen()->right()->CheckFlag(HInstruction::kUint32); | |
2378 Condition cond = TokenToCondition(instr->op(), is_unsigned); | |
2379 | |
2380 if (left->IsConstantOperand() && right->IsConstantOperand()) { | |
2381 // We can statically evaluate the comparison. | |
2382 double left_val = ToDouble(LConstantOperand::cast(left)); | |
2383 double right_val = ToDouble(LConstantOperand::cast(right)); | |
2384 int next_block = EvalComparison(instr->op(), left_val, right_val) ? | |
2385 instr->TrueDestination(chunk_) : instr->FalseDestination(chunk_); | |
2386 EmitGoto(next_block); | |
2387 } else { | |
2388 if (instr->is_double()) { | |
2389 // Compare left and right as doubles; the result of the FP comparison | |
2390 // is consumed directly by the branch below. | |
2391 FPURegister left_reg = ToDoubleRegister(left); | |
2392 FPURegister right_reg = ToDoubleRegister(right); | |
2393 | |
2394 // If a NaN is involved, i.e. the result is unordered, | |
2395 // jump to the false block label. | |
2396 __ BranchF(NULL, instr->FalseLabel(chunk_), eq, | |
2397 left_reg, right_reg); | |
2398 | |
2399 EmitBranchF(instr, cond, left_reg, right_reg); | |
2400 } else { | |
2401 Register cmp_left; | |
2402 Operand cmp_right = Operand(static_cast<int64_t>(0)); | |
2403 if (right->IsConstantOperand()) { | |
2404 int32_t value = ToInteger32(LConstantOperand::cast(right)); | |
2405 if (instr->hydrogen_value()->representation().IsSmi()) { | |
2406 cmp_left = ToRegister(left); | |
2407 cmp_right = Operand(Smi::FromInt(value)); | |
2408 } else { | |
2409 cmp_left = ToRegister(left); | |
2410 cmp_right = Operand(value); | |
2411 } | |
2412 } else if (left->IsConstantOperand()) { | |
2413 int32_t value = ToInteger32(LConstantOperand::cast(left)); | |
2414 if (instr->hydrogen_value()->representation().IsSmi()) { | |
2415 cmp_left = ToRegister(right); | |
2416 cmp_right = Operand(Smi::FromInt(value)); | |
2417 } else { | |
2418 cmp_left = ToRegister(right); | |
2419 cmp_right = Operand(value); | |
2420 } | |
2421 // We commuted the operands, so commute the condition. | |
2422 cond = CommuteCondition(cond); | |
2423 } else { | |
2424 cmp_left = ToRegister(left); | |
2425 cmp_right = Operand(ToRegister(right)); | |
2426 } | |
2427 | |
2428 EmitBranch(instr, cond, cmp_left, cmp_right); | |
2429 } | |
2430 } | |
2431 } | |
2432 | |
2433 | |
2434 void LCodeGen::DoCmpObjectEqAndBranch(LCmpObjectEqAndBranch* instr) { | |
2435 Register left = ToRegister(instr->left()); | |
2436 Register right = ToRegister(instr->right()); | |
2437 | |
2438 EmitBranch(instr, eq, left, Operand(right)); | |
2439 } | |
2440 | |
2441 | |
2442 void LCodeGen::DoCmpHoleAndBranch(LCmpHoleAndBranch* instr) { | |
2443 if (instr->hydrogen()->representation().IsTagged()) { | |
2444 Register input_reg = ToRegister(instr->object()); | |
2445 __ li(at, Operand(factory()->the_hole_value())); | |
2446 EmitBranch(instr, eq, input_reg, Operand(at)); | |
2447 return; | |
2448 } | |
2449 | |
2450 DoubleRegister input_reg = ToDoubleRegister(instr->object()); | |
2451 EmitFalseBranchF(instr, eq, input_reg, input_reg); | |
2452 | |
2453 Register scratch = scratch0(); | |
2454 __ FmoveHigh(scratch, input_reg); | |
2455 EmitBranch(instr, eq, scratch, | |
2456 Operand(static_cast<int32_t>(kHoleNanUpper32))); | |
2457 } | |
2458 | |
2459 | |
2460 void LCodeGen::DoCompareMinusZeroAndBranch(LCompareMinusZeroAndBranch* instr) { | |
2461 Representation rep = instr->hydrogen()->value()->representation(); | |
2462 DCHECK(!rep.IsInteger32()); | |
2463 Register scratch = ToRegister(instr->temp()); | |
2464 | |
2465 if (rep.IsDouble()) { | |
2466 DoubleRegister value = ToDoubleRegister(instr->value()); | |
2467 EmitFalseBranchF(instr, ne, value, kDoubleRegZero); | |
2468 __ FmoveHigh(scratch, value); | |
2469 // Only use low 32-bits of value. | |
2470 __ dsll32(scratch, scratch, 0); | |
2471 __ dsrl32(scratch, scratch, 0); | |
2472 __ li(at, 0x80000000); | |
2473 } else { | |
2474 Register value = ToRegister(instr->value()); | |
2475 __ CheckMap(value, | |
2476 scratch, | |
2477 Heap::kHeapNumberMapRootIndex, | |
2478 instr->FalseLabel(chunk()), | |
2479 DO_SMI_CHECK); | |
2480 __ lwu(scratch, FieldMemOperand(value, HeapNumber::kExponentOffset)); | |
2481 EmitFalseBranch(instr, ne, scratch, Operand(0x80000000)); | |
2482 __ lwu(scratch, FieldMemOperand(value, HeapNumber::kMantissaOffset)); | |
2483 __ mov(at, zero_reg); | |
2484 } | |
2485 EmitBranch(instr, eq, scratch, Operand(at)); | |
2486 } | |
2487 | |
2488 | |
2489 Condition LCodeGen::EmitIsString(Register input, | |
2490 Register temp1, | |
2491 Label* is_not_string, | |
2492 SmiCheck check_needed = INLINE_SMI_CHECK) { | |
2493 if (check_needed == INLINE_SMI_CHECK) { | |
2494 __ JumpIfSmi(input, is_not_string); | |
2495 } | |
2496 __ GetObjectType(input, temp1, temp1); | |
2497 | |
2498 return lt; | |
2499 } | |
2500 | |
2501 | |
2502 void LCodeGen::DoIsStringAndBranch(LIsStringAndBranch* instr) { | |
2503 Register reg = ToRegister(instr->value()); | |
2504 Register temp1 = ToRegister(instr->temp()); | |
2505 | |
2506 SmiCheck check_needed = | |
2507 instr->hydrogen()->value()->type().IsHeapObject() | |
2508 ? OMIT_SMI_CHECK : INLINE_SMI_CHECK; | |
2509 Condition true_cond = | |
2510 EmitIsString(reg, temp1, instr->FalseLabel(chunk_), check_needed); | |
2511 | |
2512 EmitBranch(instr, true_cond, temp1, | |
2513 Operand(FIRST_NONSTRING_TYPE)); | |
2514 } | |
2515 | |
2516 | |
2517 void LCodeGen::DoIsSmiAndBranch(LIsSmiAndBranch* instr) { | |
2518 Register input_reg = EmitLoadRegister(instr->value(), at); | |
2519 __ And(at, input_reg, kSmiTagMask); | |
2520 EmitBranch(instr, eq, at, Operand(zero_reg)); | |
2521 } | |
2522 | |
2523 | |
2524 void LCodeGen::DoIsUndetectableAndBranch(LIsUndetectableAndBranch* instr) { | |
2525 Register input = ToRegister(instr->value()); | |
2526 Register temp = ToRegister(instr->temp()); | |
2527 | |
2528 if (!instr->hydrogen()->value()->type().IsHeapObject()) { | |
2529 __ JumpIfSmi(input, instr->FalseLabel(chunk_)); | |
2530 } | |
2531 __ ld(temp, FieldMemOperand(input, HeapObject::kMapOffset)); | |
2532 __ lbu(temp, FieldMemOperand(temp, Map::kBitFieldOffset)); | |
2533 __ And(at, temp, Operand(1 << Map::kIsUndetectable)); | |
2534 EmitBranch(instr, ne, at, Operand(zero_reg)); | |
2535 } | |
2536 | |
2537 | |
2538 static Condition ComputeCompareCondition(Token::Value op) { | |
2539 switch (op) { | |
2540 case Token::EQ_STRICT: | |
2541 case Token::EQ: | |
2542 return eq; | |
2543 case Token::LT: | |
2544 return lt; | |
2545 case Token::GT: | |
2546 return gt; | |
2547 case Token::LTE: | |
2548 return le; | |
2549 case Token::GTE: | |
2550 return ge; | |
2551 default: | |
2552 UNREACHABLE(); | |
2553 return kNoCondition; | |
2554 } | |
2555 } | |
2556 | |
2557 | |
2558 void LCodeGen::DoStringCompareAndBranch(LStringCompareAndBranch* instr) { | |
2559 DCHECK(ToRegister(instr->context()).is(cp)); | |
2560 DCHECK(ToRegister(instr->left()).is(a1)); | |
2561 DCHECK(ToRegister(instr->right()).is(a0)); | |
2562 | |
2563 Handle<Code> code = CodeFactory::StringCompare(isolate()).code(); | |
2564 CallCode(code, RelocInfo::CODE_TARGET, instr); | |
2565 | |
2566 EmitBranch(instr, ComputeCompareCondition(instr->op()), v0, | |
2567 Operand(zero_reg)); | |
2568 } | |
2569 | |
2570 | |
2571 static InstanceType TestType(HHasInstanceTypeAndBranch* instr) { | |
2572 InstanceType from = instr->from(); | |
2573 InstanceType to = instr->to(); | |
2574 if (from == FIRST_TYPE) return to; | |
2575 DCHECK(from == to || to == LAST_TYPE); | |
2576 return from; | |
2577 } | |
2578 | |
2579 | |
2580 static Condition BranchCondition(HHasInstanceTypeAndBranch* instr) { | |
2581 InstanceType from = instr->from(); | |
2582 InstanceType to = instr->to(); | |
2583 if (from == to) return eq; | |
2584 if (to == LAST_TYPE) return hs; | |
2585 if (from == FIRST_TYPE) return ls; | |
2586 UNREACHABLE(); | |
2587 return eq; | |
2588 } | |
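     // Together, TestType and BranchCondition collapse the check: e.g. a | |
     // range test for [FIRST_TYPE, JS_OBJECT_TYPE] becomes a single unsigned | |
     // "ls" compare against JS_OBJECT_TYPE, and an exact type test an "eq". | |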
2589 | |
2590 | |
2591 void LCodeGen::DoHasInstanceTypeAndBranch(LHasInstanceTypeAndBranch* instr) { | |
2592 Register scratch = scratch0(); | |
2593 Register input = ToRegister(instr->value()); | |
2594 | |
2595 if (!instr->hydrogen()->value()->type().IsHeapObject()) { | |
2596 __ JumpIfSmi(input, instr->FalseLabel(chunk_)); | |
2597 } | |
2598 | |
2599 __ GetObjectType(input, scratch, scratch); | |
2600 EmitBranch(instr, | |
2601 BranchCondition(instr->hydrogen()), | |
2602 scratch, | |
2603 Operand(TestType(instr->hydrogen()))); | |
2604 } | |
2605 | |
2606 | |
2607 void LCodeGen::DoGetCachedArrayIndex(LGetCachedArrayIndex* instr) { | |
2608 Register input = ToRegister(instr->value()); | |
2609 Register result = ToRegister(instr->result()); | |
2610 | |
2611 __ AssertString(input); | |
2612 | |
2613 __ lwu(result, FieldMemOperand(input, String::kHashFieldOffset)); | |
2614 __ IndexFromHash(result, result); | |
2615 } | |
2616 | |
2617 | |
2618 void LCodeGen::DoHasCachedArrayIndexAndBranch( | |
2619 LHasCachedArrayIndexAndBranch* instr) { | |
2620 Register input = ToRegister(instr->value()); | |
2621 Register scratch = scratch0(); | |
2622 | |
2623 __ lwu(scratch, | |
2624 FieldMemOperand(input, String::kHashFieldOffset)); | |
2625 __ And(at, scratch, Operand(String::kContainsCachedArrayIndexMask)); | |
2626 EmitBranch(instr, eq, at, Operand(zero_reg)); | |
2627 } | |
2628 | |
2629 | |
2630 // Branches to a label or falls through; MIPS has no condition flags, so | |
2631 // the caller does the final compare. Trashes the temps, but not the input. | |
2632 void LCodeGen::EmitClassOfTest(Label* is_true, | |
2633 Label* is_false, | |
2634 Handle<String> class_name, | |
2635 Register input, | |
2636 Register temp, | |
2637 Register temp2) { | |
2638 DCHECK(!input.is(temp)); | |
2639 DCHECK(!input.is(temp2)); | |
2640 DCHECK(!temp.is(temp2)); | |
2641 | |
2642 __ JumpIfSmi(input, is_false); | |
2643 | |
2644 if (String::Equals(isolate()->factory()->Function_string(), class_name)) { | |
2645 // Assuming the following assertions, we can use the same compares to test | |
2646 // for both being a function type and being in the object type range. | |
2647 STATIC_ASSERT(NUM_OF_CALLABLE_SPEC_OBJECT_TYPES == 2); | |
2648 STATIC_ASSERT(FIRST_NONCALLABLE_SPEC_OBJECT_TYPE == | |
2649 FIRST_SPEC_OBJECT_TYPE + 1); | |
2650 STATIC_ASSERT(LAST_NONCALLABLE_SPEC_OBJECT_TYPE == | |
2651 LAST_SPEC_OBJECT_TYPE - 1); | |
2652 STATIC_ASSERT(LAST_SPEC_OBJECT_TYPE == LAST_TYPE); | |
2653 | |
2654 __ GetObjectType(input, temp, temp2); | |
2655 __ Branch(is_false, lt, temp2, Operand(FIRST_SPEC_OBJECT_TYPE)); | |
2656 __ Branch(is_true, eq, temp2, Operand(FIRST_SPEC_OBJECT_TYPE)); | |
2657 __ Branch(is_true, eq, temp2, Operand(LAST_SPEC_OBJECT_TYPE)); | |
2658 } else { | |
2659 // Faster code path to avoid two compares: subtract lower bound from the | |
2660 // actual type and do a signed compare with the width of the type range. | |
2661 __ GetObjectType(input, temp, temp2); | |
2662 __ Dsubu(temp2, temp2, Operand(FIRST_NONCALLABLE_SPEC_OBJECT_TYPE)); | |
2663 __ Branch(is_false, gt, temp2, Operand(LAST_NONCALLABLE_SPEC_OBJECT_TYPE - | |
2664 FIRST_NONCALLABLE_SPEC_OBJECT_TYPE)); | |
2665 } | |
2666 | |
2667 // Now we are in the FIRST-LAST_NONCALLABLE_SPEC_OBJECT_TYPE range. | |
2668 // Check if the constructor in the map is a function. | |
2669 Register instance_type = scratch1(); | |
2670 DCHECK(!instance_type.is(temp)); | |
2671 __ GetMapConstructor(temp, temp, temp2, instance_type); | |
2672 | |
2673 // Objects with a non-function constructor have class 'Object'. | |
2674 if (String::Equals(class_name, isolate()->factory()->Object_string())) { | |
2675 __ Branch(is_true, ne, instance_type, Operand(JS_FUNCTION_TYPE)); | |
2676 } else { | |
2677 __ Branch(is_false, ne, instance_type, Operand(JS_FUNCTION_TYPE)); | |
2678 } | |
2679 | |
2680 // temp now contains the constructor function. Grab the | |
2681 // instance class name from there. | |
2682 __ ld(temp, FieldMemOperand(temp, JSFunction::kSharedFunctionInfoOffset)); | |
2683 __ ld(temp, FieldMemOperand(temp, | |
2684 SharedFunctionInfo::kInstanceClassNameOffset)); | |
2685 // The class name we are testing against is internalized since it's a literal. | |
2686 // The name in the constructor is internalized because of the way the context | |
2687 // is booted. This routine isn't expected to work for random API-created | |
2688 // classes and it doesn't have to because you can't access it with natives | |
2689 // syntax. Since both sides are internalized it is sufficient to use an | |
2690 // identity comparison. | |
2691 | |
2692 // End with the address of this class_name instance in the temp register. | |
2693 // On MIPS, the caller must do the comparison with Handle<String> class_name. | |
2694 } | |
2695 | |
2696 | |
2697 void LCodeGen::DoClassOfTestAndBranch(LClassOfTestAndBranch* instr) { | |
2698 Register input = ToRegister(instr->value()); | |
2699 Register temp = scratch0(); | |
2700 Register temp2 = ToRegister(instr->temp()); | |
2701 Handle<String> class_name = instr->hydrogen()->class_name(); | |
2702 | |
2703 EmitClassOfTest(instr->TrueLabel(chunk_), instr->FalseLabel(chunk_), | |
2704 class_name, input, temp, temp2); | |
2705 | |
2706 EmitBranch(instr, eq, temp, Operand(class_name)); | |
2707 } | |
2708 | |
2709 | |
2710 void LCodeGen::DoCmpMapAndBranch(LCmpMapAndBranch* instr) { | |
2711 Register reg = ToRegister(instr->value()); | |
2712 Register temp = ToRegister(instr->temp()); | |
2713 | |
2714 __ ld(temp, FieldMemOperand(reg, HeapObject::kMapOffset)); | |
2715 EmitBranch(instr, eq, temp, Operand(instr->map())); | |
2716 } | |
2717 | |
2718 | |
2719 void LCodeGen::DoInstanceOf(LInstanceOf* instr) { | |
2720 DCHECK(ToRegister(instr->context()).is(cp)); | |
2722 DCHECK(ToRegister(instr->left()).is(InstanceOfDescriptor::LeftRegister())); | |
2723 DCHECK(ToRegister(instr->right()).is(InstanceOfDescriptor::RightRegister())); | |
2724 DCHECK(ToRegister(instr->result()).is(v0)); | |
2725 | |
2726 InstanceOfStub stub(isolate()); | |
2727 CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr); | |
2728 } | |
2729 | |
2730 | |
2731 void LCodeGen::DoHasInPrototypeChainAndBranch( | |
2732 LHasInPrototypeChainAndBranch* instr) { | |
2733 Register const object = ToRegister(instr->object()); | |
2734 Register const object_map = scratch0(); | |
2735 Register const object_prototype = object_map; | |
2736 Register const prototype = ToRegister(instr->prototype()); | |
2737 | |
2738 // The {object} must be a spec object. It's sufficient to know that {object} | |
2739 // is not a smi, since all other non-spec objects have {null} prototypes and | |
2740 // will be ruled out below. | |
2741 if (instr->hydrogen()->ObjectNeedsSmiCheck()) { | |
2742 __ SmiTst(object, at); | |
2743 EmitFalseBranch(instr, eq, at, Operand(zero_reg)); | |
2744 } | |
2745 | |
2746 // Loop through the {object}'s prototype chain looking for the {prototype}. | |
2747 __ ld(object_map, FieldMemOperand(object, HeapObject::kMapOffset)); | |
2748 Label loop; | |
2749 __ bind(&loop); | |
2750 __ ld(object_prototype, FieldMemOperand(object_map, Map::kPrototypeOffset)); | |
2751 EmitTrueBranch(instr, eq, object_prototype, Operand(prototype)); | |
2752 __ LoadRoot(at, Heap::kNullValueRootIndex); | |
2753 EmitFalseBranch(instr, eq, object_prototype, Operand(at)); | |
2754 __ Branch(&loop, USE_DELAY_SLOT); | |
2755 __ ld(object_map, FieldMemOperand(object_prototype, | |
2756 HeapObject::kMapOffset)); // In delay slot. | |
2757 } | |
2758 | |
2759 | |
2760 void LCodeGen::DoCmpT(LCmpT* instr) { | |
2761 DCHECK(ToRegister(instr->context()).is(cp)); | |
2762 Token::Value op = instr->op(); | |
2763 | |
2764 Handle<Code> ic = | |
2765 CodeFactory::CompareIC(isolate(), op, instr->strength()).code(); | |
2766 CallCode(ic, RelocInfo::CODE_TARGET, instr); | |
2767 // On MIPS there is no need for a "no inlined smi code" marker (nop). | |
2768 | |
2769 Condition condition = ComputeCompareCondition(op); | |
2770 // A minor optimization that relies on LoadRoot always emitting one | |
2771 // instruction. | |
2772 Assembler::BlockTrampolinePoolScope block_trampoline_pool(masm()); | |
2773 Label done, check; | |
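     // The first LoadRoot lands in the branch delay slot, so the true value | |
     // is always loaded; when the branch falls through, the second LoadRoot | |
     // simply overwrites it with the false value. | |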
2774 __ Branch(USE_DELAY_SLOT, &done, condition, v0, Operand(zero_reg)); | |
2775 __ bind(&check); | |
2776 __ LoadRoot(ToRegister(instr->result()), Heap::kTrueValueRootIndex); | |
2777 DCHECK_EQ(1, masm()->InstructionsGeneratedSince(&check)); | |
2778 __ LoadRoot(ToRegister(instr->result()), Heap::kFalseValueRootIndex); | |
2779 __ bind(&done); | |
2780 } | |
2781 | |
2782 | |
2783 void LCodeGen::DoReturn(LReturn* instr) { | |
2784 if (FLAG_trace && info()->IsOptimizing()) { | |
2785 // Push the return value on the stack as the parameter. | |
2786 // Runtime::TraceExit returns its parameter in v0. Since we're leaving | |
2787 // the code managed by the register allocator and tearing down the frame, | |
2788 // it's safe to write to the context register. | |
2789 __ push(v0); | |
2790 __ ld(cp, MemOperand(fp, StandardFrameConstants::kContextOffset)); | |
2791 __ CallRuntime(Runtime::kTraceExit, 1); | |
2792 } | |
2793 if (info()->saves_caller_doubles()) { | |
2794 RestoreCallerDoubles(); | |
2795 } | |
2796 if (NeedsEagerFrame()) { | |
2797 __ mov(sp, fp); | |
2798 __ Pop(ra, fp); | |
2799 } | |
2800 if (instr->has_constant_parameter_count()) { | |
2801 int parameter_count = ToInteger32(instr->constant_parameter_count()); | |
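     // parameter_count excludes the receiver, hence the +1 below. | |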
2802 int32_t sp_delta = (parameter_count + 1) * kPointerSize; | |
2803 if (sp_delta != 0) { | |
2804 __ Daddu(sp, sp, Operand(sp_delta)); | |
2805 } | |
2806 } else { | |
2807 DCHECK(info()->IsStub()); // Functions would need to drop one more value. | |
2808 Register reg = ToRegister(instr->parameter_count()); | |
2809 // The argument count parameter is a smi. | |
2810 __ SmiUntag(reg); | |
2811 __ dsll(at, reg, kPointerSizeLog2); | |
2812 __ Daddu(sp, sp, at); | |
2813 } | |
2814 | |
2815 __ Jump(ra); | |
2816 } | |
2817 | |
2818 | |
2819 template <class T> | |
2820 void LCodeGen::EmitVectorLoadICRegisters(T* instr) { | |
2821 Register vector_register = ToRegister(instr->temp_vector()); | |
2822 Register slot_register = LoadWithVectorDescriptor::SlotRegister(); | |
2823 DCHECK(vector_register.is(LoadWithVectorDescriptor::VectorRegister())); | |
2824 DCHECK(slot_register.is(a0)); | |
2825 | |
2826 AllowDeferredHandleDereference vector_structure_check; | |
2827 Handle<TypeFeedbackVector> vector = instr->hydrogen()->feedback_vector(); | |
2828 __ li(vector_register, vector); | |
2829 // The slot register is fixed by the IC descriptor; no need to allocate it. | |
2830 FeedbackVectorSlot slot = instr->hydrogen()->slot(); | |
2831 int index = vector->GetIndex(slot); | |
2832 __ li(slot_register, Operand(Smi::FromInt(index))); | |
2833 } | |
2834 | |
2835 | |
2836 template <class T> | |
2837 void LCodeGen::EmitVectorStoreICRegisters(T* instr) { | |
2838 Register vector_register = ToRegister(instr->temp_vector()); | |
2839 Register slot_register = ToRegister(instr->temp_slot()); | |
2840 | |
2841 AllowDeferredHandleDereference vector_structure_check; | |
2842 Handle<TypeFeedbackVector> vector = instr->hydrogen()->feedback_vector(); | |
2843 __ li(vector_register, vector); | |
2844 FeedbackVectorSlot slot = instr->hydrogen()->slot(); | |
2845 int index = vector->GetIndex(slot); | |
2846 __ li(slot_register, Operand(Smi::FromInt(index))); | |
2847 } | |
2848 | |
2849 | |
2850 void LCodeGen::DoLoadGlobalGeneric(LLoadGlobalGeneric* instr) { | |
2851 DCHECK(ToRegister(instr->context()).is(cp)); | |
2852 DCHECK(ToRegister(instr->global_object()) | |
2853 .is(LoadDescriptor::ReceiverRegister())); | |
2854 DCHECK(ToRegister(instr->result()).is(v0)); | |
2855 | |
2856 __ li(LoadDescriptor::NameRegister(), Operand(instr->name())); | |
2857 EmitVectorLoadICRegisters<LLoadGlobalGeneric>(instr); | |
2858 Handle<Code> ic = | |
2859 CodeFactory::LoadICInOptimizedCode(isolate(), instr->typeof_mode(), | |
2860 SLOPPY, PREMONOMORPHIC).code(); | |
2861 CallCode(ic, RelocInfo::CODE_TARGET, instr); | |
2862 } | |
2863 | |
2864 | |
2865 void LCodeGen::DoLoadGlobalViaContext(LLoadGlobalViaContext* instr) { | |
2866 DCHECK(ToRegister(instr->context()).is(cp)); | |
2867 DCHECK(ToRegister(instr->result()).is(v0)); | |
2868 | |
2869 int const slot = instr->slot_index(); | |
2870 int const depth = instr->depth(); | |
2871 if (depth <= LoadGlobalViaContextStub::kMaximumDepth) { | |
2872 __ li(LoadGlobalViaContextDescriptor::SlotRegister(), Operand(slot)); | |
2873 Handle<Code> stub = | |
2874 CodeFactory::LoadGlobalViaContext(isolate(), depth).code(); | |
2875 CallCode(stub, RelocInfo::CODE_TARGET, instr); | |
2876 } else { | |
2877 __ Push(Smi::FromInt(slot)); | |
2878 __ CallRuntime(Runtime::kLoadGlobalViaContext, 1); | |
2879 } | |
2880 } | |
2881 | |
2882 | |
2883 void LCodeGen::DoLoadContextSlot(LLoadContextSlot* instr) { | |
2884 Register context = ToRegister(instr->context()); | |
2885 Register result = ToRegister(instr->result()); | |
2886 | |
2887 __ ld(result, ContextOperand(context, instr->slot_index())); | |
2888 if (instr->hydrogen()->RequiresHoleCheck()) { | |
2889 __ LoadRoot(at, Heap::kTheHoleValueRootIndex); | |
2890 | |
2891 if (instr->hydrogen()->DeoptimizesOnHole()) { | |
2892 DeoptimizeIf(eq, instr, Deoptimizer::kHole, result, Operand(at)); | |
2893 } else { | |
2894 Label is_not_hole; | |
2895 __ Branch(&is_not_hole, ne, result, Operand(at)); | |
2896 __ LoadRoot(result, Heap::kUndefinedValueRootIndex); | |
2897 __ bind(&is_not_hole); | |
2898 } | |
2899 } | |
2900 } | |
2901 | |
2902 | |
2903 void LCodeGen::DoStoreContextSlot(LStoreContextSlot* instr) { | |
2904 Register context = ToRegister(instr->context()); | |
2905 Register value = ToRegister(instr->value()); | |
2906 Register scratch = scratch0(); | |
2907 MemOperand target = ContextOperand(context, instr->slot_index()); | |
2908 | |
2909 Label skip_assignment; | |
2910 | |
2911 if (instr->hydrogen()->RequiresHoleCheck()) { | |
2912 __ ld(scratch, target); | |
2913 __ LoadRoot(at, Heap::kTheHoleValueRootIndex); | |
2914 | |
2915 if (instr->hydrogen()->DeoptimizesOnHole()) { | |
2916 DeoptimizeIf(eq, instr, Deoptimizer::kHole, scratch, Operand(at)); | |
2917 } else { | |
2918 __ Branch(&skip_assignment, ne, scratch, Operand(at)); | |
2919 } | |
2920 } | |
2921 | |
2922 __ sd(value, target); | |
2923 if (instr->hydrogen()->NeedsWriteBarrier()) { | |
2924 SmiCheck check_needed = | |
2925 instr->hydrogen()->value()->type().IsHeapObject() | |
2926 ? OMIT_SMI_CHECK : INLINE_SMI_CHECK; | |
2927 __ RecordWriteContextSlot(context, | |
2928 target.offset(), | |
2929 value, | |
2930 scratch0(), | |
2931 GetRAState(), | |
2932 kSaveFPRegs, | |
2933 EMIT_REMEMBERED_SET, | |
2934 check_needed); | |
2935 } | |
2936 | |
2937 __ bind(&skip_assignment); | |
2938 } | |
2939 | |
2940 | |
2941 void LCodeGen::DoLoadNamedField(LLoadNamedField* instr) { | |
2942 HObjectAccess access = instr->hydrogen()->access(); | |
2943 int offset = access.offset(); | |
2944 Register object = ToRegister(instr->object()); | |
2945 if (access.IsExternalMemory()) { | |
2946 Register result = ToRegister(instr->result()); | |
2947 MemOperand operand = MemOperand(object, offset); | |
2948 __ Load(result, operand, access.representation()); | |
2949 return; | |
2950 } | |
2951 | |
2952 if (instr->hydrogen()->representation().IsDouble()) { | |
2953 DoubleRegister result = ToDoubleRegister(instr->result()); | |
2954 __ ldc1(result, FieldMemOperand(object, offset)); | |
2955 return; | |
2956 } | |
2957 | |
2958 Register result = ToRegister(instr->result()); | |
2959 if (!access.IsInobject()) { | |
2960 __ ld(result, FieldMemOperand(object, JSObject::kPropertiesOffset)); | |
2961 object = result; | |
2962 } | |
2963 | |
2964 Representation representation = access.representation(); | |
2965 if (representation.IsSmi() && SmiValuesAre32Bits() && | |
2966 instr->hydrogen()->representation().IsInteger32()) { | |
2967 if (FLAG_debug_code) { | |
2968       // Verify that this is really a Smi. | |
2969 Register scratch = scratch0(); | |
2970 __ Load(scratch, FieldMemOperand(object, offset), representation); | |
2971 __ AssertSmi(scratch); | |
2972 } | |
2973 | |
2974 // Read int value directly from upper half of the smi. | |
2975 STATIC_ASSERT(kSmiTag == 0); | |
2976 STATIC_ASSERT(kSmiTagSize + kSmiShiftSize == 32); | |
2977 offset = SmiWordOffset(offset); | |
2978 representation = Representation::Integer32(); | |
2979 } | |
2980 __ Load(result, FieldMemOperand(object, offset), representation); | |
2981 } | |
2982 | |
2983 | |
2984 void LCodeGen::DoLoadNamedGeneric(LLoadNamedGeneric* instr) { | |
2985 DCHECK(ToRegister(instr->context()).is(cp)); | |
2986 DCHECK(ToRegister(instr->object()).is(LoadDescriptor::ReceiverRegister())); | |
2987 DCHECK(ToRegister(instr->result()).is(v0)); | |
2988 | |
2989 // Name is always in a2. | |
2990 __ li(LoadDescriptor::NameRegister(), Operand(instr->name())); | |
2991 EmitVectorLoadICRegisters<LLoadNamedGeneric>(instr); | |
2992 Handle<Code> ic = | |
2993 CodeFactory::LoadICInOptimizedCode( | |
2994 isolate(), NOT_INSIDE_TYPEOF, instr->hydrogen()->language_mode(), | |
2995 instr->hydrogen()->initialization_state()).code(); | |
2996 CallCode(ic, RelocInfo::CODE_TARGET, instr); | |
2997 } | |
2998 | |
2999 | |
3000 void LCodeGen::DoLoadFunctionPrototype(LLoadFunctionPrototype* instr) { | |
3001 Register scratch = scratch0(); | |
3002 Register function = ToRegister(instr->function()); | |
3003 Register result = ToRegister(instr->result()); | |
3004 | |
3005 // Get the prototype or initial map from the function. | |
3006 __ ld(result, | |
3007 FieldMemOperand(function, JSFunction::kPrototypeOrInitialMapOffset)); | |
3008 | |
3009 // Check that the function has a prototype or an initial map. | |
3010 __ LoadRoot(at, Heap::kTheHoleValueRootIndex); | |
3011 DeoptimizeIf(eq, instr, Deoptimizer::kHole, result, Operand(at)); | |
3012 | |
3013 // If the function does not have an initial map, we're done. | |
3014 Label done; | |
3015 __ GetObjectType(result, scratch, scratch); | |
3016 __ Branch(&done, ne, scratch, Operand(MAP_TYPE)); | |
3017 | |
3018 // Get the prototype from the initial map. | |
3019 __ ld(result, FieldMemOperand(result, Map::kPrototypeOffset)); | |
3020 | |
3021 // All done. | |
3022 __ bind(&done); | |
3023 } | |
3024 | |
3025 | |
3026 void LCodeGen::DoLoadRoot(LLoadRoot* instr) { | |
3027 Register result = ToRegister(instr->result()); | |
3028 __ LoadRoot(result, instr->index()); | |
3029 } | |
3030 | |
3031 | |
3032 void LCodeGen::DoAccessArgumentsAt(LAccessArgumentsAt* instr) { | |
3033 Register arguments = ToRegister(instr->arguments()); | |
3034 Register result = ToRegister(instr->result()); | |
3035   // There are two words between the frame pointer and the last argument. | |
3036   // Subtracting from length accounts for one of them; add one for the other. | |
3037 if (instr->length()->IsConstantOperand()) { | |
3038 int const_length = ToInteger32(LConstantOperand::cast(instr->length())); | |
3039 if (instr->index()->IsConstantOperand()) { | |
3040 int const_index = ToInteger32(LConstantOperand::cast(instr->index())); | |
3041 int index = (const_length - const_index) + 1; | |
3042 __ ld(result, MemOperand(arguments, index * kPointerSize)); | |
3043 } else { | |
3044 Register index = ToRegister(instr->index()); | |
3045 __ li(at, Operand(const_length + 1)); | |
3046 __ Dsubu(result, at, index); | |
3047 __ dsll(at, result, kPointerSizeLog2); | |
3048 __ Daddu(at, arguments, at); | |
3049 __ ld(result, MemOperand(at)); | |
3050 } | |
3051 } else if (instr->index()->IsConstantOperand()) { | |
3052 Register length = ToRegister(instr->length()); | |
3053 int const_index = ToInteger32(LConstantOperand::cast(instr->index())); | |
3054 int loc = const_index - 1; | |
3055 if (loc != 0) { | |
3056 __ Dsubu(result, length, Operand(loc)); | |
3057 __ dsll(at, result, kPointerSizeLog2); | |
3058 __ Daddu(at, arguments, at); | |
3059 __ ld(result, MemOperand(at)); | |
3060 } else { | |
3061 __ dsll(at, length, kPointerSizeLog2); | |
3062 __ Daddu(at, arguments, at); | |
3063 __ ld(result, MemOperand(at)); | |
3064 } | |
3065 } else { | |
3066 Register length = ToRegister(instr->length()); | |
3067 Register index = ToRegister(instr->index()); | |
3068 __ Dsubu(result, length, index); | |
3069 __ Daddu(result, result, 1); | |
3070 __ dsll(at, result, kPointerSizeLog2); | |
3071 __ Daddu(at, arguments, at); | |
3072 __ ld(result, MemOperand(at)); | |
3073 } | |
3074 } | |
3075 | |
3076 | |
3077 void LCodeGen::DoLoadKeyedExternalArray(LLoadKeyed* instr) { | |
3078 Register external_pointer = ToRegister(instr->elements()); | |
3079 Register key = no_reg; | |
3080 ElementsKind elements_kind = instr->elements_kind(); | |
3081 bool key_is_constant = instr->key()->IsConstantOperand(); | |
3082 int constant_key = 0; | |
3083 if (key_is_constant) { | |
3084 constant_key = ToInteger32(LConstantOperand::cast(instr->key())); | |
3085 if (constant_key & 0xF0000000) { | |
3086 Abort(kArrayIndexConstantValueTooBig); | |
3087 } | |
3088 } else { | |
3089 key = ToRegister(instr->key()); | |
3090 } | |
3091 int element_size_shift = ElementsKindToShiftSize(elements_kind); | |
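  // With 32-bit smis, a smi key keeps its payload in the upper word, so | |
  // scaling it by the element size is a net right shift; shift_size is | |
  // therefore negative for smi keys. | |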
3092 int shift_size = (instr->hydrogen()->key()->representation().IsSmi()) | |
3093 ? (element_size_shift - (kSmiTagSize + kSmiShiftSize)) | |
3094 : element_size_shift; | |
3095 int base_offset = instr->base_offset(); | |
3096 | |
3097 if (elements_kind == FLOAT32_ELEMENTS || elements_kind == FLOAT64_ELEMENTS) { | |
3098 FPURegister result = ToDoubleRegister(instr->result()); | |
3099 if (key_is_constant) { | |
3100 __ Daddu(scratch0(), external_pointer, | |
3101 constant_key << element_size_shift); | |
3102 } else { | |
3103 if (shift_size < 0) { | |
3104 if (shift_size == -32) { | |
3105 __ dsra32(scratch0(), key, 0); | |
3106 } else { | |
3107 __ dsra(scratch0(), key, -shift_size); | |
3108 } | |
3109 } else { | |
3110 __ dsll(scratch0(), key, shift_size); | |
3111 } | |
3112 __ Daddu(scratch0(), scratch0(), external_pointer); | |
3113 } | |
3114 if (elements_kind == FLOAT32_ELEMENTS) { | |
3115 __ lwc1(result, MemOperand(scratch0(), base_offset)); | |
3116 __ cvt_d_s(result, result); | |
3117     } else {  // i.e. elements_kind == FLOAT64_ELEMENTS | |
3118 __ ldc1(result, MemOperand(scratch0(), base_offset)); | |
3119 } | |
3120 } else { | |
3121 Register result = ToRegister(instr->result()); | |
3122 MemOperand mem_operand = PrepareKeyedOperand( | |
3123 key, external_pointer, key_is_constant, constant_key, | |
3124 element_size_shift, shift_size, base_offset); | |
3125 switch (elements_kind) { | |
3126 case INT8_ELEMENTS: | |
3127 __ lb(result, mem_operand); | |
3128 break; | |
3129 case UINT8_ELEMENTS: | |
3130 case UINT8_CLAMPED_ELEMENTS: | |
3131 __ lbu(result, mem_operand); | |
3132 break; | |
3133 case INT16_ELEMENTS: | |
3134 __ lh(result, mem_operand); | |
3135 break; | |
3136 case UINT16_ELEMENTS: | |
3137 __ lhu(result, mem_operand); | |
3138 break; | |
3139 case INT32_ELEMENTS: | |
3140 __ lw(result, mem_operand); | |
3141 break; | |
3142 case UINT32_ELEMENTS: | |
3143 __ lw(result, mem_operand); | |
3144 if (!instr->hydrogen()->CheckFlag(HInstruction::kUint32)) { | |
3145 DeoptimizeIf(Ugreater_equal, instr, Deoptimizer::kNegativeValue, | |
3146 result, Operand(0x80000000)); | |
3147 } | |
3148 break; | |
3149 case FLOAT32_ELEMENTS: | |
3150 case FLOAT64_ELEMENTS: | |
3151 case FAST_DOUBLE_ELEMENTS: | |
3152 case FAST_ELEMENTS: | |
3153 case FAST_SMI_ELEMENTS: | |
3154 case FAST_HOLEY_DOUBLE_ELEMENTS: | |
3155 case FAST_HOLEY_ELEMENTS: | |
3156 case FAST_HOLEY_SMI_ELEMENTS: | |
3157 case DICTIONARY_ELEMENTS: | |
3158 case FAST_SLOPPY_ARGUMENTS_ELEMENTS: | |
3159 case SLOW_SLOPPY_ARGUMENTS_ELEMENTS: | |
3160 UNREACHABLE(); | |
3161 break; | |
3162 } | |
3163 } | |
3164 } | |
3165 | |
3166 | |
3167 void LCodeGen::DoLoadKeyedFixedDoubleArray(LLoadKeyed* instr) { | |
3168 Register elements = ToRegister(instr->elements()); | |
3169 bool key_is_constant = instr->key()->IsConstantOperand(); | |
3170 Register key = no_reg; | |
3171 DoubleRegister result = ToDoubleRegister(instr->result()); | |
3172 Register scratch = scratch0(); | |
3173 | |
3174 int element_size_shift = ElementsKindToShiftSize(FAST_DOUBLE_ELEMENTS); | |
3175 | |
3176 int base_offset = instr->base_offset(); | |
3177 if (key_is_constant) { | |
3178 int constant_key = ToInteger32(LConstantOperand::cast(instr->key())); | |
3179 if (constant_key & 0xF0000000) { | |
3180 Abort(kArrayIndexConstantValueTooBig); | |
3181 } | |
3182 base_offset += constant_key * kDoubleSize; | |
3183 } | |
3184 __ Daddu(scratch, elements, Operand(base_offset)); | |
3185 | |
3186 if (!key_is_constant) { | |
3187 key = ToRegister(instr->key()); | |
3188 int shift_size = (instr->hydrogen()->key()->representation().IsSmi()) | |
3189 ? (element_size_shift - (kSmiTagSize + kSmiShiftSize)) | |
3190 : element_size_shift; | |
3191 if (shift_size > 0) { | |
3192 __ dsll(at, key, shift_size); | |
3193 } else if (shift_size == -32) { | |
3194 __ dsra32(at, key, 0); | |
3195 } else { | |
3196 __ dsra(at, key, -shift_size); | |
3197 } | |
3198 __ Daddu(scratch, scratch, at); | |
3199 } | |
3200 | |
3201 __ ldc1(result, MemOperand(scratch)); | |
3202 | |
3203 if (instr->hydrogen()->RequiresHoleCheck()) { | |
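    // The hole is a NaN with a distinguished upper word, so inspecting only | |
    // the high 32 bits of the loaded double is enough to detect it. | |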
3204 __ FmoveHigh(scratch, result); | |
3205 DeoptimizeIf(eq, instr, Deoptimizer::kHole, scratch, | |
3206 Operand(static_cast<int32_t>(kHoleNanUpper32))); | |
3207 } | |
3208 } | |
3209 | |
3210 | |
3211 void LCodeGen::DoLoadKeyedFixedArray(LLoadKeyed* instr) { | |
3212 HLoadKeyed* hinstr = instr->hydrogen(); | |
3213 Register elements = ToRegister(instr->elements()); | |
3214 Register result = ToRegister(instr->result()); | |
3215 Register scratch = scratch0(); | |
3216 Register store_base = scratch; | |
3217 int offset = instr->base_offset(); | |
3218 | |
3219 if (instr->key()->IsConstantOperand()) { | |
3220 LConstantOperand* const_operand = LConstantOperand::cast(instr->key()); | |
3221 offset += ToInteger32(const_operand) * kPointerSize; | |
3222 store_base = elements; | |
3223 } else { | |
3224 Register key = ToRegister(instr->key()); | |
3225 // Even though the HLoadKeyed instruction forces the input | |
3226 // representation for the key to be an integer, the input gets replaced | |
3227     // during bounds check elimination with the index argument to the bounds | |
3228 // check, which can be tagged, so that case must be handled here, too. | |
3229 if (instr->hydrogen()->key()->representation().IsSmi()) { | |
3230 __ SmiScale(scratch, key, kPointerSizeLog2); | |
3231 __ daddu(scratch, elements, scratch); | |
3232 } else { | |
3233 __ dsll(scratch, key, kPointerSizeLog2); | |
3234 __ daddu(scratch, elements, scratch); | |
3235 } | |
3236 } | |
3237 | |
3238 Representation representation = hinstr->representation(); | |
3239 if (representation.IsInteger32() && SmiValuesAre32Bits() && | |
3240 hinstr->elements_kind() == FAST_SMI_ELEMENTS) { | |
3241 DCHECK(!hinstr->RequiresHoleCheck()); | |
3242 if (FLAG_debug_code) { | |
3243 Register temp = scratch1(); | |
3244 __ Load(temp, MemOperand(store_base, offset), Representation::Smi()); | |
3245 __ AssertSmi(temp); | |
3246 } | |
3247 | |
3248 // Read int value directly from upper half of the smi. | |
3249 STATIC_ASSERT(kSmiTag == 0); | |
3250 STATIC_ASSERT(kSmiTagSize + kSmiShiftSize == 32); | |
3251 offset = SmiWordOffset(offset); | |
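    // SmiWordOffset selects the 32-bit word that holds the smi payload (the | |
    // adjustment depends on the target's endianness). | |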
3252 } | |
3253 | |
3254 __ Load(result, MemOperand(store_base, offset), representation); | |
3255 | |
3256 // Check for the hole value. | |
3257 if (hinstr->RequiresHoleCheck()) { | |
3258 if (IsFastSmiElementsKind(instr->hydrogen()->elements_kind())) { | |
3259 __ SmiTst(result, scratch); | |
3260 DeoptimizeIf(ne, instr, Deoptimizer::kNotASmi, scratch, | |
3261 Operand(zero_reg)); | |
3262 } else { | |
3263 __ LoadRoot(scratch, Heap::kTheHoleValueRootIndex); | |
3264 DeoptimizeIf(eq, instr, Deoptimizer::kHole, result, Operand(scratch)); | |
3265 } | |
3266 } else if (instr->hydrogen()->hole_mode() == CONVERT_HOLE_TO_UNDEFINED) { | |
3267 DCHECK(instr->hydrogen()->elements_kind() == FAST_HOLEY_ELEMENTS); | |
3268 Label done; | |
3269 __ LoadRoot(scratch, Heap::kTheHoleValueRootIndex); | |
3270 __ Branch(&done, ne, result, Operand(scratch)); | |
3271 if (info()->IsStub()) { | |
3272 // A stub can safely convert the hole to undefined only if the array | |
3273 // protector cell contains (Smi) Isolate::kArrayProtectorValid. Otherwise | |
3274 // it needs to bail out. | |
3275 __ LoadRoot(result, Heap::kArrayProtectorRootIndex); | |
3276       // The comparison only needs the least-significant bits of the smi value. | |
3277 __ ld(result, FieldMemOperand(result, Cell::kValueOffset)); | |
3278 DeoptimizeIf(ne, instr, Deoptimizer::kHole, result, | |
3279 Operand(Smi::FromInt(Isolate::kArrayProtectorValid))); | |
3280 } | |
3281 __ LoadRoot(result, Heap::kUndefinedValueRootIndex); | |
3282 __ bind(&done); | |
3283 } | |
3284 } | |
3285 | |
3286 | |
3287 void LCodeGen::DoLoadKeyed(LLoadKeyed* instr) { | |
3288 if (instr->is_fixed_typed_array()) { | |
3289 DoLoadKeyedExternalArray(instr); | |
3290 } else if (instr->hydrogen()->representation().IsDouble()) { | |
3291 DoLoadKeyedFixedDoubleArray(instr); | |
3292 } else { | |
3293 DoLoadKeyedFixedArray(instr); | |
3294 } | |
3295 } | |
3296 | |
3297 | |
3298 MemOperand LCodeGen::PrepareKeyedOperand(Register key, | |
3299 Register base, | |
3300 bool key_is_constant, | |
3301 int constant_key, | |
3302 int element_size, | |
3303 int shift_size, | |
3304 int base_offset) { | |
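  // Builds a MemOperand addressing base + (key << shift) + base_offset, | |
  // folding a constant key into the immediate. A negative shift_size means | |
  // the key is a smi whose payload occupies the upper 32 bits. | |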
3305 if (key_is_constant) { | |
3306 return MemOperand(base, (constant_key << element_size) + base_offset); | |
3307 } | |
3308 | |
3309 if (base_offset == 0) { | |
3310 if (shift_size >= 0) { | |
3311 __ dsll(scratch0(), key, shift_size); | |
3312 __ Daddu(scratch0(), base, scratch0()); | |
3313 return MemOperand(scratch0()); | |
3314 } else { | |
3315 if (shift_size == -32) { | |
3316 __ dsra32(scratch0(), key, 0); | |
3317 } else { | |
3318 __ dsra(scratch0(), key, -shift_size); | |
3319 } | |
3320 __ Daddu(scratch0(), base, scratch0()); | |
3321 return MemOperand(scratch0()); | |
3322 } | |
3323 } | |
3324 | |
3325 if (shift_size >= 0) { | |
3326 __ dsll(scratch0(), key, shift_size); | |
3327 __ Daddu(scratch0(), base, scratch0()); | |
3328 return MemOperand(scratch0(), base_offset); | |
3329 } else { | |
3330 if (shift_size == -32) { | |
3331 __ dsra32(scratch0(), key, 0); | |
3332 } else { | |
3333 __ dsra(scratch0(), key, -shift_size); | |
3334 } | |
3335 __ Daddu(scratch0(), base, scratch0()); | |
3336 return MemOperand(scratch0(), base_offset); | |
3337 } | |
3338 } | |
3339 | |
3340 | |
3341 void LCodeGen::DoLoadKeyedGeneric(LLoadKeyedGeneric* instr) { | |
3342 DCHECK(ToRegister(instr->context()).is(cp)); | |
3343 DCHECK(ToRegister(instr->object()).is(LoadDescriptor::ReceiverRegister())); | |
3344 DCHECK(ToRegister(instr->key()).is(LoadDescriptor::NameRegister())); | |
3345 | |
3346 if (instr->hydrogen()->HasVectorAndSlot()) { | |
3347 EmitVectorLoadICRegisters<LLoadKeyedGeneric>(instr); | |
3348 } | |
3349 | |
3350 Handle<Code> ic = CodeFactory::KeyedLoadICInOptimizedCode( | |
3351 isolate(), instr->hydrogen()->language_mode(), | |
3352 instr->hydrogen()->initialization_state()).code(); | |
3353 CallCode(ic, RelocInfo::CODE_TARGET, instr); | |
3354 } | |
3355 | |
3356 | |
3357 void LCodeGen::DoArgumentsElements(LArgumentsElements* instr) { | |
3358 Register scratch = scratch0(); | |
3359 Register temp = scratch1(); | |
3360 Register result = ToRegister(instr->result()); | |
3361 | |
3362 if (instr->hydrogen()->from_inlined()) { | |
3363 __ Dsubu(result, sp, 2 * kPointerSize); | |
3364 } else { | |
3365     // Check if the calling frame is an arguments adaptor frame. | |
3367 __ ld(scratch, MemOperand(fp, StandardFrameConstants::kCallerFPOffset)); | |
3368 __ ld(result, MemOperand(scratch, StandardFrameConstants::kContextOffset)); | |
3369 __ Xor(temp, result, Operand(Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR))); | |
3370 | |
3371 // Result is the frame pointer for the frame if not adapted and for the real | |
3372 // frame below the adaptor frame if adapted. | |
3373 __ Movn(result, fp, temp); // Move only if temp is not equal to zero (ne). | |
3374 __ Movz(result, scratch, temp); // Move only if temp is equal to zero (eq). | |
3375 } | |
3376 } | |
3377 | |
3378 | |
3379 void LCodeGen::DoArgumentsLength(LArgumentsLength* instr) { | |
3380 Register elem = ToRegister(instr->elements()); | |
3381 Register result = ToRegister(instr->result()); | |
3382 | |
3383 Label done; | |
3384 | |
3385   // If there is no arguments adaptor frame, the number of arguments is fixed. | |
3386 __ Daddu(result, zero_reg, Operand(scope()->num_parameters())); | |
3387 __ Branch(&done, eq, fp, Operand(elem)); | |
3388 | |
3389 // Arguments adaptor frame present. Get argument length from there. | |
3390 __ ld(result, MemOperand(fp, StandardFrameConstants::kCallerFPOffset)); | |
3391 __ ld(result, | |
3392 MemOperand(result, ArgumentsAdaptorFrameConstants::kLengthOffset)); | |
3393 __ SmiUntag(result); | |
3394 | |
3395 // Argument length is in result register. | |
3396 __ bind(&done); | |
3397 } | |
3398 | |
3399 | |
3400 void LCodeGen::DoWrapReceiver(LWrapReceiver* instr) { | |
3401 Register receiver = ToRegister(instr->receiver()); | |
3402 Register function = ToRegister(instr->function()); | |
3403 Register result = ToRegister(instr->result()); | |
3404 Register scratch = scratch0(); | |
3405 | |
3406 // If the receiver is null or undefined, we have to pass the global | |
3407 // object as a receiver to normal functions. Values have to be | |
3408 // passed unchanged to builtins and strict-mode functions. | |
3409 Label global_object, result_in_receiver; | |
3410 | |
3411 if (!instr->hydrogen()->known_function()) { | |
3412 // Do not transform the receiver to object for strict mode functions. | |
3413 __ ld(scratch, | |
3414 FieldMemOperand(function, JSFunction::kSharedFunctionInfoOffset)); | |
3415 | |
3416 // Do not transform the receiver to object for builtins. | |
3417 int32_t strict_mode_function_mask = | |
3418 1 << SharedFunctionInfo::kStrictModeBitWithinByte; | |
3419 int32_t native_mask = 1 << SharedFunctionInfo::kNativeBitWithinByte; | |
3420 | |
3421 __ lbu(at, | |
3422 FieldMemOperand(scratch, SharedFunctionInfo::kStrictModeByteOffset)); | |
3423 __ And(at, at, Operand(strict_mode_function_mask)); | |
3424 __ Branch(&result_in_receiver, ne, at, Operand(zero_reg)); | |
3425 __ lbu(at, | |
3426 FieldMemOperand(scratch, SharedFunctionInfo::kNativeByteOffset)); | |
3427 __ And(at, at, Operand(native_mask)); | |
3428 __ Branch(&result_in_receiver, ne, at, Operand(zero_reg)); | |
3429 } | |
3430 | |
3431 // Normal function. Replace undefined or null with global receiver. | |
3432 __ LoadRoot(scratch, Heap::kNullValueRootIndex); | |
3433 __ Branch(&global_object, eq, receiver, Operand(scratch)); | |
3434 __ LoadRoot(scratch, Heap::kUndefinedValueRootIndex); | |
3435 __ Branch(&global_object, eq, receiver, Operand(scratch)); | |
3436 | |
3437 // Deoptimize if the receiver is not a JS object. | |
3438 __ SmiTst(receiver, scratch); | |
3439 DeoptimizeIf(eq, instr, Deoptimizer::kSmi, scratch, Operand(zero_reg)); | |
3440 | |
3441 __ GetObjectType(receiver, scratch, scratch); | |
3442 DeoptimizeIf(lt, instr, Deoptimizer::kNotAJavaScriptObject, scratch, | |
3443 Operand(FIRST_SPEC_OBJECT_TYPE)); | |
3444 __ Branch(&result_in_receiver); | |
3445 | |
3446 __ bind(&global_object); | |
3447 __ ld(result, FieldMemOperand(function, JSFunction::kContextOffset)); | |
3448 __ ld(result, | |
3449 ContextOperand(result, Context::GLOBAL_OBJECT_INDEX)); | |
3450 __ ld(result, | |
3451 FieldMemOperand(result, GlobalObject::kGlobalProxyOffset)); | |
3452 | |
3453 if (result.is(receiver)) { | |
3454 __ bind(&result_in_receiver); | |
3455 } else { | |
3456 Label result_ok; | |
3457 __ Branch(&result_ok); | |
3458 __ bind(&result_in_receiver); | |
3459 __ mov(result, receiver); | |
3460 __ bind(&result_ok); | |
3461 } | |
3462 } | |
3463 | |
3464 | |
3465 void LCodeGen::DoApplyArguments(LApplyArguments* instr) { | |
3466 Register receiver = ToRegister(instr->receiver()); | |
3467 Register function = ToRegister(instr->function()); | |
3468 Register length = ToRegister(instr->length()); | |
3469 Register elements = ToRegister(instr->elements()); | |
3470 Register scratch = scratch0(); | |
3471 DCHECK(receiver.is(a0)); // Used for parameter count. | |
3472 DCHECK(function.is(a1)); // Required by InvokeFunction. | |
3473 DCHECK(ToRegister(instr->result()).is(v0)); | |
3474 | |
3475 // Copy the arguments to this function possibly from the | |
3476 // adaptor frame below it. | |
3477 const uint32_t kArgumentsLimit = 1 * KB; | |
3478 DeoptimizeIf(hi, instr, Deoptimizer::kTooManyArguments, length, | |
3479 Operand(kArgumentsLimit)); | |
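  // The limit guards the explicit per-argument push loop below; calls with | |
  // more arguments deoptimize and are handled by unoptimized code instead. | |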
3480 | |
3481 // Push the receiver and use the register to keep the original | |
3482 // number of arguments. | |
3483 __ push(receiver); | |
3484 __ Move(receiver, length); | |
3485   // The arguments start at a one-pointer-size offset from elements. | |
3486 __ Daddu(elements, elements, Operand(1 * kPointerSize)); | |
3487 | |
3488 // Loop through the arguments pushing them onto the execution | |
3489 // stack. | |
3490 Label invoke, loop; | |
3491 // length is a small non-negative integer, due to the test above. | |
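  // The dsll in each branch delay slot (USE_DELAY_SLOT) precomputes the byte | |
  // offset of the next argument while the branch resolves. | |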
3492 __ Branch(USE_DELAY_SLOT, &invoke, eq, length, Operand(zero_reg)); | |
3493 __ dsll(scratch, length, kPointerSizeLog2); | |
3494 __ bind(&loop); | |
3495 __ Daddu(scratch, elements, scratch); | |
3496 __ ld(scratch, MemOperand(scratch)); | |
3497 __ push(scratch); | |
3498 __ Dsubu(length, length, Operand(1)); | |
3499 __ Branch(USE_DELAY_SLOT, &loop, ne, length, Operand(zero_reg)); | |
3500 __ dsll(scratch, length, kPointerSizeLog2); | |
3501 | |
3502 __ bind(&invoke); | |
3503 DCHECK(instr->HasPointerMap()); | |
3504 LPointerMap* pointers = instr->pointer_map(); | |
3505 SafepointGenerator safepoint_generator( | |
3506 this, pointers, Safepoint::kLazyDeopt); | |
3507 // The number of arguments is stored in receiver which is a0, as expected | |
3508 // by InvokeFunction. | |
3509 ParameterCount actual(receiver); | |
3510 __ InvokeFunction(function, actual, CALL_FUNCTION, safepoint_generator); | |
3511 } | |
3512 | |
3513 | |
3514 void LCodeGen::DoPushArgument(LPushArgument* instr) { | |
3515 LOperand* argument = instr->value(); | |
3516 if (argument->IsDoubleRegister() || argument->IsDoubleStackSlot()) { | |
3517 Abort(kDoPushArgumentNotImplementedForDoubleType); | |
3518 } else { | |
3519 Register argument_reg = EmitLoadRegister(argument, at); | |
3520 __ push(argument_reg); | |
3521 } | |
3522 } | |
3523 | |
3524 | |
3525 void LCodeGen::DoDrop(LDrop* instr) { | |
3526 __ Drop(instr->count()); | |
3527 } | |
3528 | |
3529 | |
3530 void LCodeGen::DoThisFunction(LThisFunction* instr) { | |
3531 Register result = ToRegister(instr->result()); | |
3532 __ ld(result, MemOperand(fp, JavaScriptFrameConstants::kFunctionOffset)); | |
3533 } | |
3534 | |
3535 | |
3536 void LCodeGen::DoContext(LContext* instr) { | |
3537 // If there is a non-return use, the context must be moved to a register. | |
3538 Register result = ToRegister(instr->result()); | |
3539 if (info()->IsOptimizing()) { | |
3540 __ ld(result, MemOperand(fp, StandardFrameConstants::kContextOffset)); | |
3541 } else { | |
3542 // If there is no frame, the context must be in cp. | |
3543 DCHECK(result.is(cp)); | |
3544 } | |
3545 } | |
3546 | |
3547 | |
3548 void LCodeGen::DoDeclareGlobals(LDeclareGlobals* instr) { | |
3549 DCHECK(ToRegister(instr->context()).is(cp)); | |
3550 __ li(scratch0(), instr->hydrogen()->pairs()); | |
3551 __ li(scratch1(), Operand(Smi::FromInt(instr->hydrogen()->flags()))); | |
3552 __ Push(scratch0(), scratch1()); | |
3553 CallRuntime(Runtime::kDeclareGlobals, 2, instr); | |
3554 } | |
3555 | |
3556 | |
3557 void LCodeGen::CallKnownFunction(Handle<JSFunction> function, | |
3558 int formal_parameter_count, int arity, | |
3559 LInstruction* instr) { | |
3560 bool dont_adapt_arguments = | |
3561 formal_parameter_count == SharedFunctionInfo::kDontAdaptArgumentsSentinel; | |
3562 bool can_invoke_directly = | |
3563 dont_adapt_arguments || formal_parameter_count == arity; | |
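  // If the declared and actual argument counts match, or the callee ignores | |
  // its declared count, the arguments adaptor frame can be skipped and the | |
  // code entry called directly. | |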
3564 | |
3565 Register function_reg = a1; | |
3566 LPointerMap* pointers = instr->pointer_map(); | |
3567 | |
3568 if (can_invoke_directly) { | |
3569 // Change context. | |
3570 __ ld(cp, FieldMemOperand(function_reg, JSFunction::kContextOffset)); | |
3571 | |
3572 // Always initialize a0 to the number of actual arguments. | |
3573 __ li(a0, Operand(arity)); | |
3574 | |
3575 // Invoke function. | |
3576 __ ld(at, FieldMemOperand(function_reg, JSFunction::kCodeEntryOffset)); | |
3577 __ Call(at); | |
3578 | |
3579 // Set up deoptimization. | |
3580 RecordSafepointWithLazyDeopt(instr, RECORD_SIMPLE_SAFEPOINT); | |
3581 } else { | |
3582 SafepointGenerator generator(this, pointers, Safepoint::kLazyDeopt); | |
3583 ParameterCount count(arity); | |
3584 ParameterCount expected(formal_parameter_count); | |
3585 __ InvokeFunction(function_reg, expected, count, CALL_FUNCTION, generator); | |
3586 } | |
3587 } | |
3588 | |
3589 | |
3590 void LCodeGen::DoDeferredMathAbsTaggedHeapNumber(LMathAbs* instr) { | |
3591 DCHECK(instr->context() != NULL); | |
3592 DCHECK(ToRegister(instr->context()).is(cp)); | |
3593 Register input = ToRegister(instr->value()); | |
3594 Register result = ToRegister(instr->result()); | |
3595 Register scratch = scratch0(); | |
3596 | |
3597 // Deoptimize if not a heap number. | |
3598 __ ld(scratch, FieldMemOperand(input, HeapObject::kMapOffset)); | |
3599 __ LoadRoot(at, Heap::kHeapNumberMapRootIndex); | |
3600 DeoptimizeIf(ne, instr, Deoptimizer::kNotAHeapNumber, scratch, Operand(at)); | |
3601 | |
3602 Label done; | |
3603 Register exponent = scratch0(); | |
3604 scratch = no_reg; | |
3605 __ lwu(exponent, FieldMemOperand(input, HeapNumber::kExponentOffset)); | |
3606 // Check the sign of the argument. If the argument is positive, just | |
3607 // return it. | |
3608 __ Move(result, input); | |
3609 __ And(at, exponent, Operand(HeapNumber::kSignMask)); | |
3610 __ Branch(&done, eq, at, Operand(zero_reg)); | |
3611 | |
3612 // Input is negative. Reverse its sign. | |
3613 // Preserve the value of all registers. | |
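  // Heap numbers are immutable, so the absolute value is materialized in a | |
  // freshly allocated HeapNumber with the sign bit cleared. | |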
3614 { | |
3615 PushSafepointRegistersScope scope(this); | |
3616 | |
3617 // Registers were saved at the safepoint, so we can use | |
3618 // many scratch registers. | |
3619 Register tmp1 = input.is(a1) ? a0 : a1; | |
3620 Register tmp2 = input.is(a2) ? a0 : a2; | |
3621 Register tmp3 = input.is(a3) ? a0 : a3; | |
3622 Register tmp4 = input.is(a4) ? a0 : a4; | |
3623 | |
3624 // exponent: floating point exponent value. | |
3625 | |
3626 Label allocated, slow; | |
3627 __ LoadRoot(tmp4, Heap::kHeapNumberMapRootIndex); | |
3628 __ AllocateHeapNumber(tmp1, tmp2, tmp3, tmp4, &slow); | |
3629 __ Branch(&allocated); | |
3630 | |
3631 // Slow case: Call the runtime system to do the number allocation. | |
3632 __ bind(&slow); | |
3633 | |
3634 CallRuntimeFromDeferred(Runtime::kAllocateHeapNumber, 0, instr, | |
3635 instr->context()); | |
3636     // Set the pointer to the new heap number in tmp1. | |
3637     if (!tmp1.is(v0)) __ mov(tmp1, v0); | |
3639 // Restore input_reg after call to runtime. | |
3640 __ LoadFromSafepointRegisterSlot(input, input); | |
3641 __ lwu(exponent, FieldMemOperand(input, HeapNumber::kExponentOffset)); | |
3642 | |
3643 __ bind(&allocated); | |
3644 // exponent: floating point exponent value. | |
3645 // tmp1: allocated heap number. | |
3646 __ And(exponent, exponent, Operand(~HeapNumber::kSignMask)); | |
3647 __ sw(exponent, FieldMemOperand(tmp1, HeapNumber::kExponentOffset)); | |
3648 __ lwu(tmp2, FieldMemOperand(input, HeapNumber::kMantissaOffset)); | |
3649 __ sw(tmp2, FieldMemOperand(tmp1, HeapNumber::kMantissaOffset)); | |
3650 | |
3651 __ StoreToSafepointRegisterSlot(tmp1, result); | |
3652 } | |
3653 | |
3654 __ bind(&done); | |
3655 } | |
3656 | |
3657 | |
3658 void LCodeGen::EmitIntegerMathAbs(LMathAbs* instr) { | |
3659 Register input = ToRegister(instr->value()); | |
3660 Register result = ToRegister(instr->result()); | |
3661 Assembler::BlockTrampolinePoolScope block_trampoline_pool(masm_); | |
3662 Label done; | |
3663 __ Branch(USE_DELAY_SLOT, &done, ge, input, Operand(zero_reg)); | |
3664 __ mov(result, input); | |
3665 __ subu(result, zero_reg, input); | |
3666 // Overflow if result is still negative, i.e. 0x80000000. | |
3667 DeoptimizeIf(lt, instr, Deoptimizer::kOverflow, result, Operand(zero_reg)); | |
3668 __ bind(&done); | |
3669 } | |
3670 | |
3671 | |
3672 void LCodeGen::EmitSmiMathAbs(LMathAbs* instr) { | |
3673 Register input = ToRegister(instr->value()); | |
3674 Register result = ToRegister(instr->result()); | |
3675 Assembler::BlockTrampolinePoolScope block_trampoline_pool(masm_); | |
3676 Label done; | |
3677 __ Branch(USE_DELAY_SLOT, &done, ge, input, Operand(zero_reg)); | |
3678 __ mov(result, input); | |
3679 __ dsubu(result, zero_reg, input); | |
3680   // Overflow if result is still negative, i.e. 0x8000000000000000. | |
3681 DeoptimizeIf(lt, instr, Deoptimizer::kOverflow, result, Operand(zero_reg)); | |
3682 __ bind(&done); | |
3683 } | |
3684 | |
3685 | |
3686 void LCodeGen::DoMathAbs(LMathAbs* instr) { | |
3687 // Class for deferred case. | |
3688 class DeferredMathAbsTaggedHeapNumber final : public LDeferredCode { | |
3689 public: | |
3690 DeferredMathAbsTaggedHeapNumber(LCodeGen* codegen, LMathAbs* instr) | |
3691 : LDeferredCode(codegen), instr_(instr) { } | |
3692 void Generate() override { | |
3693 codegen()->DoDeferredMathAbsTaggedHeapNumber(instr_); | |
3694 } | |
3695 LInstruction* instr() override { return instr_; } | |
3696 | |
3697 private: | |
3698 LMathAbs* instr_; | |
3699 }; | |
3700 | |
3701 Representation r = instr->hydrogen()->value()->representation(); | |
3702 if (r.IsDouble()) { | |
3703 FPURegister input = ToDoubleRegister(instr->value()); | |
3704 FPURegister result = ToDoubleRegister(instr->result()); | |
3705 __ abs_d(result, input); | |
3706 } else if (r.IsInteger32()) { | |
3707 EmitIntegerMathAbs(instr); | |
3708 } else if (r.IsSmi()) { | |
3709 EmitSmiMathAbs(instr); | |
3710 } else { | |
3711 // Representation is tagged. | |
3712 DeferredMathAbsTaggedHeapNumber* deferred = | |
3713 new(zone()) DeferredMathAbsTaggedHeapNumber(this, instr); | |
3714 Register input = ToRegister(instr->value()); | |
3715 // Smi check. | |
3716 __ JumpIfNotSmi(input, deferred->entry()); | |
3717 // If smi, handle it directly. | |
3718 EmitSmiMathAbs(instr); | |
3719 __ bind(deferred->exit()); | |
3720 } | |
3721 } | |
3722 | |
3723 | |
3724 void LCodeGen::DoMathFloor(LMathFloor* instr) { | |
3725 DoubleRegister input = ToDoubleRegister(instr->value()); | |
3726 Register result = ToRegister(instr->result()); | |
3727 Register scratch1 = scratch0(); | |
3728 Register except_flag = ToRegister(instr->temp()); | |
3729 | |
3730 __ EmitFPUTruncate(kRoundToMinusInf, | |
3731 result, | |
3732 input, | |
3733 scratch1, | |
3734 double_scratch0(), | |
3735 except_flag); | |
3736 | |
3737 // Deopt if the operation did not succeed. | |
3738 DeoptimizeIf(ne, instr, Deoptimizer::kLostPrecisionOrNaN, except_flag, | |
3739 Operand(zero_reg)); | |
3740 | |
3741 if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) { | |
3742 // Test for -0. | |
3743 Label done; | |
3744 __ Branch(&done, ne, result, Operand(zero_reg)); | |
3745 __ mfhc1(scratch1, input); // Get exponent/sign bits. | |
3746 __ And(scratch1, scratch1, Operand(HeapNumber::kSignMask)); | |
3747 DeoptimizeIf(ne, instr, Deoptimizer::kMinusZero, scratch1, | |
3748 Operand(zero_reg)); | |
3749 __ bind(&done); | |
3750 } | |
3751 } | |
3752 | |
3753 | |
3754 void LCodeGen::DoMathRound(LMathRound* instr) { | |
3755 DoubleRegister input = ToDoubleRegister(instr->value()); | |
3756 Register result = ToRegister(instr->result()); | |
3757 DoubleRegister double_scratch1 = ToDoubleRegister(instr->temp()); | |
3758 Register scratch = scratch0(); | |
3759 Label done, check_sign_on_zero; | |
3760 | |
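  // Fast-path strategy: Math.round(x) is computed as floor(x + 0.5). Inputs | |
  // in ]-0.5, +0.5[ short-circuit to +/-0, inputs with |x| >= 2^32 | |
  // deoptimize, and the sign of x + 0.5 is compared with the sign of x to | |
  // catch results that must be -0. | |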
3761 // Extract exponent bits. | |
3762 __ mfhc1(result, input); | |
3763 __ Ext(scratch, | |
3764 result, | |
3765 HeapNumber::kExponentShift, | |
3766 HeapNumber::kExponentBits); | |
3767 | |
3768 // If the number is in ]-0.5, +0.5[, the result is +/- 0. | |
3769 Label skip1; | |
3770 __ Branch(&skip1, gt, scratch, Operand(HeapNumber::kExponentBias - 2)); | |
3771 __ mov(result, zero_reg); | |
3772 if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) { | |
3773 __ Branch(&check_sign_on_zero); | |
3774 } else { | |
3775 __ Branch(&done); | |
3776 } | |
3777 __ bind(&skip1); | |
3778 | |
3779 // The following conversion will not work with numbers | |
3780 // outside of ]-2^32, 2^32[. | |
3781 DeoptimizeIf(ge, instr, Deoptimizer::kOverflow, scratch, | |
3782 Operand(HeapNumber::kExponentBias + 32)); | |
3783 | |
3784 // Save the original sign for later comparison. | |
3785 __ And(scratch, result, Operand(HeapNumber::kSignMask)); | |
3786 | |
3787 __ Move(double_scratch0(), 0.5); | |
3788 __ add_d(double_scratch0(), input, double_scratch0()); | |
3789 | |
3790   // Check the sign of the result: if the sign changed, the input | |
3791   // value was in ]-0.5, 0[ and the result should be -0. | |
3792 __ mfhc1(result, double_scratch0()); | |
3793 // mfhc1 sign-extends, clear the upper bits. | |
3794 __ dsll32(result, result, 0); | |
3795 __ dsrl32(result, result, 0); | |
3796 __ Xor(result, result, Operand(scratch)); | |
3797 if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) { | |
3798     // ARM uses 'mi' here, which corresponds to 'lt' on MIPS. | |
3799 DeoptimizeIf(lt, instr, Deoptimizer::kMinusZero, result, Operand(zero_reg)); | |
3800 } else { | |
3801 Label skip2; | |
3802     // ARM uses 'mi' here, which corresponds to 'lt' on MIPS; | |
3803     // negating it gives 'ge'. | |
3804 __ Branch(&skip2, ge, result, Operand(zero_reg)); | |
3805 __ mov(result, zero_reg); | |
3806 __ Branch(&done); | |
3807 __ bind(&skip2); | |
3808 } | |
3809 | |
3810 Register except_flag = scratch; | |
3811 __ EmitFPUTruncate(kRoundToMinusInf, | |
3812 result, | |
3813 double_scratch0(), | |
3814 at, | |
3815 double_scratch1, | |
3816 except_flag); | |
3817 | |
3818 DeoptimizeIf(ne, instr, Deoptimizer::kLostPrecisionOrNaN, except_flag, | |
3819 Operand(zero_reg)); | |
3820 | |
3821 if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) { | |
3822 // Test for -0. | |
3823 __ Branch(&done, ne, result, Operand(zero_reg)); | |
3824 __ bind(&check_sign_on_zero); | |
3825 __ mfhc1(scratch, input); // Get exponent/sign bits. | |
3826 __ And(scratch, scratch, Operand(HeapNumber::kSignMask)); | |
3827 DeoptimizeIf(ne, instr, Deoptimizer::kMinusZero, scratch, | |
3828 Operand(zero_reg)); | |
3829 } | |
3830 __ bind(&done); | |
3831 } | |
3832 | |
3833 | |
3834 void LCodeGen::DoMathFround(LMathFround* instr) { | |
3835 DoubleRegister input = ToDoubleRegister(instr->value()); | |
3836 DoubleRegister result = ToDoubleRegister(instr->result()); | |
3837 __ cvt_s_d(result, input); | |
3838 __ cvt_d_s(result, result); | |
3839 } | |
3840 | |
3841 | |
3842 void LCodeGen::DoMathSqrt(LMathSqrt* instr) { | |
3843 DoubleRegister input = ToDoubleRegister(instr->value()); | |
3844 DoubleRegister result = ToDoubleRegister(instr->result()); | |
3845 __ sqrt_d(result, input); | |
3846 } | |
3847 | |
3848 | |
3849 void LCodeGen::DoMathPowHalf(LMathPowHalf* instr) { | |
3850 DoubleRegister input = ToDoubleRegister(instr->value()); | |
3851 DoubleRegister result = ToDoubleRegister(instr->result()); | |
3852 DoubleRegister temp = ToDoubleRegister(instr->temp()); | |
3853 | |
3854 DCHECK(!input.is(result)); | |
3855 | |
3856 // Note that according to ECMA-262 15.8.2.13: | |
3857 // Math.pow(-Infinity, 0.5) == Infinity | |
3858 // Math.sqrt(-Infinity) == NaN | |
3859 Label done; | |
3860 __ Move(temp, static_cast<double>(-V8_INFINITY)); | |
3861 __ BranchF(USE_DELAY_SLOT, &done, NULL, eq, temp, input); | |
3862 // Set up Infinity in the delay slot. | |
3863 // result is overwritten if the branch is not taken. | |
3864 __ neg_d(result, temp); | |
3865 | |
3866 // Add +0 to convert -0 to +0. | |
3867 __ add_d(result, input, kDoubleRegZero); | |
3868 __ sqrt_d(result, result); | |
3869 __ bind(&done); | |
3870 } | |
3871 | |
3872 | |
3873 void LCodeGen::DoPower(LPower* instr) { | |
3874 Representation exponent_type = instr->hydrogen()->right()->representation(); | |
3875 // Having marked this as a call, we can use any registers. | |
3876 // Just make sure that the input/output registers are the expected ones. | |
3877 Register tagged_exponent = MathPowTaggedDescriptor::exponent(); | |
3878 DCHECK(!instr->right()->IsDoubleRegister() || | |
3879 ToDoubleRegister(instr->right()).is(f4)); | |
3880 DCHECK(!instr->right()->IsRegister() || | |
3881 ToRegister(instr->right()).is(tagged_exponent)); | |
3882 DCHECK(ToDoubleRegister(instr->left()).is(f2)); | |
3883 DCHECK(ToDoubleRegister(instr->result()).is(f0)); | |
3884 | |
3885 if (exponent_type.IsSmi()) { | |
3886 MathPowStub stub(isolate(), MathPowStub::TAGGED); | |
3887 __ CallStub(&stub); | |
3888 } else if (exponent_type.IsTagged()) { | |
3889 Label no_deopt; | |
3890 __ JumpIfSmi(tagged_exponent, &no_deopt); | |
3891 DCHECK(!a7.is(tagged_exponent)); | |
3892     __ ld(a7, FieldMemOperand(tagged_exponent, HeapObject::kMapOffset)); | |
3893 __ LoadRoot(at, Heap::kHeapNumberMapRootIndex); | |
3894 DeoptimizeIf(ne, instr, Deoptimizer::kNotAHeapNumber, a7, Operand(at)); | |
3895 __ bind(&no_deopt); | |
3896 MathPowStub stub(isolate(), MathPowStub::TAGGED); | |
3897 __ CallStub(&stub); | |
3898 } else if (exponent_type.IsInteger32()) { | |
3899 MathPowStub stub(isolate(), MathPowStub::INTEGER); | |
3900 __ CallStub(&stub); | |
3901 } else { | |
3902 DCHECK(exponent_type.IsDouble()); | |
3903 MathPowStub stub(isolate(), MathPowStub::DOUBLE); | |
3904 __ CallStub(&stub); | |
3905 } | |
3906 } | |
3907 | |
3908 | |
3909 void LCodeGen::DoMathExp(LMathExp* instr) { | |
3910 DoubleRegister input = ToDoubleRegister(instr->value()); | |
3911 DoubleRegister result = ToDoubleRegister(instr->result()); | |
3912 DoubleRegister double_scratch1 = ToDoubleRegister(instr->double_temp()); | |
3913 DoubleRegister double_scratch2 = double_scratch0(); | |
3914 Register temp1 = ToRegister(instr->temp1()); | |
3915 Register temp2 = ToRegister(instr->temp2()); | |
3916 | |
3917 MathExpGenerator::EmitMathExp( | |
3918 masm(), input, result, double_scratch1, double_scratch2, | |
3919 temp1, temp2, scratch0()); | |
3920 } | |
3921 | |
3922 | |
3923 void LCodeGen::DoMathLog(LMathLog* instr) { | |
3924 __ PrepareCallCFunction(0, 1, scratch0()); | |
3925 __ MovToFloatParameter(ToDoubleRegister(instr->value())); | |
3926 __ CallCFunction(ExternalReference::math_log_double_function(isolate()), | |
3927 0, 1); | |
3928 __ MovFromFloatResult(ToDoubleRegister(instr->result())); | |
3929 } | |
3930 | |
3931 | |
3932 void LCodeGen::DoMathClz32(LMathClz32* instr) { | |
3933 Register input = ToRegister(instr->value()); | |
3934 Register result = ToRegister(instr->result()); | |
3935 __ Clz(result, input); | |
3936 } | |
3937 | |
3938 | |
3939 void LCodeGen::DoInvokeFunction(LInvokeFunction* instr) { | |
3940 DCHECK(ToRegister(instr->context()).is(cp)); | |
3941 DCHECK(ToRegister(instr->function()).is(a1)); | |
3942 DCHECK(instr->HasPointerMap()); | |
3943 | |
3944 Handle<JSFunction> known_function = instr->hydrogen()->known_function(); | |
3945 if (known_function.is_null()) { | |
3946 LPointerMap* pointers = instr->pointer_map(); | |
3947 SafepointGenerator generator(this, pointers, Safepoint::kLazyDeopt); | |
3948 ParameterCount count(instr->arity()); | |
3949 __ InvokeFunction(a1, count, CALL_FUNCTION, generator); | |
3950 } else { | |
3951 CallKnownFunction(known_function, | |
3952 instr->hydrogen()->formal_parameter_count(), | |
3953 instr->arity(), instr); | |
3954 } | |
3955 } | |
3956 | |
3957 | |
3958 void LCodeGen::DoCallWithDescriptor(LCallWithDescriptor* instr) { | |
3959 DCHECK(ToRegister(instr->result()).is(v0)); | |
3960 | |
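  // Tail calls tear down the current frame (when one was built) and jump | |
  // straight to the target, so no safepoint is recorded for them. | |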
3961 if (instr->hydrogen()->IsTailCall()) { | |
3962 if (NeedsEagerFrame()) __ LeaveFrame(StackFrame::INTERNAL); | |
3963 | |
3964 if (instr->target()->IsConstantOperand()) { | |
3965 LConstantOperand* target = LConstantOperand::cast(instr->target()); | |
3966 Handle<Code> code = Handle<Code>::cast(ToHandle(target)); | |
3967 __ Jump(code, RelocInfo::CODE_TARGET); | |
3968 } else { | |
3969 DCHECK(instr->target()->IsRegister()); | |
3970 Register target = ToRegister(instr->target()); | |
3971 __ Daddu(target, target, Operand(Code::kHeaderSize - kHeapObjectTag)); | |
3972 __ Jump(target); | |
3973 } | |
3974 } else { | |
3975 LPointerMap* pointers = instr->pointer_map(); | |
3976 SafepointGenerator generator(this, pointers, Safepoint::kLazyDeopt); | |
3977 | |
3978 if (instr->target()->IsConstantOperand()) { | |
3979 LConstantOperand* target = LConstantOperand::cast(instr->target()); | |
3980 Handle<Code> code = Handle<Code>::cast(ToHandle(target)); | |
3981 generator.BeforeCall(__ CallSize(code, RelocInfo::CODE_TARGET)); | |
3982 __ Call(code, RelocInfo::CODE_TARGET); | |
3983 } else { | |
3984 DCHECK(instr->target()->IsRegister()); | |
3985 Register target = ToRegister(instr->target()); | |
3986 generator.BeforeCall(__ CallSize(target)); | |
3987 __ Daddu(target, target, Operand(Code::kHeaderSize - kHeapObjectTag)); | |
3988 __ Call(target); | |
3989 } | |
3990 generator.AfterCall(); | |
3991 } | |
3992 } | |
3993 | |
3994 | |
3995 void LCodeGen::DoCallJSFunction(LCallJSFunction* instr) { | |
3996 DCHECK(ToRegister(instr->function()).is(a1)); | |
3997 DCHECK(ToRegister(instr->result()).is(v0)); | |
3998 | |
3999 __ li(a0, Operand(instr->arity())); | |
4000 | |
4001 // Change context. | |
4002 __ ld(cp, FieldMemOperand(a1, JSFunction::kContextOffset)); | |
4003 | |
4004   // Load the code entry address. | |
4005 __ ld(at, FieldMemOperand(a1, JSFunction::kCodeEntryOffset)); | |
4006 __ Call(at); | |
4007 | |
4008 RecordSafepointWithLazyDeopt(instr, RECORD_SIMPLE_SAFEPOINT); | |
4009 } | |
4010 | |
4011 | |
4012 void LCodeGen::DoCallFunction(LCallFunction* instr) { | |
4013 DCHECK(ToRegister(instr->context()).is(cp)); | |
4014 DCHECK(ToRegister(instr->function()).is(a1)); | |
4015 DCHECK(ToRegister(instr->result()).is(v0)); | |
4016 | |
4017 int arity = instr->arity(); | |
4018 CallFunctionFlags flags = instr->hydrogen()->function_flags(); | |
4019 if (instr->hydrogen()->HasVectorAndSlot()) { | |
4020 Register slot_register = ToRegister(instr->temp_slot()); | |
4021 Register vector_register = ToRegister(instr->temp_vector()); | |
4022 DCHECK(slot_register.is(a3)); | |
4023 DCHECK(vector_register.is(a2)); | |
4024 | |
4025 AllowDeferredHandleDereference vector_structure_check; | |
4026 Handle<TypeFeedbackVector> vector = instr->hydrogen()->feedback_vector(); | |
4027 int index = vector->GetIndex(instr->hydrogen()->slot()); | |
4028 | |
4029 __ li(vector_register, vector); | |
4030 __ li(slot_register, Operand(Smi::FromInt(index))); | |
4031 | |
4032 CallICState::CallType call_type = | |
4033 (flags & CALL_AS_METHOD) ? CallICState::METHOD : CallICState::FUNCTION; | |
4034 | |
4035 Handle<Code> ic = | |
4036 CodeFactory::CallICInOptimizedCode(isolate(), arity, call_type).code(); | |
4037 CallCode(ic, RelocInfo::CODE_TARGET, instr); | |
4038 } else { | |
4039 CallFunctionStub stub(isolate(), arity, flags); | |
4040 CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr); | |
4041 } | |
4042 } | |
4043 | |
4044 | |
4045 void LCodeGen::DoCallNew(LCallNew* instr) { | |
4046 DCHECK(ToRegister(instr->context()).is(cp)); | |
4047 DCHECK(ToRegister(instr->constructor()).is(a1)); | |
4048 DCHECK(ToRegister(instr->result()).is(v0)); | |
4049 | |
4050 __ li(a0, Operand(instr->arity())); | |
4051   // No cell in a2 for construct type feedback in optimized code. | |
4052 __ LoadRoot(a2, Heap::kUndefinedValueRootIndex); | |
4053 CallConstructStub stub(isolate(), NO_CALL_CONSTRUCTOR_FLAGS); | |
4054 CallCode(stub.GetCode(), RelocInfo::CONSTRUCT_CALL, instr); | |
4055 } | |
4056 | |
4057 | |
4058 void LCodeGen::DoCallNewArray(LCallNewArray* instr) { | |
4059 DCHECK(ToRegister(instr->context()).is(cp)); | |
4060 DCHECK(ToRegister(instr->constructor()).is(a1)); | |
4061 DCHECK(ToRegister(instr->result()).is(v0)); | |
4062 | |
4063 __ li(a0, Operand(instr->arity())); | |
4064 if (instr->arity() == 1) { | |
4065     // We only need the allocation site for the case where we have a length | |
4066     // argument. That case may bail out to the runtime, which will determine | |
4067     // the correct elements kind using the site. | |
4068 __ li(a2, instr->hydrogen()->site()); | |
4069 } else { | |
4070 __ LoadRoot(a2, Heap::kUndefinedValueRootIndex); | |
4071 } | |
4072 ElementsKind kind = instr->hydrogen()->elements_kind(); | |
4073 AllocationSiteOverrideMode override_mode = | |
4074 (AllocationSite::GetMode(kind) == TRACK_ALLOCATION_SITE) | |
4075 ? DISABLE_ALLOCATION_SITES | |
4076 : DONT_OVERRIDE; | |
4077 | |
4078 if (instr->arity() == 0) { | |
4079 ArrayNoArgumentConstructorStub stub(isolate(), kind, override_mode); | |
4080 CallCode(stub.GetCode(), RelocInfo::CONSTRUCT_CALL, instr); | |
4081 } else if (instr->arity() == 1) { | |
4082 Label done; | |
4083 if (IsFastPackedElementsKind(kind)) { | |
4084 Label packed_case; | |
4085       // We might need the holey elements kind here; | |
4086       // look at the first (length) argument. | |
4087 __ ld(a5, MemOperand(sp, 0)); | |
4088 __ Branch(&packed_case, eq, a5, Operand(zero_reg)); | |
4089 | |
4090 ElementsKind holey_kind = GetHoleyElementsKind(kind); | |
4091 ArraySingleArgumentConstructorStub stub(isolate(), | |
4092 holey_kind, | |
4093 override_mode); | |
4094 CallCode(stub.GetCode(), RelocInfo::CONSTRUCT_CALL, instr); | |
4095 __ jmp(&done); | |
4096 __ bind(&packed_case); | |
4097 } | |
4098 | |
4099 ArraySingleArgumentConstructorStub stub(isolate(), kind, override_mode); | |
4100 CallCode(stub.GetCode(), RelocInfo::CONSTRUCT_CALL, instr); | |
4101 __ bind(&done); | |
4102 } else { | |
4103 ArrayNArgumentsConstructorStub stub(isolate(), kind, override_mode); | |
4104 CallCode(stub.GetCode(), RelocInfo::CONSTRUCT_CALL, instr); | |
4105 } | |
4106 } | |
4107 | |
4108 | |
4109 void LCodeGen::DoCallRuntime(LCallRuntime* instr) { | |
4110 CallRuntime(instr->function(), instr->arity(), instr); | |
4111 } | |
4112 | |
4113 | |
4114 void LCodeGen::DoStoreCodeEntry(LStoreCodeEntry* instr) { | |
4115 Register function = ToRegister(instr->function()); | |
4116 Register code_object = ToRegister(instr->code_object()); | |
4117 __ Daddu(code_object, code_object, | |
4118 Operand(Code::kHeaderSize - kHeapObjectTag)); | |
4119 __ sd(code_object, | |
4120 FieldMemOperand(function, JSFunction::kCodeEntryOffset)); | |
4121 } | |
4122 | |
4123 | |
4124 void LCodeGen::DoInnerAllocatedObject(LInnerAllocatedObject* instr) { | |
4125 Register result = ToRegister(instr->result()); | |
4126 Register base = ToRegister(instr->base_object()); | |
4127 if (instr->offset()->IsConstantOperand()) { | |
4128 LConstantOperand* offset = LConstantOperand::cast(instr->offset()); | |
4129 __ Daddu(result, base, Operand(ToInteger32(offset))); | |
4130 } else { | |
4131 Register offset = ToRegister(instr->offset()); | |
4132 __ Daddu(result, base, offset); | |
4133 } | |
4134 } | |
4135 | |
4136 | |
4137 void LCodeGen::DoStoreNamedField(LStoreNamedField* instr) { | |
4138 Representation representation = instr->representation(); | |
4139 | |
4140 Register object = ToRegister(instr->object()); | |
4141 Register scratch2 = scratch1(); | |
4142 Register scratch1 = scratch0(); | |
4143 | |
4144 HObjectAccess access = instr->hydrogen()->access(); | |
4145 int offset = access.offset(); | |
4146 if (access.IsExternalMemory()) { | |
4147 Register value = ToRegister(instr->value()); | |
4148 MemOperand operand = MemOperand(object, offset); | |
4149 __ Store(value, operand, representation); | |
4150 return; | |
4151 } | |
4152 | |
4153 __ AssertNotSmi(object); | |
4154 | |
4155 DCHECK(!representation.IsSmi() || | |
4156 !instr->value()->IsConstantOperand() || | |
4157 IsSmi(LConstantOperand::cast(instr->value()))); | |
4158 if (!FLAG_unbox_double_fields && representation.IsDouble()) { | |
4159 DCHECK(access.IsInobject()); | |
4160 DCHECK(!instr->hydrogen()->has_transition()); | |
4161 DCHECK(!instr->hydrogen()->NeedsWriteBarrier()); | |
4162 DoubleRegister value = ToDoubleRegister(instr->value()); | |
4163 __ sdc1(value, FieldMemOperand(object, offset)); | |
4164 return; | |
4165 } | |
4166 | |
4167 if (instr->hydrogen()->has_transition()) { | |
4168 Handle<Map> transition = instr->hydrogen()->transition_map(); | |
4169 AddDeprecationDependency(transition); | |
4170 __ li(scratch1, Operand(transition)); | |
4171 __ sd(scratch1, FieldMemOperand(object, HeapObject::kMapOffset)); | |
4172 if (instr->hydrogen()->NeedsWriteBarrierForMap()) { | |
4173 Register temp = ToRegister(instr->temp()); | |
4174 // Update the write barrier for the map field. | |
4175 __ RecordWriteForMap(object, | |
4176 scratch1, | |
4177 temp, | |
4178 GetRAState(), | |
4179 kSaveFPRegs); | |
4180 } | |
4181 } | |
4182 | |
4183 // Do the store. | |
4184 Register destination = object; | |
4185 if (!access.IsInobject()) { | |
4186 destination = scratch1; | |
4187 __ ld(destination, FieldMemOperand(object, JSObject::kPropertiesOffset)); | |
4188 } | |
4189 | |
4190 if (representation.IsSmi() && SmiValuesAre32Bits() && | |
4191 instr->hydrogen()->value()->representation().IsInteger32()) { | |
4192 DCHECK(instr->hydrogen()->store_mode() == STORE_TO_INITIALIZED_ENTRY); | |
4193 if (FLAG_debug_code) { | |
4194 __ Load(scratch2, FieldMemOperand(destination, offset), representation); | |
4195 __ AssertSmi(scratch2); | |
4196 } | |
4197 // Store int value directly to upper half of the smi. | |
4198 offset = SmiWordOffset(offset); | |
4199 representation = Representation::Integer32(); | |
4200 } | |
4201 MemOperand operand = FieldMemOperand(destination, offset); | |
4202 | |
4203 if (FLAG_unbox_double_fields && representation.IsDouble()) { | |
4204 DCHECK(access.IsInobject()); | |
4205 DoubleRegister value = ToDoubleRegister(instr->value()); | |
4206 __ sdc1(value, operand); | |
4207 } else { | |
4208 DCHECK(instr->value()->IsRegister()); | |
4209 Register value = ToRegister(instr->value()); | |
4210 __ Store(value, operand, representation); | |
4211 } | |
4212 | |
4213 if (instr->hydrogen()->NeedsWriteBarrier()) { | |
4214 // Update the write barrier for the object for in-object properties. | |
4215 Register value = ToRegister(instr->value()); | |
4216 __ RecordWriteField(destination, | |
4217 offset, | |
4218 value, | |
4219 scratch2, | |
4220 GetRAState(), | |
4221 kSaveFPRegs, | |
4222 EMIT_REMEMBERED_SET, | |
4223 instr->hydrogen()->SmiCheckForWriteBarrier(), | |
4224 instr->hydrogen()->PointersToHereCheckForValue()); | |
4225 } | |
4226 } | |
4227 | |
4228 | |
4229 void LCodeGen::DoStoreNamedGeneric(LStoreNamedGeneric* instr) { | |
4230 DCHECK(ToRegister(instr->context()).is(cp)); | |
4231 DCHECK(ToRegister(instr->object()).is(StoreDescriptor::ReceiverRegister())); | |
4232 DCHECK(ToRegister(instr->value()).is(StoreDescriptor::ValueRegister())); | |
4233 | |
4234 if (instr->hydrogen()->HasVectorAndSlot()) { | |
4235 EmitVectorStoreICRegisters<LStoreNamedGeneric>(instr); | |
4236 } | |
4237 | |
4238 __ li(StoreDescriptor::NameRegister(), Operand(instr->name())); | |
4239 Handle<Code> ic = CodeFactory::StoreICInOptimizedCode( | |
4240 isolate(), instr->language_mode(), | |
4241 instr->hydrogen()->initialization_state()).code(); | |
4242 CallCode(ic, RelocInfo::CODE_TARGET, instr); | |
4243 } | |
4244 | |
4245 | |
4246 void LCodeGen::DoStoreGlobalViaContext(LStoreGlobalViaContext* instr) { | |
4247 DCHECK(ToRegister(instr->context()).is(cp)); | |
4248 DCHECK(ToRegister(instr->value()) | |
4249 .is(StoreGlobalViaContextDescriptor::ValueRegister())); | |
4250 | |
4251 int const slot = instr->slot_index(); | |
4252 int const depth = instr->depth(); | |
4253 if (depth <= StoreGlobalViaContextStub::kMaximumDepth) { | |
4254 __ li(StoreGlobalViaContextDescriptor::SlotRegister(), Operand(slot)); | |
4255 Handle<Code> stub = CodeFactory::StoreGlobalViaContext( | |
4256 isolate(), depth, instr->language_mode()) | |
4257 .code(); | |
4258 CallCode(stub, RelocInfo::CODE_TARGET, instr); | |
4259 } else { | |
4260 __ Push(Smi::FromInt(slot)); | |
4261 __ Push(StoreGlobalViaContextDescriptor::ValueRegister()); | |
4262 __ CallRuntime(is_strict(language_mode()) | |
4263 ? Runtime::kStoreGlobalViaContext_Strict | |
4264 : Runtime::kStoreGlobalViaContext_Sloppy, | |
4265 2); | |
4266 } | |
4267 } | |
4268 | |
4269 | |
4270 void LCodeGen::DoBoundsCheck(LBoundsCheck* instr) { | |
4271 Condition cc = instr->hydrogen()->allow_equality() ? hi : hs; | |
4272 Operand operand((int64_t)0); | |
4273 Register reg; | |
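  // Keep the register operand on the left: when the index is a constant, | |
  // compare the length register against it and commute the condition. | |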
4274 if (instr->index()->IsConstantOperand()) { | |
4275 operand = ToOperand(instr->index()); | |
4276 reg = ToRegister(instr->length()); | |
4277 cc = CommuteCondition(cc); | |
4278 } else { | |
4279 reg = ToRegister(instr->index()); | |
4280 operand = ToOperand(instr->length()); | |
4281 } | |
4282 if (FLAG_debug_code && instr->hydrogen()->skip_check()) { | |
4283 Label done; | |
4284 __ Branch(&done, NegateCondition(cc), reg, operand); | |
4285 __ stop("eliminated bounds check failed"); | |
4286 __ bind(&done); | |
4287 } else { | |
4288 DeoptimizeIf(cc, instr, Deoptimizer::kOutOfBounds, reg, operand); | |
4289 } | |
4290 } | |
4291 | |
4292 | |
4293 void LCodeGen::DoStoreKeyedExternalArray(LStoreKeyed* instr) { | |
4294 Register external_pointer = ToRegister(instr->elements()); | |
4295 Register key = no_reg; | |
4296 ElementsKind elements_kind = instr->elements_kind(); | |
4297 bool key_is_constant = instr->key()->IsConstantOperand(); | |
4298 int constant_key = 0; | |
4299 if (key_is_constant) { | |
4300 constant_key = ToInteger32(LConstantOperand::cast(instr->key())); | |
4301 if (constant_key & 0xF0000000) { | |
4302 Abort(kArrayIndexConstantValueTooBig); | |
4303 } | |
4304 } else { | |
4305 key = ToRegister(instr->key()); | |
4306 } | |
4307 int element_size_shift = ElementsKindToShiftSize(elements_kind); | |
4308 int shift_size = (instr->hydrogen()->key()->representation().IsSmi()) | |
4309 ? (element_size_shift - (kSmiTagSize + kSmiShiftSize)) | |
4310 : element_size_shift; | |
4311 int base_offset = instr->base_offset(); | |
4312 | |
4313 if (elements_kind == FLOAT32_ELEMENTS || elements_kind == FLOAT64_ELEMENTS) { | |
4314 Register address = scratch0(); | |
4315 FPURegister value(ToDoubleRegister(instr->value())); | |
4316 if (key_is_constant) { | |
4317 if (constant_key != 0) { | |
4318 __ Daddu(address, external_pointer, | |
4319 Operand(constant_key << element_size_shift)); | |
4320 } else { | |
4321 address = external_pointer; | |
4322 } | |
4323 } else { | |
4324 if (shift_size < 0) { | |
4325 if (shift_size == -32) { | |
4326 __ dsra32(address, key, 0); | |
4327 } else { | |
4328 __ dsra(address, key, -shift_size); | |
4329 } | |
4330 } else { | |
4331 __ dsll(address, key, shift_size); | |
4332 } | |
4333 __ Daddu(address, external_pointer, address); | |
4334 } | |
4335 | |
4336 if (elements_kind == FLOAT32_ELEMENTS) { | |
4337 __ cvt_s_d(double_scratch0(), value); | |
4338 __ swc1(double_scratch0(), MemOperand(address, base_offset)); | |
4339 } else { // Storing doubles, not floats. | |
4340 __ sdc1(value, MemOperand(address, base_offset)); | |
4341 } | |
4342 } else { | |
4343 Register value(ToRegister(instr->value())); | |
4344 MemOperand mem_operand = PrepareKeyedOperand( | |
4345 key, external_pointer, key_is_constant, constant_key, | |
4346 element_size_shift, shift_size, | |
4347 base_offset); | |
4348 switch (elements_kind) { | |
4349 case UINT8_ELEMENTS: | |
4350 case UINT8_CLAMPED_ELEMENTS: | |
4351 case INT8_ELEMENTS: | |
4352 __ sb(value, mem_operand); | |
4353 break; | |
4354 case INT16_ELEMENTS: | |
4355 case UINT16_ELEMENTS: | |
4356 __ sh(value, mem_operand); | |
4357 break; | |
4358 case INT32_ELEMENTS: | |
4359 case UINT32_ELEMENTS: | |
4360 __ sw(value, mem_operand); | |
4361 break; | |
4362 case FLOAT32_ELEMENTS: | |
4363 case FLOAT64_ELEMENTS: | |
4364 case FAST_DOUBLE_ELEMENTS: | |
4365 case FAST_ELEMENTS: | |
4366 case FAST_SMI_ELEMENTS: | |
4367 case FAST_HOLEY_DOUBLE_ELEMENTS: | |
4368 case FAST_HOLEY_ELEMENTS: | |
4369 case FAST_HOLEY_SMI_ELEMENTS: | |
4370 case DICTIONARY_ELEMENTS: | |
4371 case FAST_SLOPPY_ARGUMENTS_ELEMENTS: | |
4372 case SLOW_SLOPPY_ARGUMENTS_ELEMENTS: | |
4373 UNREACHABLE(); | |
4374 break; | |
4375 } | |
4376 } | |
4377 } | |
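// A worked sketch of the address computation above (added, illustrative;
// assumes MIPS64's 32-bit smi payload in the upper word): for
// FLOAT64_ELEMENTS, element_size_shift is 3. A smi key keeps its payload in
// bits 32..63, so shift_size = 3 - 32 = -29 and dsra(address, key, 29)
// yields payload * 8, exactly the element's byte offset; an untagged int32
// key takes the dsll(address, key, 3) path instead.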
4378 | |
4379 | |
4380 void LCodeGen::DoStoreKeyedFixedDoubleArray(LStoreKeyed* instr) { | |
4381 DoubleRegister value = ToDoubleRegister(instr->value()); | |
4382 Register elements = ToRegister(instr->elements()); | |
4383 Register scratch = scratch0(); | |
4384 DoubleRegister double_scratch = double_scratch0(); | |
4385 bool key_is_constant = instr->key()->IsConstantOperand(); | |
4386 int base_offset = instr->base_offset(); | |
4387 Label not_nan, done; | |
4388 | |
4389 // Calculate the effective address of the slot in the array to store the | |
4390 // double value. | |
4391 int element_size_shift = ElementsKindToShiftSize(FAST_DOUBLE_ELEMENTS); | |
4392 if (key_is_constant) { | |
4393 int constant_key = ToInteger32(LConstantOperand::cast(instr->key())); | |
4394 if (constant_key & 0xF0000000) { | |
4395 Abort(kArrayIndexConstantValueTooBig); | |
4396 } | |
4397 __ Daddu(scratch, elements, | |
4398 Operand((constant_key << element_size_shift) + base_offset)); | |
4399 } else { | |
4400 int shift_size = (instr->hydrogen()->key()->representation().IsSmi()) | |
4401 ? (element_size_shift - (kSmiTagSize + kSmiShiftSize)) | |
4402 : element_size_shift; | |
4403 __ Daddu(scratch, elements, Operand(base_offset)); | |
4404 DCHECK((shift_size == 3) || (shift_size == -29)); | |
4405 if (shift_size == 3) { | |
4406 __ dsll(at, ToRegister(instr->key()), 3); | |
4407 } else if (shift_size == -29) { | |
4408 __ dsra(at, ToRegister(instr->key()), 29); | |
4409 } | |
4410 __ Daddu(scratch, scratch, at); | |
4411 } | |
4412 | |
4413 if (instr->NeedsCanonicalization()) { | |
4414 __ FPUCanonicalizeNaN(double_scratch, value); | |
4415 __ sdc1(double_scratch, MemOperand(scratch, 0)); | |
4416 } else { | |
4417 __ sdc1(value, MemOperand(scratch, 0)); | |
4418 } | |
4419 } | |
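// Added note (hedged): canonicalization matters here because fast double
// arrays reserve one particular NaN bit pattern as the hole marker. Storing
// an arbitrary user-produced NaN verbatim could alias the hole, so
// FPUCanonicalizeNaN rewrites any NaN to the single canonical pattern
// before the store.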
4420 | |
4421 | |
4422 void LCodeGen::DoStoreKeyedFixedArray(LStoreKeyed* instr) { | |
4423 Register value = ToRegister(instr->value()); | |
4424 Register elements = ToRegister(instr->elements()); | |
4425 Register key = instr->key()->IsRegister() ? ToRegister(instr->key()) | |
4426 : no_reg; | |
4427 Register scratch = scratch0(); | |
4428 Register store_base = scratch; | |
4429 int offset = instr->base_offset(); | |
4430 | |
4431 // Do the store. | |
4432 if (instr->key()->IsConstantOperand()) { | |
4433 DCHECK(!instr->hydrogen()->NeedsWriteBarrier()); | |
4434 LConstantOperand* const_operand = LConstantOperand::cast(instr->key()); | |
4435 offset += ToInteger32(const_operand) * kPointerSize; | |
4436 store_base = elements; | |
4437 } else { | |
4438 // Even though the HStoreKeyed instruction forces the input | |
4439 // representation for the key to be an integer, the input gets replaced | |
4440 // during bound check elimination with the index argument to the bounds | |
4441 // check, which can be tagged, so that case must be handled here, too. | |
4442 if (instr->hydrogen()->key()->representation().IsSmi()) { | |
4443 __ SmiScale(scratch, key, kPointerSizeLog2); | |
4444 __ daddu(store_base, elements, scratch); | |
4445 } else { | |
4446 __ dsll(scratch, key, kPointerSizeLog2); | |
4447 __ daddu(store_base, elements, scratch); | |
4448 } | |
4449 } | |
4450 | |
4451 Representation representation = instr->hydrogen()->value()->representation(); | |
4452 if (representation.IsInteger32() && SmiValuesAre32Bits()) { | |
4453 DCHECK(instr->hydrogen()->store_mode() == STORE_TO_INITIALIZED_ENTRY); | |
4454 DCHECK(instr->hydrogen()->elements_kind() == FAST_SMI_ELEMENTS); | |
4455 if (FLAG_debug_code) { | |
4456 Register temp = scratch1(); | |
4457 __ Load(temp, MemOperand(store_base, offset), Representation::Smi()); | |
4458 __ AssertSmi(temp); | |
4459 } | |
4460 | |
4461 // Store int value directly to upper half of the smi. | |
4462 STATIC_ASSERT(kSmiTag == 0); | |
4463 STATIC_ASSERT(kSmiTagSize + kSmiShiftSize == 32); | |
4464 offset = SmiWordOffset(offset); | |
4465 representation = Representation::Integer32(); | |
4466 } | |
4467 | |
4468 __ Store(value, MemOperand(store_base, offset), representation); | |
4469 | |
4470 if (instr->hydrogen()->NeedsWriteBarrier()) { | |
4471 SmiCheck check_needed = | |
4472 instr->hydrogen()->value()->type().IsHeapObject() | |
4473 ? OMIT_SMI_CHECK : INLINE_SMI_CHECK; | |
4474 // Compute address of modified element and store it into key register. | |
4475 __ Daddu(key, store_base, Operand(offset)); | |
4476 __ RecordWrite(elements, | |
4477 key, | |
4478 value, | |
4479 GetRAState(), | |
4480 kSaveFPRegs, | |
4481 EMIT_REMEMBERED_SET, | |
4482 check_needed, | |
4483 instr->hydrogen()->PointersToHereCheckForValue()); | |
4484 } | |
4485 } | |
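// A sketch of the int32-into-smi fast path above (added, illustrative):
// with kSmiTag == 0 and kSmiTagSize + kSmiShiftSize == 32, a smi is its
// payload shifted into the upper 32 bits with zeros below. Writing an int32
// at SmiWordOffset(offset) therefore produces the same 64-bit word as
// storing SmiTag(value); e.g. the value 7 becomes the smi
// 0x0000000700000000.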
4486 | |
4487 | |
4488 void LCodeGen::DoStoreKeyed(LStoreKeyed* instr) { | |
4489 // Dispatch by case: typed (external) array, fast double array, fast array. | |
4490 if (instr->is_fixed_typed_array()) { | |
4491 DoStoreKeyedExternalArray(instr); | |
4492 } else if (instr->hydrogen()->value()->representation().IsDouble()) { | |
4493 DoStoreKeyedFixedDoubleArray(instr); | |
4494 } else { | |
4495 DoStoreKeyedFixedArray(instr); | |
4496 } | |
4497 } | |
4498 | |
4499 | |
4500 void LCodeGen::DoStoreKeyedGeneric(LStoreKeyedGeneric* instr) { | |
4501 DCHECK(ToRegister(instr->context()).is(cp)); | |
4502 DCHECK(ToRegister(instr->object()).is(StoreDescriptor::ReceiverRegister())); | |
4503 DCHECK(ToRegister(instr->key()).is(StoreDescriptor::NameRegister())); | |
4504 DCHECK(ToRegister(instr->value()).is(StoreDescriptor::ValueRegister())); | |
4505 | |
4506 if (instr->hydrogen()->HasVectorAndSlot()) { | |
4507 EmitVectorStoreICRegisters<LStoreKeyedGeneric>(instr); | |
4508 } | |
4509 | |
4510 Handle<Code> ic = CodeFactory::KeyedStoreICInOptimizedCode( | |
4511 isolate(), instr->language_mode(), | |
4512 instr->hydrogen()->initialization_state()).code(); | |
4513 CallCode(ic, RelocInfo::CODE_TARGET, instr); | |
4514 } | |
4515 | |
4516 | |
4517 void LCodeGen::DoMaybeGrowElements(LMaybeGrowElements* instr) { | |
4518 class DeferredMaybeGrowElements final : public LDeferredCode { | |
4519 public: | |
4520 DeferredMaybeGrowElements(LCodeGen* codegen, LMaybeGrowElements* instr) | |
4521 : LDeferredCode(codegen), instr_(instr) {} | |
4522 void Generate() override { codegen()->DoDeferredMaybeGrowElements(instr_); } | |
4523 LInstruction* instr() override { return instr_; } | |
4524 | |
4525 private: | |
4526 LMaybeGrowElements* instr_; | |
4527 }; | |
4528 | |
4529 Register result = v0; | |
4530 DeferredMaybeGrowElements* deferred = | |
4531 new (zone()) DeferredMaybeGrowElements(this, instr); | |
4532 LOperand* key = instr->key(); | |
4533 LOperand* current_capacity = instr->current_capacity(); | |
4534 | |
4535 DCHECK(instr->hydrogen()->key()->representation().IsInteger32()); | |
4536 DCHECK(instr->hydrogen()->current_capacity()->representation().IsInteger32()); | |
4537 DCHECK(key->IsConstantOperand() || key->IsRegister()); | |
4538 DCHECK(current_capacity->IsConstantOperand() || | |
4539 current_capacity->IsRegister()); | |
4540 | |
4541 if (key->IsConstantOperand() && current_capacity->IsConstantOperand()) { | |
4542 int32_t constant_key = ToInteger32(LConstantOperand::cast(key)); | |
4543 int32_t constant_capacity = | |
4544 ToInteger32(LConstantOperand::cast(current_capacity)); | |
4545 if (constant_key >= constant_capacity) { | |
4546 // Deferred case. | |
4547 __ jmp(deferred->entry()); | |
4548 } | |
4549 } else if (key->IsConstantOperand()) { | |
4550 int32_t constant_key = ToInteger32(LConstantOperand::cast(key)); | |
4551 __ Branch(deferred->entry(), le, ToRegister(current_capacity), | |
4552 Operand(constant_key)); | |
4553 } else if (current_capacity->IsConstantOperand()) { | |
4554 int32_t constant_capacity = | |
4555 ToInteger32(LConstantOperand::cast(current_capacity)); | |
4556 __ Branch(deferred->entry(), ge, ToRegister(key), | |
4557 Operand(constant_capacity)); | |
4558 } else { | |
4559 __ Branch(deferred->entry(), ge, ToRegister(key), | |
4560 Operand(ToRegister(current_capacity))); | |
4561 } | |
4562 | |
4563 if (instr->elements()->IsRegister()) { | |
4564 __ mov(result, ToRegister(instr->elements())); | |
4565 } else { | |
4566 __ ld(result, ToMemOperand(instr->elements())); | |
4567 } | |
4568 | |
4569 __ bind(deferred->exit()); | |
4570 } | |
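// Added commentary (a sketch of the control flow): the four branches above
// cover every constant/register combination of key and capacity. Growth is
// needed only when key >= capacity; otherwise execution falls through with
// the current elements pointer in v0 and the deferred stub call is skipped.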
4571 | |
4572 | |
4573 void LCodeGen::DoDeferredMaybeGrowElements(LMaybeGrowElements* instr) { | |
4574 // TODO(3095996): Get rid of this. For now, we need to make the | |
4575 // result register contain a valid pointer because it is already | |
4576 // contained in the register pointer map. | |
4577 Register result = v0; | |
4578 __ mov(result, zero_reg); | |
4579 | |
4580 // We have to call a stub. | |
4581 { | |
4582 PushSafepointRegistersScope scope(this); | |
4583 if (instr->object()->IsRegister()) { | |
4584 __ mov(result, ToRegister(instr->object())); | |
4585 } else { | |
4586 __ ld(result, ToMemOperand(instr->object())); | |
4587 } | |
4588 | |
4589 LOperand* key = instr->key(); | |
4590 if (key->IsConstantOperand()) { | |
4591 __ li(a3, Operand(ToSmi(LConstantOperand::cast(key)))); | |
4592 } else { | |
4593 __ mov(a3, ToRegister(key)); | |
4594 __ SmiTag(a3); | |
4595 } | |
4596 | |
4597 GrowArrayElementsStub stub(isolate(), instr->hydrogen()->is_js_array(), | |
4598 instr->hydrogen()->kind()); | |
4599 __ mov(a0, result); | |
4600 __ CallStub(&stub); | |
4601 RecordSafepointWithLazyDeopt( | |
4602 instr, RECORD_SAFEPOINT_WITH_REGISTERS_AND_NO_ARGUMENTS); | |
4603 __ StoreToSafepointRegisterSlot(result, result); | |
4604 } | |
4605 | |
4606 // Deopt on smi, which means the elements array changed to dictionary mode. | |
4607 __ SmiTst(result, at); | |
4608 DeoptimizeIf(eq, instr, Deoptimizer::kSmi, at, Operand(zero_reg)); | |
4609 } | |
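// Added note (hedged): GrowArrayElementsStub is expected to return a smi
// instead of a new backing store when it cannot grow in fast mode, e.g.
// when the elements fell back to dictionary mode; the SmiTst/deopt pair
// above is how that failure surfaces in optimized code.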
4610 | |
4611 | |
4612 void LCodeGen::DoTransitionElementsKind(LTransitionElementsKind* instr) { | |
4613 Register object_reg = ToRegister(instr->object()); | |
4614 Register scratch = scratch0(); | |
4615 | |
4616 Handle<Map> from_map = instr->original_map(); | |
4617 Handle<Map> to_map = instr->transitioned_map(); | |
4618 ElementsKind from_kind = instr->from_kind(); | |
4619 ElementsKind to_kind = instr->to_kind(); | |
4620 | |
4621 Label not_applicable; | |
4622 __ ld(scratch, FieldMemOperand(object_reg, HeapObject::kMapOffset)); | |
4623 __ Branch(¬_applicable, ne, scratch, Operand(from_map)); | |
4624 | |
4625 if (IsSimpleMapChangeTransition(from_kind, to_kind)) { | |
4626 Register new_map_reg = ToRegister(instr->new_map_temp()); | |
4627 __ li(new_map_reg, Operand(to_map)); | |
4628 __ sd(new_map_reg, FieldMemOperand(object_reg, HeapObject::kMapOffset)); | |
4629 // Write barrier. | |
4630 __ RecordWriteForMap(object_reg, | |
4631 new_map_reg, | |
4632 scratch, | |
4633 GetRAState(), | |
4634 kDontSaveFPRegs); | |
4635 } else { | |
4636 DCHECK(object_reg.is(a0)); | |
4637 DCHECK(ToRegister(instr->context()).is(cp)); | |
4638 PushSafepointRegistersScope scope(this); | |
4639 __ li(a1, Operand(to_map)); | |
4640 bool is_js_array = from_map->instance_type() == JS_ARRAY_TYPE; | |
4641 TransitionElementsKindStub stub(isolate(), from_kind, to_kind, is_js_array); | |
4642 __ CallStub(&stub); | |
4643 RecordSafepointWithRegisters( | |
4644 instr->pointer_map(), 0, Safepoint::kLazyDeopt); | |
4645 } | |
4646 __ bind(¬_applicable); | |
4647 } | |
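// Added commentary (a sketch): a "simple" transition (e.g. FAST_SMI_ELEMENTS
// to FAST_ELEMENTS) only swaps the map word, so it is done inline as a map
// store plus write barrier. Transitions that must rewrite the backing store
// (e.g. smi to double elements) go through TransitionElementsKindStub under
// a full safepoint instead.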
4648 | |
4649 | |
4650 void LCodeGen::DoTrapAllocationMemento(LTrapAllocationMemento* instr) { | |
4651 Register object = ToRegister(instr->object()); | |
4652 Register temp = ToRegister(instr->temp()); | |
4653 Label no_memento_found; | |
4654 __ TestJSArrayForAllocationMemento(object, temp, &no_memento_found, | |
4655 ne, &no_memento_found); | |
4656 DeoptimizeIf(al, instr, Deoptimizer::kMementoFound); | |
4657 __ bind(&no_memento_found); | |
4658 } | |
4659 | |
4660 | |
4661 void LCodeGen::DoStringAdd(LStringAdd* instr) { | |
4662 DCHECK(ToRegister(instr->context()).is(cp)); | |
4663 DCHECK(ToRegister(instr->left()).is(a1)); | |
4664 DCHECK(ToRegister(instr->right()).is(a0)); | |
4665 StringAddStub stub(isolate(), | |
4666 instr->hydrogen()->flags(), | |
4667 instr->hydrogen()->pretenure_flag()); | |
4668 CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr); | |
4669 } | |
4670 | |
4671 | |
4672 void LCodeGen::DoStringCharCodeAt(LStringCharCodeAt* instr) { | |
4673 class DeferredStringCharCodeAt final : public LDeferredCode { | |
4674 public: | |
4675 DeferredStringCharCodeAt(LCodeGen* codegen, LStringCharCodeAt* instr) | |
4676 : LDeferredCode(codegen), instr_(instr) { } | |
4677 void Generate() override { codegen()->DoDeferredStringCharCodeAt(instr_); } | |
4678 LInstruction* instr() override { return instr_; } | |
4679 | |
4680 private: | |
4681 LStringCharCodeAt* instr_; | |
4682 }; | |
4683 | |
4684 DeferredStringCharCodeAt* deferred = | |
4685 new(zone()) DeferredStringCharCodeAt(this, instr); | |
4686 StringCharLoadGenerator::Generate(masm(), | |
4687 ToRegister(instr->string()), | |
4688 ToRegister(instr->index()), | |
4689 ToRegister(instr->result()), | |
4690 deferred->entry()); | |
4691 __ bind(deferred->exit()); | |
4692 } | |
4693 | |
4694 | |
4695 void LCodeGen::DoDeferredStringCharCodeAt(LStringCharCodeAt* instr) { | |
4696 Register string = ToRegister(instr->string()); | |
4697 Register result = ToRegister(instr->result()); | |
4698 Register scratch = scratch0(); | |
4699 | |
4700 // TODO(3095996): Get rid of this. For now, we need to make the | |
4701 // result register contain a valid pointer because it is already | |
4702 // contained in the register pointer map. | |
4703 __ mov(result, zero_reg); | |
4704 | |
4705 PushSafepointRegistersScope scope(this); | |
4706 __ push(string); | |
4707 // Push the index as a smi. This is safe because of the checks in | |
4708 // DoStringCharCodeAt above. | |
4709 if (instr->index()->IsConstantOperand()) { | |
4710 int const_index = ToInteger32(LConstantOperand::cast(instr->index())); | |
4711 __ Daddu(scratch, zero_reg, Operand(Smi::FromInt(const_index))); | |
4712 __ push(scratch); | |
4713 } else { | |
4714 Register index = ToRegister(instr->index()); | |
4715 __ SmiTag(index); | |
4716 __ push(index); | |
4717 } | |
4718 CallRuntimeFromDeferred(Runtime::kStringCharCodeAtRT, 2, instr, | |
4719 instr->context()); | |
4720 __ AssertSmi(v0); | |
4721 __ SmiUntag(v0); | |
4722 __ StoreToSafepointRegisterSlot(v0, result); | |
4723 } | |
4724 | |
4725 | |
4726 void LCodeGen::DoStringCharFromCode(LStringCharFromCode* instr) { | |
4727 class DeferredStringCharFromCode final : public LDeferredCode { | |
4728 public: | |
4729 DeferredStringCharFromCode(LCodeGen* codegen, LStringCharFromCode* instr) | |
4730 : LDeferredCode(codegen), instr_(instr) { } | |
4731 void Generate() override { | |
4732 codegen()->DoDeferredStringCharFromCode(instr_); | |
4733 } | |
4734 LInstruction* instr() override { return instr_; } | |
4735 | |
4736 private: | |
4737 LStringCharFromCode* instr_; | |
4738 }; | |
4739 | |
4740 DeferredStringCharFromCode* deferred = | |
4741 new(zone()) DeferredStringCharFromCode(this, instr); | |
4742 | |
4743 DCHECK(instr->hydrogen()->value()->representation().IsInteger32()); | |
4744 Register char_code = ToRegister(instr->char_code()); | |
4745 Register result = ToRegister(instr->result()); | |
4746 Register scratch = scratch0(); | |
4747 DCHECK(!char_code.is(result)); | |
4748 | |
4749 __ Branch(deferred->entry(), hi, | |
4750 char_code, Operand(String::kMaxOneByteCharCode)); | |
4751 __ LoadRoot(result, Heap::kSingleCharacterStringCacheRootIndex); | |
4752 __ dsll(scratch, char_code, kPointerSizeLog2); | |
4753 __ Daddu(result, result, scratch); | |
4754 __ ld(result, FieldMemOperand(result, FixedArray::kHeaderSize)); | |
4755 __ LoadRoot(scratch, Heap::kUndefinedValueRootIndex); | |
4756 __ Branch(deferred->entry(), eq, result, Operand(scratch)); | |
4757 __ bind(deferred->exit()); | |
4758 } | |
4759 | |
4760 | |
4761 void LCodeGen::DoDeferredStringCharFromCode(LStringCharFromCode* instr) { | |
4762 Register char_code = ToRegister(instr->char_code()); | |
4763 Register result = ToRegister(instr->result()); | |
4764 | |
4765 // TODO(3095996): Get rid of this. For now, we need to make the | |
4766 // result register contain a valid pointer because it is already | |
4767 // contained in the register pointer map. | |
4768 __ mov(result, zero_reg); | |
4769 | |
4770 PushSafepointRegistersScope scope(this); | |
4771 __ SmiTag(char_code); | |
4772 __ push(char_code); | |
4773 CallRuntimeFromDeferred(Runtime::kCharFromCode, 1, instr, instr->context()); | |
4774 __ StoreToSafepointRegisterSlot(v0, result); | |
4775 } | |
4776 | |
4777 | |
4778 void LCodeGen::DoInteger32ToDouble(LInteger32ToDouble* instr) { | |
4779 LOperand* input = instr->value(); | |
4780 DCHECK(input->IsRegister() || input->IsStackSlot()); | |
4781 LOperand* output = instr->result(); | |
4782 DCHECK(output->IsDoubleRegister()); | |
4783 FPURegister single_scratch = double_scratch0().low(); | |
4784 if (input->IsStackSlot()) { | |
4785 Register scratch = scratch0(); | |
4786 __ ld(scratch, ToMemOperand(input)); | |
4787 __ mtc1(scratch, single_scratch); | |
4788 } else { | |
4789 __ mtc1(ToRegister(input), single_scratch); | |
4790 } | |
4791 __ cvt_d_w(ToDoubleRegister(output), single_scratch); | |
4792 } | |
4793 | |
4794 | |
4795 void LCodeGen::DoUint32ToDouble(LUint32ToDouble* instr) { | |
4796 LOperand* input = instr->value(); | |
4797 LOperand* output = instr->result(); | |
4798 | |
4799 FPURegister dbl_scratch = double_scratch0(); | |
4800 __ mtc1(ToRegister(input), dbl_scratch); | |
4801 __ Cvt_d_uw(ToDoubleRegister(output), dbl_scratch, f22); // TODO(plind): f22? | |
4802 } | |
4803 | |
4804 | |
4805 void LCodeGen::DoNumberTagU(LNumberTagU* instr) { | |
4806 class DeferredNumberTagU final : public LDeferredCode { | |
4807 public: | |
4808 DeferredNumberTagU(LCodeGen* codegen, LNumberTagU* instr) | |
4809 : LDeferredCode(codegen), instr_(instr) { } | |
4810 void Generate() override { | |
4811 codegen()->DoDeferredNumberTagIU(instr_, | |
4812 instr_->value(), | |
4813 instr_->temp1(), | |
4814 instr_->temp2(), | |
4815 UNSIGNED_INT32); | |
4816 } | |
4817 LInstruction* instr() override { return instr_; } | |
4818 | |
4819 private: | |
4820 LNumberTagU* instr_; | |
4821 }; | |
4822 | |
4823 Register input = ToRegister(instr->value()); | |
4824 Register result = ToRegister(instr->result()); | |
4825 | |
4826 DeferredNumberTagU* deferred = new(zone()) DeferredNumberTagU(this, instr); | |
4827 __ Branch(deferred->entry(), hi, input, Operand(Smi::kMaxValue)); | |
4828 __ SmiTag(result, input); | |
4829 __ bind(deferred->exit()); | |
4830 } | |
4831 | |
4832 | |
4833 void LCodeGen::DoDeferredNumberTagIU(LInstruction* instr, | |
4834 LOperand* value, | |
4835 LOperand* temp1, | |
4836 LOperand* temp2, | |
4837 IntegerSignedness signedness) { | |
4838 Label done, slow; | |
4839 Register src = ToRegister(value); | |
4840 Register dst = ToRegister(instr->result()); | |
4841 Register tmp1 = scratch0(); | |
4842 Register tmp2 = ToRegister(temp1); | |
4843 Register tmp3 = ToRegister(temp2); | |
4844 DoubleRegister dbl_scratch = double_scratch0(); | |
4845 | |
4846 if (signedness == SIGNED_INT32) { | |
4847 // There was overflow, so bits 30 and 31 of the original integer | |
4848 // disagree. Try to allocate a heap number in new space and store | |
4849 // the value in there. If that fails, call the runtime system. | |
4850 if (dst.is(src)) { | |
4851 __ SmiUntag(src, dst); | |
4852 __ Xor(src, src, Operand(0x80000000)); | |
4853 } | |
4854 __ mtc1(src, dbl_scratch); | |
4855 __ cvt_d_w(dbl_scratch, dbl_scratch); | |
4856 } else { | |
4857 __ mtc1(src, dbl_scratch); | |
4858 __ Cvt_d_uw(dbl_scratch, dbl_scratch, f22); | |
4859 } | |
4860 | |
4861 if (FLAG_inline_new) { | |
4862 __ LoadRoot(tmp3, Heap::kHeapNumberMapRootIndex); | |
4863 __ AllocateHeapNumber(dst, tmp1, tmp2, tmp3, &slow, TAG_RESULT); | |
4864 __ Branch(&done); | |
4865 } | |
4866 | |
4867 // Slow case: Call the runtime system to do the number allocation. | |
4868 __ bind(&slow); | |
4869 { | |
4870 // TODO(3095996): Put a valid pointer value in the stack slot where the | |
4871 // result register is stored, as this register is in the pointer map, but | |
4872 // contains an integer value. | |
4873 __ mov(dst, zero_reg); | |
4874 // Preserve the value of all registers. | |
4875 PushSafepointRegistersScope scope(this); | |
4876 | |
4877 // NumberTagI and NumberTagD use the context from the frame, rather than | |
4878 // the environment's HContext or HInlinedContext value. | |
4879 // They only call Runtime::kAllocateHeapNumber. | |
4880 // The corresponding HChange instructions are added in a phase that does | |
4881 // not have easy access to the local context. | |
4882 __ ld(cp, MemOperand(fp, StandardFrameConstants::kContextOffset)); | |
4883 __ CallRuntimeSaveDoubles(Runtime::kAllocateHeapNumber); | |
4884 RecordSafepointWithRegisters( | |
4885 instr->pointer_map(), 0, Safepoint::kNoLazyDeopt); | |
4886 __ StoreToSafepointRegisterSlot(v0, dst); | |
4887 } | |
4888 | |
4889 // Done. Put the value in dbl_scratch into the value of the allocated heap | |
4890 // number. | |
4891 __ bind(&done); | |
4892 __ sdc1(dbl_scratch, FieldMemOperand(dst, HeapNumber::kValueOffset)); | |
4893 } | |
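// A worked sketch of the SIGNED_INT32 recovery above (added, illustrative;
// follows the 32-bit overflow model in the comment): when dst aliases src,
// the register still holds the overflowed SmiTag result. Untagging shifts
// the payload back and the xor with 0x80000000 undoes the sign-bit flip
// from the wraparound; e.g. 0x40000000 tags to 0x80000000, untags to
// 0xC0000000, and 0xC0000000 ^ 0x80000000 restores 0x40000000.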
4894 | |
4895 | |
4896 void LCodeGen::DoNumberTagD(LNumberTagD* instr) { | |
4897 class DeferredNumberTagD final : public LDeferredCode { | |
4898 public: | |
4899 DeferredNumberTagD(LCodeGen* codegen, LNumberTagD* instr) | |
4900 : LDeferredCode(codegen), instr_(instr) { } | |
4901 void Generate() override { codegen()->DoDeferredNumberTagD(instr_); } | |
4902 LInstruction* instr() override { return instr_; } | |
4903 | |
4904 private: | |
4905 LNumberTagD* instr_; | |
4906 }; | |
4907 | |
4908 DoubleRegister input_reg = ToDoubleRegister(instr->value()); | |
4909 Register scratch = scratch0(); | |
4910 Register reg = ToRegister(instr->result()); | |
4911 Register temp1 = ToRegister(instr->temp()); | |
4912 Register temp2 = ToRegister(instr->temp2()); | |
4913 | |
4914 DeferredNumberTagD* deferred = new(zone()) DeferredNumberTagD(this, instr); | |
4915 if (FLAG_inline_new) { | |
4916 __ LoadRoot(scratch, Heap::kHeapNumberMapRootIndex); | |
4917 // We want the untagged address first for performance. | |
4918 __ AllocateHeapNumber(reg, temp1, temp2, scratch, deferred->entry(), | |
4919 DONT_TAG_RESULT); | |
4920 } else { | |
4921 __ Branch(deferred->entry()); | |
4922 } | |
4923 __ bind(deferred->exit()); | |
4924 __ sdc1(input_reg, MemOperand(reg, HeapNumber::kValueOffset)); | |
4925 // Now that we have finished with the object's real address, tag it. | |
4926 __ Daddu(reg, reg, kHeapObjectTag); | |
4927 } | |
4928 | |
4929 | |
4930 void LCodeGen::DoDeferredNumberTagD(LNumberTagD* instr) { | |
4931 // TODO(3095996): Get rid of this. For now, we need to make the | |
4932 // result register contain a valid pointer because it is already | |
4933 // contained in the register pointer map. | |
4934 Register reg = ToRegister(instr->result()); | |
4935 __ mov(reg, zero_reg); | |
4936 | |
4937 PushSafepointRegistersScope scope(this); | |
4938 // NumberTagI and NumberTagD use the context from the frame, rather than | |
4939 // the environment's HContext or HInlinedContext value. | |
4940 // They only call Runtime::kAllocateHeapNumber. | |
4941 // The corresponding HChange instructions are added in a phase that does | |
4942 // not have easy access to the local context. | |
4943 __ ld(cp, MemOperand(fp, StandardFrameConstants::kContextOffset)); | |
4944 __ CallRuntimeSaveDoubles(Runtime::kAllocateHeapNumber); | |
4945 RecordSafepointWithRegisters( | |
4946 instr->pointer_map(), 0, Safepoint::kNoLazyDeopt); | |
4947 __ Dsubu(v0, v0, kHeapObjectTag); | |
4948 __ StoreToSafepointRegisterSlot(v0, reg); | |
4949 } | |
4950 | |
4951 | |
4952 void LCodeGen::DoSmiTag(LSmiTag* instr) { | |
4953 HChange* hchange = instr->hydrogen(); | |
4954 Register input = ToRegister(instr->value()); | |
4955 Register output = ToRegister(instr->result()); | |
4956 if (hchange->CheckFlag(HValue::kCanOverflow) && | |
4957 hchange->value()->CheckFlag(HValue::kUint32)) { | |
4958 __ And(at, input, Operand(0x80000000)); | |
4959 DeoptimizeIf(ne, instr, Deoptimizer::kOverflow, at, Operand(zero_reg)); | |
4960 } | |
4961 if (hchange->CheckFlag(HValue::kCanOverflow) && | |
4962 !hchange->value()->CheckFlag(HValue::kUint32)) { | |
4963 __ SmiTagCheckOverflow(output, input, at); | |
4964 DeoptimizeIf(lt, instr, Deoptimizer::kOverflow, at, Operand(zero_reg)); | |
4965 } else { | |
4966 __ SmiTag(output, input); | |
4967 } | |
4968 } | |
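// Added note (hedged): for uint32 inputs, any value with bit 31 set
// (>= 2^31) has no int32 representation, so the And/deopt pair rejects it
// before tagging. The signed path relies on SmiTagCheckOverflow reporting
// overflow as a negative value in 'at', which the lt-against-zero deopt
// then catches.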
4969 | |
4970 | |
4971 void LCodeGen::DoSmiUntag(LSmiUntag* instr) { | |
4972 Register scratch = scratch0(); | |
4973 Register input = ToRegister(instr->value()); | |
4974 Register result = ToRegister(instr->result()); | |
4975 if (instr->needs_check()) { | |
4976 STATIC_ASSERT(kHeapObjectTag == 1); | |
4977 // If the input is a HeapObject, the value of scratch won't be zero. | |
4978 __ And(scratch, input, Operand(kHeapObjectTag)); | |
4979 __ SmiUntag(result, input); | |
4980 DeoptimizeIf(ne, instr, Deoptimizer::kNotASmi, scratch, Operand(zero_reg)); | |
4981 } else { | |
4982 __ SmiUntag(result, input); | |
4983 } | |
4984 } | |
4985 | |
4986 | |
4987 void LCodeGen::EmitNumberUntagD(LNumberUntagD* instr, Register input_reg, | |
4988 DoubleRegister result_reg, | |
4989 NumberUntagDMode mode) { | |
4990 bool can_convert_undefined_to_nan = | |
4991 instr->hydrogen()->can_convert_undefined_to_nan(); | |
4992 bool deoptimize_on_minus_zero = instr->hydrogen()->deoptimize_on_minus_zero(); | |
4993 | |
4994 Register scratch = scratch0(); | |
4995 Label convert, load_smi, done; | |
4996 if (mode == NUMBER_CANDIDATE_IS_ANY_TAGGED) { | |
4997 // Smi check. | |
4998 __ UntagAndJumpIfSmi(scratch, input_reg, &load_smi); | |
4999 // Heap number map check. | |
5000 __ ld(scratch, FieldMemOperand(input_reg, HeapObject::kMapOffset)); | |
5001 __ LoadRoot(at, Heap::kHeapNumberMapRootIndex); | |
5002 if (can_convert_undefined_to_nan) { | |
5003 __ Branch(&convert, ne, scratch, Operand(at)); | |
5004 } else { | |
5005 DeoptimizeIf(ne, instr, Deoptimizer::kNotAHeapNumber, scratch, | |
5006 Operand(at)); | |
5007 } | |
5008 // Load heap number. | |
5009 __ ldc1(result_reg, FieldMemOperand(input_reg, HeapNumber::kValueOffset)); | |
5010 if (deoptimize_on_minus_zero) { | |
5011 __ mfc1(at, result_reg); | |
5012 __ Branch(&done, ne, at, Operand(zero_reg)); | |
5013 __ mfhc1(scratch, result_reg); // Get exponent/sign bits. | |
5014 DeoptimizeIf(eq, instr, Deoptimizer::kMinusZero, scratch, | |
5015 Operand(HeapNumber::kSignMask)); | |
5016 } | |
5017 __ Branch(&done); | |
5018 if (can_convert_undefined_to_nan) { | |
5019 __ bind(&convert); | |
5020 // Convert undefined (and hole) to NaN. | |
5021 __ LoadRoot(at, Heap::kUndefinedValueRootIndex); | |
5022 DeoptimizeIf(ne, instr, Deoptimizer::kNotAHeapNumberUndefined, input_reg, | |
5023 Operand(at)); | |
5024 __ LoadRoot(scratch, Heap::kNanValueRootIndex); | |
5025 __ ldc1(result_reg, FieldMemOperand(scratch, HeapNumber::kValueOffset)); | |
5026 __ Branch(&done); | |
5027 } | |
5028 } else { | |
5029 __ SmiUntag(scratch, input_reg); | |
5030 DCHECK(mode == NUMBER_CANDIDATE_IS_SMI); | |
5031 } | |
5032 // Smi to double register conversion. | |
5033 __ bind(&load_smi); | |
5034 // scratch: untagged value of input_reg | |
5035 __ mtc1(scratch, result_reg); | |
5036 __ cvt_d_w(result_reg, result_reg); | |
5037 __ bind(&done); | |
5038 } | |
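// A sketch of the minus-zero test above (added commentary): mfc1 reads the
// low word of the double and mfhc1 the high word. -0.0 is the only value
// with a zero low word whose high word equals HeapNumber::kSignMask
// (0x80000000), so the branch-then-deopt pair fires precisely on -0.0.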
5039 | |
5040 | |
5041 void LCodeGen::DoDeferredTaggedToI(LTaggedToI* instr) { | |
5042 Register input_reg = ToRegister(instr->value()); | |
5043 Register scratch1 = scratch0(); | |
5044 Register scratch2 = ToRegister(instr->temp()); | |
5045 DoubleRegister double_scratch = double_scratch0(); | |
5046 DoubleRegister double_scratch2 = ToDoubleRegister(instr->temp2()); | |
5047 | |
5048 DCHECK(!scratch1.is(input_reg) && !scratch1.is(scratch2)); | |
5049 DCHECK(!scratch2.is(input_reg) && !scratch2.is(scratch1)); | |
5050 | |
5051 Label done; | |
5052 | |
5053 // The input is a tagged HeapObject. | |
5054 // Heap number map check. | |
5055 __ ld(scratch1, FieldMemOperand(input_reg, HeapObject::kMapOffset)); | |
5056 __ LoadRoot(at, Heap::kHeapNumberMapRootIndex); | |
5057 // This 'at' value and scratch1 map value are used for tests in both clauses | |
5058 // of the if. | |
5059 | |
5060 if (instr->truncating()) { | |
5061 // Performs a truncating conversion of a floating point number as used by | |
5062 // the JS bitwise operations. | |
5063 Label no_heap_number, check_bools, check_false; | |
5064 // Check HeapNumber map. | |
5065 __ Branch(USE_DELAY_SLOT, &no_heap_number, ne, scratch1, Operand(at)); | |
5066 __ mov(scratch2, input_reg); // In delay slot. | |
5067 __ TruncateHeapNumberToI(input_reg, scratch2); | |
5068 __ Branch(&done); | |
5069 | |
5070 // Check for Oddballs. Undefined/False is converted to zero and True to one | |
5071 // for truncating conversions. | |
5072 __ bind(&no_heap_number); | |
5073 __ LoadRoot(at, Heap::kUndefinedValueRootIndex); | |
5074 __ Branch(&check_bools, ne, input_reg, Operand(at)); | |
5075 DCHECK(ToRegister(instr->result()).is(input_reg)); | |
5076 __ Branch(USE_DELAY_SLOT, &done); | |
5077 __ mov(input_reg, zero_reg); // In delay slot. | |
5078 | |
5079 __ bind(&check_bools); | |
5080 __ LoadRoot(at, Heap::kTrueValueRootIndex); | |
5081 __ Branch(&check_false, ne, scratch2, Operand(at)); | |
5082 __ Branch(USE_DELAY_SLOT, &done); | |
5083 __ li(input_reg, Operand(1)); // In delay slot. | |
5084 | |
5085 __ bind(&check_false); | |
5086 __ LoadRoot(at, Heap::kFalseValueRootIndex); | |
5087 DeoptimizeIf(ne, instr, Deoptimizer::kNotAHeapNumberUndefinedBoolean, | |
5088 scratch2, Operand(at)); | |
5089 __ Branch(USE_DELAY_SLOT, &done); | |
5090 __ mov(input_reg, zero_reg); // In delay slot. | |
5091 } else { | |
5092 DeoptimizeIf(ne, instr, Deoptimizer::kNotAHeapNumber, scratch1, | |
5093 Operand(at)); | |
5094 | |
5095 // Load the double value. | |
5096 __ ldc1(double_scratch, | |
5097 FieldMemOperand(input_reg, HeapNumber::kValueOffset)); | |
5098 | |
5099 Register except_flag = scratch2; | |
5100 __ EmitFPUTruncate(kRoundToZero, | |
5101 input_reg, | |
5102 double_scratch, | |
5103 scratch1, | |
5104 double_scratch2, | |
5105 except_flag, | |
5106 kCheckForInexactConversion); | |
5107 | |
5108 DeoptimizeIf(ne, instr, Deoptimizer::kLostPrecisionOrNaN, except_flag, | |
5109 Operand(zero_reg)); | |
5110 | |
5111 if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) { | |
5112 __ Branch(&done, ne, input_reg, Operand(zero_reg)); | |
5113 | |
5114 __ mfhc1(scratch1, double_scratch); // Get exponent/sign bits. | |
5115 __ And(scratch1, scratch1, Operand(HeapNumber::kSignMask)); | |
5116 DeoptimizeIf(ne, instr, Deoptimizer::kMinusZero, scratch1, | |
5117 Operand(zero_reg)); | |
5118 } | |
5119 } | |
5120 __ bind(&done); | |
5121 } | |
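// Added note (hedged): the truncating branch mirrors what the JS bitwise
// operators need -- undefined and false truncate to 0, true to 1, and any
// other non-HeapNumber input deopts. The non-truncating branch is stricter:
// it deopts on any inexact conversion, and on -0.0 when
// kBailoutOnMinusZero is set.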
5122 | |
5123 | |
5124 void LCodeGen::DoTaggedToI(LTaggedToI* instr) { | |
5125 class DeferredTaggedToI final : public LDeferredCode { | |
5126 public: | |
5127 DeferredTaggedToI(LCodeGen* codegen, LTaggedToI* instr) | |
5128 : LDeferredCode(codegen), instr_(instr) { } | |
5129 void Generate() override { codegen()->DoDeferredTaggedToI(instr_); } | |
5130 LInstruction* instr() override { return instr_; } | |
5131 | |
5132 private: | |
5133 LTaggedToI* instr_; | |
5134 }; | |
5135 | |
5136 LOperand* input = instr->value(); | |
5137 DCHECK(input->IsRegister()); | |
5138 DCHECK(input->Equals(instr->result())); | |
5139 | |
5140 Register input_reg = ToRegister(input); | |
5141 | |
5142 if (instr->hydrogen()->value()->representation().IsSmi()) { | |
5143 __ SmiUntag(input_reg); | |
5144 } else { | |
5145 DeferredTaggedToI* deferred = new(zone()) DeferredTaggedToI(this, instr); | |
5146 | |
5147 // Let the deferred code handle the HeapObject case. | |
5148 __ JumpIfNotSmi(input_reg, deferred->entry()); | |
5149 | |
5150 // Smi to int32 conversion. | |
5151 __ SmiUntag(input_reg); | |
5152 __ bind(deferred->exit()); | |
5153 } | |
5154 } | |
5155 | |
5156 | |
5157 void LCodeGen::DoNumberUntagD(LNumberUntagD* instr) { | |
5158 LOperand* input = instr->value(); | |
5159 DCHECK(input->IsRegister()); | |
5160 LOperand* result = instr->result(); | |
5161 DCHECK(result->IsDoubleRegister()); | |
5162 | |
5163 Register input_reg = ToRegister(input); | |
5164 DoubleRegister result_reg = ToDoubleRegister(result); | |
5165 | |
5166 HValue* value = instr->hydrogen()->value(); | |
5167 NumberUntagDMode mode = value->representation().IsSmi() | |
5168 ? NUMBER_CANDIDATE_IS_SMI : NUMBER_CANDIDATE_IS_ANY_TAGGED; | |
5169 | |
5170 EmitNumberUntagD(instr, input_reg, result_reg, mode); | |
5171 } | |
5172 | |
5173 | |
5174 void LCodeGen::DoDoubleToI(LDoubleToI* instr) { | |
5175 Register result_reg = ToRegister(instr->result()); | |
5176 Register scratch1 = scratch0(); | |
5177 DoubleRegister double_input = ToDoubleRegister(instr->value()); | |
5178 | |
5179 if (instr->truncating()) { | |
5180 __ TruncateDoubleToI(result_reg, double_input); | |
5181 } else { | |
5182 Register except_flag = LCodeGen::scratch1(); | |
5183 | |
5184 __ EmitFPUTruncate(kRoundToMinusInf, | |
5185 result_reg, | |
5186 double_input, | |
5187 scratch1, | |
5188 double_scratch0(), | |
5189 except_flag, | |
5190 kCheckForInexactConversion); | |
5191 | |
5192 // Deopt if the operation did not succeed (except_flag != 0). | |
5193 DeoptimizeIf(ne, instr, Deoptimizer::kLostPrecisionOrNaN, except_flag, | |
5194 Operand(zero_reg)); | |
5195 | |
5196 if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) { | |
5197 Label done; | |
5198 __ Branch(&done, ne, result_reg, Operand(zero_reg)); | |
5199 __ mfhc1(scratch1, double_input); // Get exponent/sign bits. | |
5200 __ And(scratch1, scratch1, Operand(HeapNumber::kSignMask)); | |
5201 DeoptimizeIf(ne, instr, Deoptimizer::kMinusZero, scratch1, | |
5202 Operand(zero_reg)); | |
5203 __ bind(&done); | |
5204 } | |
5205 } | |
5206 } | |
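// Added note (hedged): the non-truncating path rounds toward minus infinity
// but also requests the inexact flag, so any double that is not already an
// exact int32 deopts instead of being silently rounded. DoDoubleToSmi below
// uses the same pattern, then retags the result.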
5207 | |
5208 | |
5209 void LCodeGen::DoDoubleToSmi(LDoubleToSmi* instr) { | |
5210 Register result_reg = ToRegister(instr->result()); | |
5211 Register scratch1 = LCodeGen::scratch0(); | |
5212 DoubleRegister double_input = ToDoubleRegister(instr->value()); | |
5213 | |
5214 if (instr->truncating()) { | |
5215 __ TruncateDoubleToI(result_reg, double_input); | |
5216 } else { | |
5217 Register except_flag = LCodeGen::scratch1(); | |
5218 | |
5219 __ EmitFPUTruncate(kRoundToMinusInf, | |
5220 result_reg, | |
5221 double_input, | |
5222 scratch1, | |
5223 double_scratch0(), | |
5224 except_flag, | |
5225 kCheckForInexactConversion); | |
5226 | |
5227 // Deopt if the operation did not succeed (except_flag != 0). | |
5228 DeoptimizeIf(ne, instr, Deoptimizer::kLostPrecisionOrNaN, except_flag, | |
5229 Operand(zero_reg)); | |
5230 | |
5231 if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) { | |
5232 Label done; | |
5233 __ Branch(&done, ne, result_reg, Operand(zero_reg)); | |
5234 __ mfhc1(scratch1, double_input); // Get exponent/sign bits. | |
5235 __ And(scratch1, scratch1, Operand(HeapNumber::kSignMask)); | |
5236 DeoptimizeIf(ne, instr, Deoptimizer::kMinusZero, scratch1, | |
5237 Operand(zero_reg)); | |
5238 __ bind(&done); | |
5239 } | |
5240 } | |
5241 __ SmiTag(result_reg, result_reg); | |
5242 } | |
5243 | |
5244 | |
5245 void LCodeGen::DoCheckSmi(LCheckSmi* instr) { | |
5246 LOperand* input = instr->value(); | |
5247 __ SmiTst(ToRegister(input), at); | |
5248 DeoptimizeIf(ne, instr, Deoptimizer::kNotASmi, at, Operand(zero_reg)); | |
5249 } | |
5250 | |
5251 | |
5252 void LCodeGen::DoCheckNonSmi(LCheckNonSmi* instr) { | |
5253 if (!instr->hydrogen()->value()->type().IsHeapObject()) { | |
5254 LOperand* input = instr->value(); | |
5255 __ SmiTst(ToRegister(input), at); | |
5256 DeoptimizeIf(eq, instr, Deoptimizer::kSmi, at, Operand(zero_reg)); | |
5257 } | |
5258 } | |
5259 | |
5260 | |
5261 void LCodeGen::DoCheckArrayBufferNotNeutered( | |
5262 LCheckArrayBufferNotNeutered* instr) { | |
5263 Register view = ToRegister(instr->view()); | |
5264 Register scratch = scratch0(); | |
5265 | |
5266 __ ld(scratch, FieldMemOperand(view, JSArrayBufferView::kBufferOffset)); | |
5267 __ lw(scratch, FieldMemOperand(scratch, JSArrayBuffer::kBitFieldOffset)); | |
5268 __ And(at, scratch, 1 << JSArrayBuffer::WasNeutered::kShift); | |
5269 DeoptimizeIf(ne, instr, Deoptimizer::kOutOfBounds, at, Operand(zero_reg)); | |
5270 } | |
5271 | |
5272 | |
5273 void LCodeGen::DoCheckInstanceType(LCheckInstanceType* instr) { | |
5274 Register input = ToRegister(instr->value()); | |
5275 Register scratch = scratch0(); | |
5276 | |
5277 __ GetObjectType(input, scratch, scratch); | |
5278 | |
5279 if (instr->hydrogen()->is_interval_check()) { | |
5280 InstanceType first; | |
5281 InstanceType last; | |
5282 instr->hydrogen()->GetCheckInterval(&first, &last); | |
5283 | |
5284 // If there is only one type in the interval, check for equality. | |
5285 if (first == last) { | |
5286 DeoptimizeIf(ne, instr, Deoptimizer::kWrongInstanceType, scratch, | |
5287 Operand(first)); | |
5288 } else { | |
5289 DeoptimizeIf(lo, instr, Deoptimizer::kWrongInstanceType, scratch, | |
5290 Operand(first)); | |
5291 // Omit check for the last type. | |
5292 if (last != LAST_TYPE) { | |
5293 DeoptimizeIf(hi, instr, Deoptimizer::kWrongInstanceType, scratch, | |
5294 Operand(last)); | |
5295 } | |
5296 } | |
5297 } else { | |
5298 uint8_t mask; | |
5299 uint8_t tag; | |
5300 instr->hydrogen()->GetCheckMaskAndTag(&mask, &tag); | |
5301 | |
5302 if (base::bits::IsPowerOfTwo32(mask)) { | |
5303 DCHECK(tag == 0 || base::bits::IsPowerOfTwo32(tag)); | |
5304 __ And(at, scratch, mask); | |
5305 DeoptimizeIf(tag == 0 ? ne : eq, instr, Deoptimizer::kWrongInstanceType, | |
5306 at, Operand(zero_reg)); | |
5307 } else { | |
5308 __ And(scratch, scratch, Operand(mask)); | |
5309 DeoptimizeIf(ne, instr, Deoptimizer::kWrongInstanceType, scratch, | |
5310 Operand(tag)); | |
5311 } | |
5312 } | |
5313 } | |
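// A worked sketch of the mask/tag fast path above (added, illustrative):
// when the mask is a power of two, a single And suffices. E.g. with
// mask = kIsNotStringMask (0x80) and tag = kStringTag (0), the code deopts
// if the And result is non-zero, i.e. whenever the instance type is not a
// string; a non-zero tag flips the polarity and deopts when the bit is
// clear.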
5314 | |
5315 | |
5316 void LCodeGen::DoCheckValue(LCheckValue* instr) { | |
5317 Register reg = ToRegister(instr->value()); | |
5318 Handle<HeapObject> object = instr->hydrogen()->object().handle(); | |
5319 AllowDeferredHandleDereference smi_check; | |
5320 if (isolate()->heap()->InNewSpace(*object)) { | |
5322 Handle<Cell> cell = isolate()->factory()->NewCell(object); | |
5323 __ li(at, Operand(cell)); | |
5324 __ ld(at, FieldMemOperand(at, Cell::kValueOffset)); | |
5325 DeoptimizeIf(ne, instr, Deoptimizer::kValueMismatch, reg, Operand(at)); | |
5326 } else { | |
5327 DeoptimizeIf(ne, instr, Deoptimizer::kValueMismatch, reg, Operand(object)); | |
5328 } | |
5329 } | |
5330 | |
5331 | |
5332 void LCodeGen::DoDeferredInstanceMigration(LCheckMaps* instr, Register object) { | |
5333 { | |
5334 PushSafepointRegistersScope scope(this); | |
5335 __ push(object); | |
5336 __ mov(cp, zero_reg); | |
5337 __ CallRuntimeSaveDoubles(Runtime::kTryMigrateInstance); | |
5338 RecordSafepointWithRegisters( | |
5339 instr->pointer_map(), 1, Safepoint::kNoLazyDeopt); | |
5340 __ StoreToSafepointRegisterSlot(v0, scratch0()); | |
5341 } | |
5342 __ SmiTst(scratch0(), at); | |
5343 DeoptimizeIf(eq, instr, Deoptimizer::kInstanceMigrationFailed, at, | |
5344 Operand(zero_reg)); | |
5345 } | |
5346 | |
5347 | |
5348 void LCodeGen::DoCheckMaps(LCheckMaps* instr) { | |
5349 class DeferredCheckMaps final : public LDeferredCode { | |
5350 public: | |
5351 DeferredCheckMaps(LCodeGen* codegen, LCheckMaps* instr, Register object) | |
5352 : LDeferredCode(codegen), instr_(instr), object_(object) { | |
5353 SetExit(check_maps()); | |
5354 } | |
5355 void Generate() override { | |
5356 codegen()->DoDeferredInstanceMigration(instr_, object_); | |
5357 } | |
5358 Label* check_maps() { return &check_maps_; } | |
5359 LInstruction* instr() override { return instr_; } | |
5360 | |
5361 private: | |
5362 LCheckMaps* instr_; | |
5363 Label check_maps_; | |
5364 Register object_; | |
5365 }; | |
5366 | |
5367 if (instr->hydrogen()->IsStabilityCheck()) { | |
5368 const UniqueSet<Map>* maps = instr->hydrogen()->maps(); | |
5369 for (int i = 0; i < maps->size(); ++i) { | |
5370 AddStabilityDependency(maps->at(i).handle()); | |
5371 } | |
5372 return; | |
5373 } | |
5374 | |
5375 Register map_reg = scratch0(); | |
5376 LOperand* input = instr->value(); | |
5377 DCHECK(input->IsRegister()); | |
5378 Register reg = ToRegister(input); | |
5379 __ ld(map_reg, FieldMemOperand(reg, HeapObject::kMapOffset)); | |
5380 | |
5381 DeferredCheckMaps* deferred = NULL; | |
5382 if (instr->hydrogen()->HasMigrationTarget()) { | |
5383 deferred = new(zone()) DeferredCheckMaps(this, instr, reg); | |
5384 __ bind(deferred->check_maps()); | |
5385 } | |
5386 | |
5387 const UniqueSet<Map>* maps = instr->hydrogen()->maps(); | |
5388 Label success; | |
5389 for (int i = 0; i < maps->size() - 1; i++) { | |
5390 Handle<Map> map = maps->at(i).handle(); | |
5391 __ CompareMapAndBranch(map_reg, map, &success, eq, &success); | |
5392 } | |
5393 Handle<Map> map = maps->at(maps->size() - 1).handle(); | |
5394 // Do the CompareMap() directly within the Branch() and DeoptimizeIf(). | |
5395 if (instr->hydrogen()->HasMigrationTarget()) { | |
5396 __ Branch(deferred->entry(), ne, map_reg, Operand(map)); | |
5397 } else { | |
5398 DeoptimizeIf(ne, instr, Deoptimizer::kWrongMap, map_reg, Operand(map)); | |
5399 } | |
5400 | |
5401 __ bind(&success); | |
5402 } | |
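// Added commentary (a sketch): a stability check emits no code at all; it
// only registers a compilation dependency on each map, so the optimized
// code is thrown away if any of those maps later transitions. Only unstable
// maps reach the compare-and-deopt sequence above, with a detour through
// DoDeferredInstanceMigration when a migration target exists.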
5403 | |
5404 | |
5405 void LCodeGen::DoClampDToUint8(LClampDToUint8* instr) { | |
5406 DoubleRegister value_reg = ToDoubleRegister(instr->unclamped()); | |
5407 Register result_reg = ToRegister(instr->result()); | |
5408 DoubleRegister temp_reg = ToDoubleRegister(instr->temp()); | |
5409 __ ClampDoubleToUint8(result_reg, value_reg, temp_reg); | |
5410 } | |
5411 | |
5412 | |
5413 void LCodeGen::DoClampIToUint8(LClampIToUint8* instr) { | |
5414 Register unclamped_reg = ToRegister(instr->unclamped()); | |
5415 Register result_reg = ToRegister(instr->result()); | |
5416 __ ClampUint8(result_reg, unclamped_reg); | |
5417 } | |
5418 | |
5419 | |
5420 void LCodeGen::DoClampTToUint8(LClampTToUint8* instr) { | |
5421 Register scratch = scratch0(); | |
5422 Register input_reg = ToRegister(instr->unclamped()); | |
5423 Register result_reg = ToRegister(instr->result()); | |
5424 DoubleRegister temp_reg = ToDoubleRegister(instr->temp()); | |
5425 Label is_smi, done, heap_number; | |
5426 | |
5427 // Both smi and heap number cases are handled. | |
5428 __ UntagAndJumpIfSmi(scratch, input_reg, &is_smi); | |
5429 | |
5430 // Check for heap number | |
5431 __ ld(scratch, FieldMemOperand(input_reg, HeapObject::kMapOffset)); | |
5432 __ Branch(&heap_number, eq, scratch, Operand(factory()->heap_number_map())); | |
5433 | |
5434 // Check for undefined. Undefined is converted to zero for clamping | |
5435 // conversions. | |
5436 DeoptimizeIf(ne, instr, Deoptimizer::kNotAHeapNumberUndefined, input_reg, | |
5437 Operand(factory()->undefined_value())); | |
5438 __ mov(result_reg, zero_reg); | |
5439 __ jmp(&done); | |
5440 | |
5441 // Heap number | |
5442 __ bind(&heap_number); | |
5443 __ ldc1(double_scratch0(), FieldMemOperand(input_reg, | |
5444 HeapNumber::kValueOffset)); | |
5445 __ ClampDoubleToUint8(result_reg, double_scratch0(), temp_reg); | |
5446 __ jmp(&done); | |
5447 | |
5448 __ bind(&is_smi); | |
5449 __ ClampUint8(result_reg, scratch); | |
5450 | |
5451 __ bind(&done); | |
5452 } | |
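// Added note (hedged): the three-way split above implements the uint8
// clamp -- smis clamp directly on the integer value, heap numbers clamp on
// the double (rounding is handled inside ClampDoubleToUint8), undefined is
// defined to clamp to 0, and any other tagged value deopts.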
5453 | |
5454 | |
5455 void LCodeGen::DoDoubleBits(LDoubleBits* instr) { | |
5456 DoubleRegister value_reg = ToDoubleRegister(instr->value()); | |
5457 Register result_reg = ToRegister(instr->result()); | |
5458 if (instr->hydrogen()->bits() == HDoubleBits::HIGH) { | |
5459 __ FmoveHigh(result_reg, value_reg); | |
5460 } else { | |
5461 __ FmoveLow(result_reg, value_reg); | |
5462 } | |
5463 } | |
5464 | |
5465 | |
5466 void LCodeGen::DoConstructDouble(LConstructDouble* instr) { | |
5467 Register hi_reg = ToRegister(instr->hi()); | |
5468 Register lo_reg = ToRegister(instr->lo()); | |
5469 DoubleRegister result_reg = ToDoubleRegister(instr->result()); | |
5470 __ Move(result_reg, lo_reg, hi_reg); | |
5471 } | |
5472 | |
5473 | |
5474 void LCodeGen::DoAllocate(LAllocate* instr) { | |
5475 class DeferredAllocate final : public LDeferredCode { | |
5476 public: | |
5477 DeferredAllocate(LCodeGen* codegen, LAllocate* instr) | |
5478 : LDeferredCode(codegen), instr_(instr) { } | |
5479 void Generate() override { codegen()->DoDeferredAllocate(instr_); } | |
5480 LInstruction* instr() override { return instr_; } | |
5481 | |
5482 private: | |
5483 LAllocate* instr_; | |
5484 }; | |
5485 | |
5486 DeferredAllocate* deferred = | |
5487 new(zone()) DeferredAllocate(this, instr); | |
5488 | |
5489 Register result = ToRegister(instr->result()); | |
5490 Register scratch = ToRegister(instr->temp1()); | |
5491 Register scratch2 = ToRegister(instr->temp2()); | |
5492 | |
5493 // Allocate memory for the object. | |
5494 AllocationFlags flags = TAG_OBJECT; | |
5495 if (instr->hydrogen()->MustAllocateDoubleAligned()) { | |
5496 flags = static_cast<AllocationFlags>(flags | DOUBLE_ALIGNMENT); | |
5497 } | |
5498 if (instr->hydrogen()->IsOldSpaceAllocation()) { | |
5499 DCHECK(!instr->hydrogen()->IsNewSpaceAllocation()); | |
5500 flags = static_cast<AllocationFlags>(flags | PRETENURE); | |
5501 } | |
5502 if (instr->size()->IsConstantOperand()) { | |
5503 int32_t size = ToInteger32(LConstantOperand::cast(instr->size())); | |
5504 CHECK(size <= Page::kMaxRegularHeapObjectSize); | |
5505 __ Allocate(size, result, scratch, scratch2, deferred->entry(), flags); | |
5506 } else { | |
5507 Register size = ToRegister(instr->size()); | |
5508 __ Allocate(size, result, scratch, scratch2, deferred->entry(), flags); | |
5509 } | |
5510 | |
5511 __ bind(deferred->exit()); | |
5512 | |
5513 if (instr->hydrogen()->MustPrefillWithFiller()) { | |
5514 STATIC_ASSERT(kHeapObjectTag == 1); | |
5515 if (instr->size()->IsConstantOperand()) { | |
5516 int32_t size = ToInteger32(LConstantOperand::cast(instr->size())); | |
5517 __ li(scratch, Operand(size - kHeapObjectTag)); | |
5518 } else { | |
5519 __ Dsubu(scratch, ToRegister(instr->size()), Operand(kHeapObjectTag)); | |
5520 } | |
5521 __ li(scratch2, Operand(isolate()->factory()->one_pointer_filler_map())); | |
5522 Label loop; | |
5523 __ bind(&loop); | |
5524 __ Dsubu(scratch, scratch, Operand(kPointerSize)); | |
5525 __ Daddu(at, result, Operand(scratch)); | |
5526 __ sd(scratch2, MemOperand(at)); | |
5527 __ Branch(&loop, ge, scratch, Operand(zero_reg)); | |
5528 } | |
5529 } | |
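// A sketch of the prefill loop above (added, illustrative): scratch starts
// at the untagged object size and walks down one pointer at a time, storing
// the one_pointer_filler_map at result + scratch until scratch goes
// negative -- i.e. the new object is filled from its last word back to its
// first.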
5530 | |
5531 | |
5532 void LCodeGen::DoDeferredAllocate(LAllocate* instr) { | |
5533 Register result = ToRegister(instr->result()); | |
5534 | |
5535 // TODO(3095996): Get rid of this. For now, we need to make the | |
5536 // result register contain a valid pointer because it is already | |
5537 // contained in the register pointer map. | |
5538 __ mov(result, zero_reg); | |
5539 | |
5540 PushSafepointRegistersScope scope(this); | |
5541 if (instr->size()->IsRegister()) { | |
5542 Register size = ToRegister(instr->size()); | |
5543 DCHECK(!size.is(result)); | |
5544 __ SmiTag(size); | |
5545 __ push(size); | |
5546 } else { | |
5547 int32_t size = ToInteger32(LConstantOperand::cast(instr->size())); | |
5548 if (size >= 0 && size <= Smi::kMaxValue) { | |
5549 __ li(v0, Operand(Smi::FromInt(size))); | |
5550 __ Push(v0); | |
5551 } else { | |
5552 // We should never get here at runtime => abort | |
5553 __ stop("invalid allocation size"); | |
5554 return; | |
5555 } | |
5556 } | |
5557 | |
5558 int flags = AllocateDoubleAlignFlag::encode( | |
5559 instr->hydrogen()->MustAllocateDoubleAligned()); | |
5560 if (instr->hydrogen()->IsOldSpaceAllocation()) { | |
5561 DCHECK(!instr->hydrogen()->IsNewSpaceAllocation()); | |
5562 flags = AllocateTargetSpace::update(flags, OLD_SPACE); | |
5563 } else { | |
5564 flags = AllocateTargetSpace::update(flags, NEW_SPACE); | |
5565 } | |
5566 __ li(v0, Operand(Smi::FromInt(flags))); | |
5567 __ Push(v0); | |
5568 | |
5569 CallRuntimeFromDeferred( | |
5570 Runtime::kAllocateInTargetSpace, 2, instr, instr->context()); | |
5571 __ StoreToSafepointRegisterSlot(v0, result); | |
5572 } | |
5573 | |
5574 | |
5575 void LCodeGen::DoToFastProperties(LToFastProperties* instr) { | |
5576 DCHECK(ToRegister(instr->value()).is(a0)); | |
5577 DCHECK(ToRegister(instr->result()).is(v0)); | |
5578 __ push(a0); | |
5579 CallRuntime(Runtime::kToFastProperties, 1, instr); | |
5580 } | |
5581 | |
5582 | |
5583 void LCodeGen::DoRegExpLiteral(LRegExpLiteral* instr) { | |
5584 DCHECK(ToRegister(instr->context()).is(cp)); | |
5585 Label materialized; | |
5586 // Registers will be used as follows: | |
5587 // a7 = literals array. | |
5588 // a1 = regexp literal. | |
5589 // a0 = regexp literal clone. | |
5590 // a2 and a4-a6 are used as temporaries. | |
5591 int literal_offset = | |
5592 LiteralsArray::OffsetOfLiteralAt(instr->hydrogen()->literal_index()); | |
5593 __ li(a7, instr->hydrogen()->literals()); | |
5594 __ ld(a1, FieldMemOperand(a7, literal_offset)); | |
5595 __ LoadRoot(at, Heap::kUndefinedValueRootIndex); | |
5596 __ Branch(&materialized, ne, a1, Operand(at)); | |
5597 | |
5598 // Create the regexp literal using a runtime function. | |
5599 // Result will be in v0. | |
5600 __ li(a6, Operand(Smi::FromInt(instr->hydrogen()->literal_index()))); | |
5601 __ li(a5, Operand(instr->hydrogen()->pattern())); | |
5602 __ li(a4, Operand(instr->hydrogen()->flags())); | |
5603 __ Push(a7, a6, a5, a4); | |
5604 CallRuntime(Runtime::kMaterializeRegExpLiteral, 4, instr); | |
5605 __ mov(a1, v0); | |
5606 | |
5607 __ bind(&materialized); | |
5608 int size = JSRegExp::kSize + JSRegExp::kInObjectFieldCount * kPointerSize; | |
5609 Label allocated, runtime_allocate; | |
5610 | |
5611 __ Allocate(size, v0, a2, a3, &runtime_allocate, TAG_OBJECT); | |
5612 __ jmp(&allocated); | |
5613 | |
5614 __ bind(&runtime_allocate); | |
5615 __ li(a0, Operand(Smi::FromInt(size))); | |
5616 __ Push(a1, a0); | |
5617 CallRuntime(Runtime::kAllocateInNewSpace, 1, instr); | |
5618 __ pop(a1); | |
5619 | |
5620 __ bind(&allocated); | |
5621 // Copy the content into the newly allocated memory. | |
5622 // (Unroll copy loop once for better throughput). | |
5623 for (int i = 0; i < size - kPointerSize; i += 2 * kPointerSize) { | |
5624 __ ld(a3, FieldMemOperand(a1, i)); | |
5625 __ ld(a2, FieldMemOperand(a1, i + kPointerSize)); | |
5626 __ sd(a3, FieldMemOperand(v0, i)); | |
5627 __ sd(a2, FieldMemOperand(v0, i + kPointerSize)); | |
5628 } | |
5629 if ((size % (2 * kPointerSize)) != 0) { | |
5630 __ ld(a3, FieldMemOperand(a1, size - kPointerSize)); | |
5631 __ sd(a3, FieldMemOperand(v0, size - kPointerSize)); | |
5632 } | |
5633 } | |
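// Added note (a sketch of the copy loop): size is JSRegExp::kSize plus the
// in-object fields, a multiple of kPointerSize. The loop copies two words
// per iteration for throughput, and the trailing `if` picks up the odd
// final word when size / kPointerSize is odd.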
5634 | |
5635 | |
5636 void LCodeGen::DoTypeof(LTypeof* instr) { | |
5637 DCHECK(ToRegister(instr->value()).is(a3)); | |
5638 DCHECK(ToRegister(instr->result()).is(v0)); | |
5639 Label end, do_call; | |
5640 Register value_register = ToRegister(instr->value()); | |
5641 __ JumpIfNotSmi(value_register, &do_call); | |
5642 __ li(v0, Operand(isolate()->factory()->number_string())); | |
5643 __ jmp(&end); | |
5644 __ bind(&do_call); | |
5645 TypeofStub stub(isolate()); | |
5646 CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr); | |
5647 __ bind(&end); | |
5648 } | |
5649 | |
5650 | |
5651 void LCodeGen::DoTypeofIsAndBranch(LTypeofIsAndBranch* instr) { | |
5652 Register input = ToRegister(instr->value()); | |
5653 | |
5654 Register cmp1 = no_reg; | |
5655 Operand cmp2 = Operand(no_reg); | |
5656 | |
5657 Condition final_branch_condition = EmitTypeofIs(instr->TrueLabel(chunk_), | |
5658 instr->FalseLabel(chunk_), | |
5659 input, | |
5660 instr->type_literal(), | |
5661 &cmp1, | |
5662 &cmp2); | |
5663 | |
5664 DCHECK(cmp1.is_valid()); | |
5665 DCHECK(!cmp2.is_reg() || cmp2.rm().is_valid()); | |
5666 | |
5667 if (final_branch_condition != kNoCondition) { | |
5668 EmitBranch(instr, final_branch_condition, cmp1, cmp2); | |
5669 } | |
5670 } | |
5671 | |
5672 | |
5673 Condition LCodeGen::EmitTypeofIs(Label* true_label, | |
5674 Label* false_label, | |
5675 Register input, | |
5676 Handle<String> type_name, | |
5677 Register* cmp1, | |
5678 Operand* cmp2) { | |
5679 // This function makes heavy use of branch delay slots, using them to load | |
5680 // values that are safe to compute regardless of the type of the input | |
5681 // register. | |
5682 Condition final_branch_condition = kNoCondition; | |
5683 Register scratch = scratch0(); | |
5684 Factory* factory = isolate()->factory(); | |
5685 if (String::Equals(type_name, factory->number_string())) { | |
5686 __ JumpIfSmi(input, true_label); | |
5687 __ ld(input, FieldMemOperand(input, HeapObject::kMapOffset)); | |
5688 __ LoadRoot(at, Heap::kHeapNumberMapRootIndex); | |
5689 *cmp1 = input; | |
5690 *cmp2 = Operand(at); | |
5691 final_branch_condition = eq; | |
5692 | |
5693 } else if (String::Equals(type_name, factory->string_string())) { | |
5694 __ JumpIfSmi(input, false_label); | |
5695 __ GetObjectType(input, input, scratch); | |
5696 *cmp1 = scratch; | |
5697 *cmp2 = Operand(FIRST_NONSTRING_TYPE); | |
5698 final_branch_condition = lt; | |
5699 | |
5700 } else if (String::Equals(type_name, factory->symbol_string())) { | |
5701 __ JumpIfSmi(input, false_label); | |
5702 __ GetObjectType(input, input, scratch); | |
5703 *cmp1 = scratch; | |
5704 *cmp2 = Operand(SYMBOL_TYPE); | |
5705 final_branch_condition = eq; | |
5706 | |
5707 } else if (String::Equals(type_name, factory->boolean_string())) { | |
5708 __ LoadRoot(at, Heap::kTrueValueRootIndex); | |
5709 __ Branch(USE_DELAY_SLOT, true_label, eq, at, Operand(input)); | |
5710 __ LoadRoot(at, Heap::kFalseValueRootIndex); | |
5711 *cmp1 = at; | |
5712 *cmp2 = Operand(input); | |
5713 final_branch_condition = eq; | |
5714 | |
5715 } else if (String::Equals(type_name, factory->undefined_string())) { | |
5716 __ LoadRoot(at, Heap::kUndefinedValueRootIndex); | |
5717 __ Branch(USE_DELAY_SLOT, true_label, eq, at, Operand(input)); | |
5718 // The first instruction of JumpIfSmi is an And; it is safe in the delay | |
5719 // slot. | |
5720 __ JumpIfSmi(input, false_label); | |
5721 // Check for undetectable objects => true. | |
5722 __ ld(input, FieldMemOperand(input, HeapObject::kMapOffset)); | |
5723 __ lbu(at, FieldMemOperand(input, Map::kBitFieldOffset)); | |
5724 __ And(at, at, 1 << Map::kIsUndetectable); | |
5725 *cmp1 = at; | |
5726 *cmp2 = Operand(zero_reg); | |
5727 final_branch_condition = ne; | |
5728 | |
5729 } else if (String::Equals(type_name, factory->function_string())) { | |
5730 __ JumpIfSmi(input, false_label); | |
5731 __ ld(scratch, FieldMemOperand(input, HeapObject::kMapOffset)); | |
5732 __ lbu(scratch, FieldMemOperand(scratch, Map::kBitFieldOffset)); | |
5733 __ And(scratch, scratch, | |
5734 Operand((1 << Map::kIsCallable) | (1 << Map::kIsUndetectable))); | |
5735 *cmp1 = scratch; | |
5736 *cmp2 = Operand(1 << Map::kIsCallable); | |
5737 final_branch_condition = eq; | |
5738 | |
5739 } else if (String::Equals(type_name, factory->object_string())) { | |
5740 __ JumpIfSmi(input, false_label); | |
5741 __ LoadRoot(at, Heap::kNullValueRootIndex); | |
5742 __ Branch(USE_DELAY_SLOT, true_label, eq, at, Operand(input)); | |
5743 STATIC_ASSERT(LAST_SPEC_OBJECT_TYPE == LAST_TYPE); | |
5744 __ GetObjectType(input, scratch, scratch1()); | |
5745 __ Branch(false_label, lt, scratch1(), Operand(FIRST_SPEC_OBJECT_TYPE)); | |
5746 // Check for callable or undetectable objects => false. | |
5747 __ lbu(scratch, FieldMemOperand(scratch, Map::kBitFieldOffset)); | |
5748 __ And(at, scratch, | |
5749 Operand((1 << Map::kIsCallable) | (1 << Map::kIsUndetectable))); | |
5750 *cmp1 = at; | |
5751 *cmp2 = Operand(zero_reg); | |
5752 final_branch_condition = eq; | |
5753 | |
5754 // clang-format off | |
5755 #define SIMD128_TYPE(TYPE, Type, type, lane_count, lane_type) \ | |
5756 } else if (String::Equals(type_name, factory->type##_string())) { \ | |
5757 __ JumpIfSmi(input, false_label); \ | |
5758 __ ld(input, FieldMemOperand(input, HeapObject::kMapOffset)); \ | |
5759 __ LoadRoot(at, Heap::k##Type##MapRootIndex); \ | |
5760 *cmp1 = input; \ | |
5761 *cmp2 = Operand(at); \ | |
5762 final_branch_condition = eq; | |
5763 SIMD128_TYPES(SIMD128_TYPE) | |
5764 #undef SIMD128_TYPE | |
5765 // clang-format on | |
5766 | |
5767 | |
5768 } else { | |
5769 *cmp1 = at; | |
5770 *cmp2 = Operand(zero_reg); // Set to valid regs, to avoid caller assertion. | |
5771 __ Branch(false_label); | |
5772 } | |
5773 | |
5774 return final_branch_condition; | |
5775 } | |
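
// A sketch of how a caller consumes EmitTypeofIs (mirroring the
// DoTypeofIsAndBranch pattern earlier in this file; the register choices and
// accessors here are illustrative, not the exact caller code):
//
//   Register cmp1 = no_reg;
//   Operand cmp2 = Operand(no_reg);
//   Condition cc = EmitTypeofIs(instr->TrueLabel(chunk_),
//                               instr->FalseLabel(chunk_),
//                               input, instr->type_literal(), &cmp1, &cmp2);
//   if (cc != kNoCondition) EmitBranch(instr, cc, cmp1, cmp2);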


void LCodeGen::DoIsConstructCallAndBranch(LIsConstructCallAndBranch* instr) {
  Register temp1 = ToRegister(instr->temp());

  EmitIsConstructCall(temp1, scratch0());

  EmitBranch(instr, eq, temp1,
             Operand(Smi::FromInt(StackFrame::CONSTRUCT)));
}

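// Frame walk: load the caller's frame pointer, and if that frame's context
// slot holds the ARGUMENTS_ADAPTOR sentinel (a Smi frame type rather than a
// real context), skip one more frame before reading the frame marker that
// DoIsConstructCallAndBranch compares against StackFrame::CONSTRUCT.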
void LCodeGen::EmitIsConstructCall(Register temp1, Register temp2) {
  DCHECK(!temp1.is(temp2));
  // Get the frame pointer for the calling frame.
  __ ld(temp1, MemOperand(fp, StandardFrameConstants::kCallerFPOffset));

  // Skip the arguments adaptor frame if it exists.
  Label check_frame_marker;
  __ ld(temp2, MemOperand(temp1, StandardFrameConstants::kContextOffset));
  __ Branch(&check_frame_marker, ne, temp2,
            Operand(Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR)));
  __ ld(temp1, MemOperand(temp1, StandardFrameConstants::kCallerFPOffset));

  // Check the marker in the calling frame.
  __ bind(&check_frame_marker);
  __ ld(temp1, MemOperand(temp1, StandardFrameConstants::kMarkerOffset));
}


void LCodeGen::EnsureSpaceForLazyDeopt(int space_needed) {
  if (info()->ShouldEnsureSpaceForLazyDeopt()) {
    // Ensure that we have enough space after the previous lazy-bailout
    // instruction for patching the code here.
    int current_pc = masm()->pc_offset();
    if (current_pc < last_lazy_deopt_pc_ + space_needed) {
      int padding_size = last_lazy_deopt_pc_ + space_needed - current_pc;
      DCHECK_EQ(0, padding_size % Assembler::kInstrSize);
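      // Worked example (kInstrSize is 4 on MIPS): if the previous lazy-deopt
      // point was recorded at pc offset 100, space_needed is 16 and
      // current_pc is 108, then padding_size is 8 and the loop below emits
      // two nops.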
      while (padding_size > 0) {
        __ nop();
        padding_size -= Assembler::kInstrSize;
      }
    }
  }
  last_lazy_deopt_pc_ = masm()->pc_offset();
}


void LCodeGen::DoLazyBailout(LLazyBailout* instr) {
  last_lazy_deopt_pc_ = masm()->pc_offset();
  DCHECK(instr->HasEnvironment());
  LEnvironment* env = instr->environment();
  RegisterEnvironmentForDeoptimization(env, Safepoint::kLazyDeopt);
  safepoints_.RecordLazyDeoptimizationIndex(env->deoptimization_index());
}


void LCodeGen::DoDeoptimize(LDeoptimize* instr) {
  Deoptimizer::BailoutType type = instr->hydrogen()->type();
  // TODO(danno): Stubs expect all deopts to be lazy for historical reasons
  // (the needed return address), even though the implementation of LAZY and
  // EAGER is now identical. When LAZY is eventually completely folded into
  // EAGER, remove the special case below.
  if (info()->IsStub() && type == Deoptimizer::EAGER) {
    type = Deoptimizer::LAZY;
  }

  DeoptimizeIf(al, instr, instr->hydrogen()->reason(), type, zero_reg,
               Operand(zero_reg));
}


void LCodeGen::DoDummy(LDummy* instr) {
  // Nothing to see here, move on!
}


void LCodeGen::DoDummyUse(LDummyUse* instr) {
  // Nothing to see here, move on!
}

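// Slow path for the backward-branch stack check in DoStackCheck below:
// entered from deferred code with all registers saved, it calls the runtime
// stack guard and records a safepoint that supports lazy deoptimization.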
void LCodeGen::DoDeferredStackCheck(LStackCheck* instr) {
  PushSafepointRegistersScope scope(this);
  LoadContextFromDeferred(instr->context());
  __ CallRuntimeSaveDoubles(Runtime::kStackGuard);
  RecordSafepointWithLazyDeopt(
      instr, RECORD_SAFEPOINT_WITH_REGISTERS_AND_NO_ARGUMENTS);
  DCHECK(instr->HasEnvironment());
  LEnvironment* env = instr->environment();
  safepoints_.RecordLazyDeoptimizationIndex(env->deoptimization_index());
}

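// Stack checks come in two flavors. At function entry the check is inline:
// sp is compared against the stack-limit root (unsigned, hence hs/lo) and
// the StackCheck builtin is called on overflow. At backward branches the
// overflow path jumps to the deferred slow path above instead.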
void LCodeGen::DoStackCheck(LStackCheck* instr) {
  class DeferredStackCheck final : public LDeferredCode {
   public:
    DeferredStackCheck(LCodeGen* codegen, LStackCheck* instr)
        : LDeferredCode(codegen), instr_(instr) { }
    void Generate() override { codegen()->DoDeferredStackCheck(instr_); }
    LInstruction* instr() override { return instr_; }

   private:
    LStackCheck* instr_;
  };

  DCHECK(instr->HasEnvironment());
  LEnvironment* env = instr->environment();
  // There is no LLazyBailout instruction for stack-checks. We have to
  // prepare for lazy deoptimization explicitly here.
  if (instr->hydrogen()->is_function_entry()) {
    // Perform stack overflow check.
    Label done;
    __ LoadRoot(at, Heap::kStackLimitRootIndex);
    __ Branch(&done, hs, sp, Operand(at));
    DCHECK(instr->context()->IsRegister());
    DCHECK(ToRegister(instr->context()).is(cp));
    CallCode(isolate()->builtins()->StackCheck(),
             RelocInfo::CODE_TARGET,
             instr);
    __ bind(&done);
  } else {
    DCHECK(instr->hydrogen()->is_backwards_branch());
    // Perform stack overflow check if this goto needs it before jumping.
    DeferredStackCheck* deferred_stack_check =
        new(zone()) DeferredStackCheck(this, instr);
    __ LoadRoot(at, Heap::kStackLimitRootIndex);
    __ Branch(deferred_stack_check->entry(), lo, sp, Operand(at));
    EnsureSpaceForLazyDeopt(Deoptimizer::patch_size());
    __ bind(instr->done_label());
    deferred_stack_check->SetExit(instr->done_label());
    RegisterEnvironmentForDeoptimization(env, Safepoint::kLazyDeopt);
    // Don't record a deoptimization index for the safepoint here.
    // That is done explicitly when the call and its safepoint are emitted
    // in the deferred code.
  }
}


void LCodeGen::DoOsrEntry(LOsrEntry* instr) {
  // This is a pseudo-instruction that ensures that the environment here is
  // properly registered for deoptimization and records the assembler's PC
  // offset.
  LEnvironment* environment = instr->environment();

  // If the environment were already registered, we would have no way of
  // backpatching it with the spill slot operands.
  DCHECK(!environment->HasBeenRegistered());
  RegisterEnvironmentForDeoptimization(environment, Safepoint::kNoLazyDeopt);

  GenerateOsrPrologue();
}

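// Prepare for-in iteration: deoptimize on Smis and on anything at or below
// the proxy range of instance types, then either enumerate through the enum
// cache (keyed by the object's map) or fall back to the runtime to collect
// the property names.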
void LCodeGen::DoForInPrepareMap(LForInPrepareMap* instr) {
  Register result = ToRegister(instr->result());
  Register object = ToRegister(instr->object());

  __ And(at, object, kSmiTagMask);
  DeoptimizeIf(eq, instr, Deoptimizer::kSmi, at, Operand(zero_reg));

  STATIC_ASSERT(FIRST_JS_PROXY_TYPE == FIRST_SPEC_OBJECT_TYPE);
  __ GetObjectType(object, a1, a1);
  DeoptimizeIf(le, instr, Deoptimizer::kNotAJavaScriptObject, a1,
               Operand(LAST_JS_PROXY_TYPE));

  Label use_cache, call_runtime;
  DCHECK(object.is(a0));
  Register null_value = a5;
  __ LoadRoot(null_value, Heap::kNullValueRootIndex);
  __ CheckEnumCache(null_value, &call_runtime);

  __ ld(result, FieldMemOperand(object, HeapObject::kMapOffset));
  __ Branch(&use_cache);

  // Get the set of properties to enumerate.
  __ bind(&call_runtime);
  __ push(object);
  CallRuntime(Runtime::kGetPropertyNamesFast, 1, instr);

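  // Runtime::kGetPropertyNamesFast is expected to return either the object's
  // map (meaning the enum cache is usable) or a FixedArray of names. A map
  // is recognized by its meta map; any other result deoptimizes below.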
  __ ld(a1, FieldMemOperand(v0, HeapObject::kMapOffset));
  DCHECK(result.is(v0));
  __ LoadRoot(at, Heap::kMetaMapRootIndex);
  DeoptimizeIf(ne, instr, Deoptimizer::kWrongMap, a1, Operand(at));
  __ bind(&use_cache);
}


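// Load the enum cache for the map produced above: an EnumLength of zero
// means there is nothing to enumerate, so the empty fixed array is used;
// otherwise the cache is read out of the map's instance descriptors.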
void LCodeGen::DoForInCacheArray(LForInCacheArray* instr) {
  Register map = ToRegister(instr->map());
  Register result = ToRegister(instr->result());
  Label load_cache, done;
  __ EnumLength(result, map);
  __ Branch(&load_cache, ne, result, Operand(Smi::FromInt(0)));
  __ li(result, Operand(isolate()->factory()->empty_fixed_array()));
  __ jmp(&done);

  __ bind(&load_cache);
  __ LoadInstanceDescriptors(map, result);
  __ ld(result,
        FieldMemOperand(result, DescriptorArray::kEnumCacheOffset));
  __ ld(result,
        FieldMemOperand(result, FixedArray::SizeFor(instr->idx())));
  DeoptimizeIf(eq, instr, Deoptimizer::kNoCache, result, Operand(zero_reg));

  __ bind(&done);
}


void LCodeGen::DoCheckMapValue(LCheckMapValue* instr) {
  Register object = ToRegister(instr->value());
  Register map = ToRegister(instr->map());
  __ ld(scratch0(), FieldMemOperand(object, HeapObject::kMapOffset));
  DeoptimizeIf(ne, instr, Deoptimizer::kWrongMap, map, Operand(scratch0()));
}


void LCodeGen::DoDeferredLoadMutableDouble(LLoadFieldByIndex* instr,
                                           Register result,
                                           Register object,
                                           Register index) {
  PushSafepointRegistersScope scope(this);
  __ Push(object, index);
  __ mov(cp, zero_reg);
  __ CallRuntimeSaveDoubles(Runtime::kLoadMutableDouble);
  RecordSafepointWithRegisters(
      instr->pointer_map(), 2, Safepoint::kNoLazyDeopt);
  __ StoreToSafepointRegisterSlot(v0, result);
}


void LCodeGen::DoLoadFieldByIndex(LLoadFieldByIndex* instr) {
  class DeferredLoadMutableDouble final : public LDeferredCode {
   public:
    DeferredLoadMutableDouble(LCodeGen* codegen,
                              LLoadFieldByIndex* instr,
                              Register result,
                              Register object,
                              Register index)
        : LDeferredCode(codegen),
          instr_(instr),
          result_(result),
          object_(object),
          index_(index) {
    }
    void Generate() override {
      codegen()->DoDeferredLoadMutableDouble(instr_, result_, object_, index_);
    }
    LInstruction* instr() override { return instr_; }

   private:
    LLoadFieldByIndex* instr_;
    Register result_;
    Register object_;
    Register index_;
  };

  Register object = ToRegister(instr->object());
  Register index = ToRegister(instr->index());
  Register result = ToRegister(instr->result());
  Register scratch = scratch0();

  DeferredLoadMutableDouble* deferred;
  deferred = new(zone()) DeferredLoadMutableDouble(
      this, instr, result, object, index);

  Label out_of_object, done;

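  // The incoming index is a Smi with an extra flag in its payload: bit 0 set
  // means the field holds a mutable HeapNumber and must be loaded through
  // the deferred runtime path. The arithmetic shift below strips that bit,
  // leaving the field index: non-negative for in-object fields, negative for
  // fields in the out-of-object properties backing store.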
  __ And(scratch, index, Operand(Smi::FromInt(1)));
  __ Branch(deferred->entry(), ne, scratch, Operand(zero_reg));
  __ dsra(index, index, 1);

  __ Branch(USE_DELAY_SLOT, &out_of_object, lt, index, Operand(zero_reg));
  __ SmiScale(scratch, index, kPointerSizeLog2);  // In delay slot.
  __ Daddu(scratch, object, scratch);
  __ ld(result, FieldMemOperand(scratch, JSObject::kHeaderSize));

  __ Branch(&done);

  __ bind(&out_of_object);
  __ ld(result, FieldMemOperand(object, JSObject::kPropertiesOffset));
  // The index is equal to the negated out-of-object property index plus 1.
  __ Dsubu(scratch, result, scratch);
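  // Worked example: out-of-object property 0 is encoded as index -1, so the
  // SmiScale in the delay slot above produced -kPointerSize; the subtraction
  // then yields properties + kPointerSize, and the kHeaderSize - kPointerSize
  // offset below lands exactly on the array's first element.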
  __ ld(result, FieldMemOperand(scratch,
                                FixedArray::kHeaderSize - kPointerSize));
  __ bind(deferred->exit());
  __ bind(&done);
}


void LCodeGen::DoStoreFrameContext(LStoreFrameContext* instr) {
  Register context = ToRegister(instr->context());
  __ sd(context, MemOperand(fp, StandardFrameConstants::kContextOffset));
}


void LCodeGen::DoAllocateBlockContext(LAllocateBlockContext* instr) {
  Handle<ScopeInfo> scope_info = instr->scope_info();
  __ li(at, scope_info);
  __ Push(at, ToRegister(instr->function()));
  CallRuntime(Runtime::kPushBlockContext, 2, instr);
  RecordSafepoint(Safepoint::kNoLazyDeopt);
}


#undef __

}  // namespace internal
}  // namespace v8