// Copyright 2014 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#include "src/base/bits.h"
#include "src/code-factory.h"
#include "src/code-stubs.h"
#include "src/hydrogen-osr.h"
#include "src/ic/ic.h"
#include "src/ic/stub-cache.h"
#include "src/ppc/lithium-codegen-ppc.h"
#include "src/ppc/lithium-gap-resolver-ppc.h"
#include "src/profiler/cpu-profiler.h"

namespace v8 {
namespace internal {


class SafepointGenerator final : public CallWrapper {
 public:
  SafepointGenerator(LCodeGen* codegen, LPointerMap* pointers,
                     Safepoint::DeoptMode mode)
      : codegen_(codegen), pointers_(pointers), deopt_mode_(mode) {}
  virtual ~SafepointGenerator() {}

  void BeforeCall(int call_size) const override {}

  void AfterCall() const override {
    codegen_->RecordSafepoint(pointers_, deopt_mode_);
  }

 private:
  LCodeGen* codegen_;
  LPointerMap* pointers_;
  Safepoint::DeoptMode deopt_mode_;
};


#define __ masm()->

bool LCodeGen::GenerateCode() {
  LPhase phase("Z_Code generation", chunk());
  DCHECK(is_unused());
  status_ = GENERATING;

  // Open a frame scope to indicate that there is a frame on the stack. The
  // NONE indicates that the scope shouldn't actually generate code to set up
  // the frame (that is done in GeneratePrologue).
  FrameScope frame_scope(masm_, StackFrame::NONE);

  bool rc = GeneratePrologue() && GenerateBody() && GenerateDeferredCode() &&
            GenerateJumpTable() && GenerateSafepointTable();
  if (FLAG_enable_embedded_constant_pool && !rc) {
    masm()->AbortConstantPoolBuilding();
  }
  return rc;
}


void LCodeGen::FinishCode(Handle<Code> code) {
  DCHECK(is_done());
  code->set_stack_slots(GetStackSlotCount());
  code->set_safepoint_table_offset(safepoints_.GetCodeOffset());
  PopulateDeoptimizationData(code);
}


void LCodeGen::SaveCallerDoubles() {
  DCHECK(info()->saves_caller_doubles());
  DCHECK(NeedsEagerFrame());
  Comment(";;; Save clobbered callee double registers");
  int count = 0;
  BitVector* doubles = chunk()->allocated_double_registers();
  BitVector::Iterator save_iterator(doubles);
  while (!save_iterator.Done()) {
    __ stfd(DoubleRegister::from_code(save_iterator.Current()),
            MemOperand(sp, count * kDoubleSize));
    save_iterator.Advance();
    count++;
  }
}


void LCodeGen::RestoreCallerDoubles() {
  DCHECK(info()->saves_caller_doubles());
  DCHECK(NeedsEagerFrame());
  Comment(";;; Restore clobbered callee double registers");
  BitVector* doubles = chunk()->allocated_double_registers();
  BitVector::Iterator save_iterator(doubles);
  int count = 0;
  while (!save_iterator.Done()) {
    __ lfd(DoubleRegister::from_code(save_iterator.Current()),
           MemOperand(sp, count * kDoubleSize));
    save_iterator.Advance();
    count++;
  }
}


bool LCodeGen::GeneratePrologue() {
  DCHECK(is_generating());

  if (info()->IsOptimizing()) {
    ProfileEntryHookStub::MaybeCallEntryHook(masm_);

#ifdef DEBUG
    if (strlen(FLAG_stop_at) > 0 &&
        info_->literal()->name()->IsUtf8EqualTo(CStrVector(FLAG_stop_at))) {
      __ stop("stop_at");
    }
#endif

    // r4: Callee's JS function.
    // cp: Callee's context.
    // pp: Callee's constant pool pointer (if enabled)
    // fp: Caller's frame pointer.
    // lr: Caller's pc.
    // ip: Our own function entry (required by the prologue)

    // Sloppy mode functions and builtins need to replace the receiver with the
    // global proxy when called as functions (without an explicit receiver
    // object).
    if (info()->MustReplaceUndefinedReceiverWithGlobalProxy()) {
      Label ok;
      int receiver_offset = info_->scope()->num_parameters() * kPointerSize;
      __ LoadP(r5, MemOperand(sp, receiver_offset));
      __ CompareRoot(r5, Heap::kUndefinedValueRootIndex);
      __ bne(&ok);

      __ LoadP(r5, GlobalObjectOperand());
      __ LoadP(r5, FieldMemOperand(r5, GlobalObject::kGlobalProxyOffset));

      __ StoreP(r5, MemOperand(sp, receiver_offset));

      __ bind(&ok);
    }
  }

  int prologue_offset = masm_->pc_offset();

  if (prologue_offset) {
    // The prologue logic requires its starting address in ip and the
    // corresponding offset from the function entry.
    prologue_offset += Instruction::kInstrSize;
    __ addi(ip, ip, Operand(prologue_offset));
  }
  info()->set_prologue_offset(prologue_offset);
  if (NeedsEagerFrame()) {
    if (info()->IsStub()) {
      __ StubPrologue(prologue_offset);
    } else {
      __ Prologue(info()->IsCodePreAgingActive(), prologue_offset);
    }
    frame_is_built_ = true;
  }

  // Reserve space for the stack slots needed by the code.
  int slots = GetStackSlotCount();
  if (slots > 0) {
    __ subi(sp, sp, Operand(slots * kPointerSize));
    if (FLAG_debug_code) {
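      // In debug builds, fill the reserved slots with the zap value so that
      // reads from uninitialized spill slots are easy to spot.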
      __ Push(r3, r4);
      __ li(r0, Operand(slots));
      __ mtctr(r0);
      __ addi(r3, sp, Operand((slots + 2) * kPointerSize));
      __ mov(r4, Operand(kSlotsZapValue));
      Label loop;
      __ bind(&loop);
      __ StorePU(r4, MemOperand(r3, -kPointerSize));
      __ bdnz(&loop);
      __ Pop(r3, r4);
    }
  }

  if (info()->saves_caller_doubles()) {
    SaveCallerDoubles();
  }
  return !is_aborted();
}


void LCodeGen::DoPrologue(LPrologue* instr) {
  Comment(";;; Prologue begin");

  // Possibly allocate a local context.
  if (info()->scope()->num_heap_slots() > 0) {
    Comment(";;; Allocate local context");
    bool need_write_barrier = true;
    // Argument to NewContext is the function, which is in r4.
    int slots = info()->scope()->num_heap_slots() - Context::MIN_CONTEXT_SLOTS;
    Safepoint::DeoptMode deopt_mode = Safepoint::kNoLazyDeopt;
    if (info()->scope()->is_script_scope()) {
      __ push(r4);
      __ Push(info()->scope()->GetScopeInfo(info()->isolate()));
      __ CallRuntime(Runtime::kNewScriptContext, 2);
      deopt_mode = Safepoint::kLazyDeopt;
    } else if (slots <= FastNewContextStub::kMaximumSlots) {
      FastNewContextStub stub(isolate(), slots);
      __ CallStub(&stub);
      // Result of FastNewContextStub is always in new space.
      need_write_barrier = false;
    } else {
      __ push(r4);
      __ CallRuntime(Runtime::kNewFunctionContext, 1);
    }
    RecordSafepoint(deopt_mode);

    // Context is returned in both r3 and cp. It replaces the context
    // passed to us. It's saved in the stack and kept live in cp.
    __ mr(cp, r3);
    __ StoreP(r3, MemOperand(fp, StandardFrameConstants::kContextOffset));
    // Copy any necessary parameters into the context.
    int num_parameters = scope()->num_parameters();
    int first_parameter = scope()->has_this_declaration() ? -1 : 0;
    for (int i = first_parameter; i < num_parameters; i++) {
      Variable* var = (i == -1) ? scope()->receiver() : scope()->parameter(i);
      if (var->IsContextSlot()) {
        int parameter_offset = StandardFrameConstants::kCallerSPOffset +
                               (num_parameters - 1 - i) * kPointerSize;
        // Load parameter from stack.
        __ LoadP(r3, MemOperand(fp, parameter_offset));
        // Store it in the context.
        MemOperand target = ContextOperand(cp, var->index());
        __ StoreP(r3, target, r0);
        // Update the write barrier. This clobbers r6 and r3.
        if (need_write_barrier) {
          __ RecordWriteContextSlot(cp, target.offset(), r3, r6,
                                    GetLinkRegisterState(), kSaveFPRegs);
        } else if (FLAG_debug_code) {
          Label done;
          __ JumpIfInNewSpace(cp, r3, &done);
          __ Abort(kExpectedNewSpaceObject);
          __ bind(&done);
        }
      }
    }
    Comment(";;; End allocate local context");
  }

  Comment(";;; Prologue end");
}


void LCodeGen::GenerateOsrPrologue() {
  // Generate the OSR entry prologue at the first unknown OSR value, or if
  // there are none, at the OSR entrypoint instruction.
  if (osr_pc_offset_ >= 0) return;

  osr_pc_offset_ = masm()->pc_offset();

  // Adjust the frame size, subsuming the unoptimized frame into the
  // optimized frame.
  int slots = GetStackSlotCount() - graph()->osr()->UnoptimizedFrameSlots();
  DCHECK(slots >= 0);
  __ subi(sp, sp, Operand(slots * kPointerSize));
}


void LCodeGen::GenerateBodyInstructionPre(LInstruction* instr) {
  if (instr->IsCall()) {
    EnsureSpaceForLazyDeopt(Deoptimizer::patch_size());
  }
  if (!instr->IsLazyBailout() && !instr->IsGap()) {
    safepoints_.BumpLastLazySafepointIndex();
  }
}


bool LCodeGen::GenerateDeferredCode() {
  DCHECK(is_generating());
  if (deferred_.length() > 0) {
    for (int i = 0; !is_aborted() && i < deferred_.length(); i++) {
      LDeferredCode* code = deferred_[i];

      HValue* value =
          instructions_->at(code->instruction_index())->hydrogen_value();
      RecordAndWritePosition(
          chunk()->graph()->SourcePositionToScriptPosition(value->position()));

      Comment(
          ";;; <@%d,#%d> "
          "-------------------- Deferred %s --------------------",
          code->instruction_index(), code->instr()->hydrogen_value()->id(),
          code->instr()->Mnemonic());
      __ bind(code->entry());
      if (NeedsDeferredFrame()) {
        Comment(";;; Build frame");
        DCHECK(!frame_is_built_);
        DCHECK(info()->IsStub());
        frame_is_built_ = true;
        __ LoadSmiLiteral(scratch0(), Smi::FromInt(StackFrame::STUB));
        __ PushFixedFrame(scratch0());
        __ addi(fp, sp, Operand(StandardFrameConstants::kFixedFrameSizeFromFp));
        Comment(";;; Deferred code");
      }
      code->Generate();
      if (NeedsDeferredFrame()) {
        Comment(";;; Destroy frame");
        DCHECK(frame_is_built_);
        __ PopFixedFrame(ip);
        frame_is_built_ = false;
      }
      __ b(code->exit());
    }
  }

  return !is_aborted();
}


bool LCodeGen::GenerateJumpTable() {
  // Check that the jump table is accessible from everywhere in the function
  // code, i.e. that offsets to the table can be encoded in the 24-bit signed
  // immediate of a branch instruction.
  // To simplify we consider the code size from the first instruction to the
  // end of the jump table. We also don't consider the pc load delta.
  // Each entry in the jump table is conservatively assumed to take up to
  // seven instructions.
  if (!is_int24((masm()->pc_offset() / Assembler::kInstrSize) +
                jump_table_.length() * 7)) {
    Abort(kGeneratedCodeIsTooLarge);
  }

  if (jump_table_.length() > 0) {
    Label needs_frame, call_deopt_entry;

    Comment(";;; -------------------- Jump table --------------------");
    Address base = jump_table_[0].address;

    Register entry_offset = scratch0();

    int length = jump_table_.length();
    for (int i = 0; i < length; i++) {
      Deoptimizer::JumpTableEntry* table_entry = &jump_table_[i];
      __ bind(&table_entry->label);

      DCHECK_EQ(jump_table_[0].bailout_type, table_entry->bailout_type);
      Address entry = table_entry->address;
      DeoptComment(table_entry->deopt_info);

      // Second-level deopt table entries are contiguous and small, so instead
      // of loading the full, absolute address of each one, load an immediate
      // offset which will be added to the base address later.
      __ mov(entry_offset, Operand(entry - base));

      if (table_entry->needs_frame) {
        DCHECK(!info()->saves_caller_doubles());
        Comment(";;; call deopt with frame");
        __ PushFixedFrame();
        __ b(&needs_frame, SetLK);
      } else {
        __ b(&call_deopt_entry, SetLK);
      }
      info()->LogDeoptCallPosition(masm()->pc_offset(),
                                   table_entry->deopt_info.inlining_id);
    }

    if (needs_frame.is_linked()) {
      __ bind(&needs_frame);
      // This variant of deopt can only be used with stubs. Since we don't
      // have a function pointer to install in the stack frame that we're
      // building, install a special marker there instead.
      DCHECK(info()->IsStub());
      __ LoadSmiLiteral(ip, Smi::FromInt(StackFrame::STUB));
      __ push(ip);
      __ addi(fp, sp, Operand(StandardFrameConstants::kFixedFrameSizeFromFp));
    }

    Comment(";;; call deopt");
    __ bind(&call_deopt_entry);

    if (info()->saves_caller_doubles()) {
      DCHECK(info()->IsStub());
      RestoreCallerDoubles();
    }

    // Add the base address to the offset previously loaded in entry_offset.
    __ mov(ip, Operand(ExternalReference::ForDeoptEntry(base)));
    __ add(ip, entry_offset, ip);
    __ Jump(ip);
  }

  // The deoptimization jump table is the last part of the instruction
  // sequence. Mark the generated code as done unless we bailed out.
  if (!is_aborted()) status_ = DONE;
  return !is_aborted();
}


bool LCodeGen::GenerateSafepointTable() {
  DCHECK(is_done());
  safepoints_.Emit(masm(), GetStackSlotCount());
  return !is_aborted();
}


Register LCodeGen::ToRegister(int code) const {
  return Register::from_code(code);
}


DoubleRegister LCodeGen::ToDoubleRegister(int code) const {
  return DoubleRegister::from_code(code);
}


Register LCodeGen::ToRegister(LOperand* op) const {
  DCHECK(op->IsRegister());
  return ToRegister(op->index());
}


Register LCodeGen::EmitLoadRegister(LOperand* op, Register scratch) {
  if (op->IsRegister()) {
    return ToRegister(op->index());
  } else if (op->IsConstantOperand()) {
    LConstantOperand* const_op = LConstantOperand::cast(op);
    HConstant* constant = chunk_->LookupConstant(const_op);
    Handle<Object> literal = constant->handle(isolate());
    Representation r = chunk_->LookupLiteralRepresentation(const_op);
    if (r.IsInteger32()) {
      AllowDeferredHandleDereference get_number;
      DCHECK(literal->IsNumber());
      __ LoadIntLiteral(scratch, static_cast<int32_t>(literal->Number()));
    } else if (r.IsDouble()) {
      Abort(kEmitLoadRegisterUnsupportedDoubleImmediate);
    } else {
      DCHECK(r.IsSmiOrTagged());
      __ Move(scratch, literal);
    }
    return scratch;
  } else if (op->IsStackSlot()) {
    __ LoadP(scratch, ToMemOperand(op));
    return scratch;
  }
  UNREACHABLE();
  return scratch;
}


void LCodeGen::EmitLoadIntegerConstant(LConstantOperand* const_op,
                                       Register dst) {
  DCHECK(IsInteger32(const_op));
  HConstant* constant = chunk_->LookupConstant(const_op);
  int32_t value = constant->Integer32Value();
  if (IsSmi(const_op)) {
    __ LoadSmiLiteral(dst, Smi::FromInt(value));
  } else {
    __ LoadIntLiteral(dst, value);
  }
}


DoubleRegister LCodeGen::ToDoubleRegister(LOperand* op) const {
  DCHECK(op->IsDoubleRegister());
  return ToDoubleRegister(op->index());
}


Handle<Object> LCodeGen::ToHandle(LConstantOperand* op) const {
  HConstant* constant = chunk_->LookupConstant(op);
  DCHECK(chunk_->LookupLiteralRepresentation(op).IsSmiOrTagged());
  return constant->handle(isolate());
}


bool LCodeGen::IsInteger32(LConstantOperand* op) const {
  return chunk_->LookupLiteralRepresentation(op).IsSmiOrInteger32();
}


bool LCodeGen::IsSmi(LConstantOperand* op) const {
  return chunk_->LookupLiteralRepresentation(op).IsSmi();
}


int32_t LCodeGen::ToInteger32(LConstantOperand* op) const {
  return ToRepresentation(op, Representation::Integer32());
}


intptr_t LCodeGen::ToRepresentation(LConstantOperand* op,
                                    const Representation& r) const {
  HConstant* constant = chunk_->LookupConstant(op);
  int32_t value = constant->Integer32Value();
  if (r.IsInteger32()) return value;
  DCHECK(r.IsSmiOrTagged());
  return reinterpret_cast<intptr_t>(Smi::FromInt(value));
}


Smi* LCodeGen::ToSmi(LConstantOperand* op) const {
  HConstant* constant = chunk_->LookupConstant(op);
  return Smi::FromInt(constant->Integer32Value());
}


double LCodeGen::ToDouble(LConstantOperand* op) const {
  HConstant* constant = chunk_->LookupConstant(op);
  DCHECK(constant->HasDoubleValue());
  return constant->DoubleValue();
}


Operand LCodeGen::ToOperand(LOperand* op) {
  if (op->IsConstantOperand()) {
    LConstantOperand* const_op = LConstantOperand::cast(op);
    HConstant* constant = chunk()->LookupConstant(const_op);
    Representation r = chunk_->LookupLiteralRepresentation(const_op);
    if (r.IsSmi()) {
      DCHECK(constant->HasSmiValue());
      return Operand(Smi::FromInt(constant->Integer32Value()));
    } else if (r.IsInteger32()) {
      DCHECK(constant->HasInteger32Value());
      return Operand(constant->Integer32Value());
    } else if (r.IsDouble()) {
      Abort(kToOperandUnsupportedDoubleImmediate);
    }
    DCHECK(r.IsTagged());
    return Operand(constant->handle(isolate()));
  } else if (op->IsRegister()) {
    return Operand(ToRegister(op));
  } else if (op->IsDoubleRegister()) {
    Abort(kToOperandIsDoubleRegisterUnimplemented);
    return Operand::Zero();
  }
  // Stack slots not implemented, use ToMemOperand instead.
  UNREACHABLE();
  return Operand::Zero();
}


static int ArgumentsOffsetWithoutFrame(int index) {
  DCHECK(index < 0);
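  // Without a frame, argument -1 lives at sp + 0, argument -2 at
  // sp + kPointerSize, and so on.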
  return -(index + 1) * kPointerSize;
}


MemOperand LCodeGen::ToMemOperand(LOperand* op) const {
  DCHECK(!op->IsRegister());
  DCHECK(!op->IsDoubleRegister());
  DCHECK(op->IsStackSlot() || op->IsDoubleStackSlot());
  if (NeedsEagerFrame()) {
    return MemOperand(fp, StackSlotOffset(op->index()));
  } else {
    // There is no eager stack frame, so retrieve the parameter relative
    // to the stack pointer.
    return MemOperand(sp, ArgumentsOffsetWithoutFrame(op->index()));
  }
}


MemOperand LCodeGen::ToHighMemOperand(LOperand* op) const {
  DCHECK(op->IsDoubleStackSlot());
  if (NeedsEagerFrame()) {
    return MemOperand(fp, StackSlotOffset(op->index()) + kPointerSize);
  } else {
    // There is no eager stack frame, so retrieve the parameter relative
    // to the stack pointer.
    return MemOperand(sp,
                      ArgumentsOffsetWithoutFrame(op->index()) + kPointerSize);
  }
}


void LCodeGen::WriteTranslation(LEnvironment* environment,
                                Translation* translation) {
  if (environment == NULL) return;

  // The translation includes one command per value in the environment.
  int translation_size = environment->translation_size();

  WriteTranslation(environment->outer(), translation);
  WriteTranslationFrame(environment, translation);

  int object_index = 0;
  int dematerialized_index = 0;
  for (int i = 0; i < translation_size; ++i) {
    LOperand* value = environment->values()->at(i);
    AddToTranslation(
        environment, translation, value, environment->HasTaggedValueAt(i),
        environment->HasUint32ValueAt(i), &object_index, &dematerialized_index);
  }
}


void LCodeGen::AddToTranslation(LEnvironment* environment,
                                Translation* translation, LOperand* op,
                                bool is_tagged, bool is_uint32,
                                int* object_index_pointer,
                                int* dematerialized_index_pointer) {
  if (op == LEnvironment::materialization_marker()) {
    int object_index = (*object_index_pointer)++;
    if (environment->ObjectIsDuplicateAt(object_index)) {
      int dupe_of = environment->ObjectDuplicateOfAt(object_index);
      translation->DuplicateObject(dupe_of);
      return;
    }
    int object_length = environment->ObjectLengthAt(object_index);
    if (environment->ObjectIsArgumentsAt(object_index)) {
      translation->BeginArgumentsObject(object_length);
    } else {
      translation->BeginCapturedObject(object_length);
    }
    int dematerialized_index = *dematerialized_index_pointer;
    int env_offset = environment->translation_size() + dematerialized_index;
    *dematerialized_index_pointer += object_length;
    for (int i = 0; i < object_length; ++i) {
      LOperand* value = environment->values()->at(env_offset + i);
      AddToTranslation(environment, translation, value,
                       environment->HasTaggedValueAt(env_offset + i),
                       environment->HasUint32ValueAt(env_offset + i),
                       object_index_pointer, dematerialized_index_pointer);
    }
    return;
  }

  if (op->IsStackSlot()) {
    int index = op->index();
    if (index >= 0) {
      index += StandardFrameConstants::kFixedFrameSize / kPointerSize;
    }
    if (is_tagged) {
      translation->StoreStackSlot(index);
    } else if (is_uint32) {
      translation->StoreUint32StackSlot(index);
    } else {
      translation->StoreInt32StackSlot(index);
    }
  } else if (op->IsDoubleStackSlot()) {
    int index = op->index();
    if (index >= 0) {
      index += StandardFrameConstants::kFixedFrameSize / kPointerSize;
    }
    translation->StoreDoubleStackSlot(index);
  } else if (op->IsRegister()) {
    Register reg = ToRegister(op);
    if (is_tagged) {
      translation->StoreRegister(reg);
    } else if (is_uint32) {
      translation->StoreUint32Register(reg);
    } else {
      translation->StoreInt32Register(reg);
    }
  } else if (op->IsDoubleRegister()) {
    DoubleRegister reg = ToDoubleRegister(op);
    translation->StoreDoubleRegister(reg);
  } else if (op->IsConstantOperand()) {
    HConstant* constant = chunk()->LookupConstant(LConstantOperand::cast(op));
    int src_index = DefineDeoptimizationLiteral(constant->handle(isolate()));
    translation->StoreLiteral(src_index);
  } else {
    UNREACHABLE();
  }
}


void LCodeGen::CallCode(Handle<Code> code, RelocInfo::Mode mode,
                        LInstruction* instr) {
  CallCodeGeneric(code, mode, instr, RECORD_SIMPLE_SAFEPOINT);
}


void LCodeGen::CallCodeGeneric(Handle<Code> code, RelocInfo::Mode mode,
                               LInstruction* instr,
                               SafepointMode safepoint_mode) {
  DCHECK(instr != NULL);
  __ Call(code, mode);
  RecordSafepointWithLazyDeopt(instr, safepoint_mode);

  // Signal that we don't inline smi code before these stubs in the
  // optimizing code generator.
  if (code->kind() == Code::BINARY_OP_IC || code->kind() == Code::COMPARE_IC) {
    __ nop();
  }
}


void LCodeGen::CallRuntime(const Runtime::Function* function, int num_arguments,
                           LInstruction* instr, SaveFPRegsMode save_doubles) {
  DCHECK(instr != NULL);

  __ CallRuntime(function, num_arguments, save_doubles);

  RecordSafepointWithLazyDeopt(instr, RECORD_SIMPLE_SAFEPOINT);
}


void LCodeGen::LoadContextFromDeferred(LOperand* context) {
  if (context->IsRegister()) {
    __ Move(cp, ToRegister(context));
  } else if (context->IsStackSlot()) {
    __ LoadP(cp, ToMemOperand(context));
  } else if (context->IsConstantOperand()) {
    HConstant* constant =
        chunk_->LookupConstant(LConstantOperand::cast(context));
    __ Move(cp, Handle<Object>::cast(constant->handle(isolate())));
  } else {
    UNREACHABLE();
  }
}


void LCodeGen::CallRuntimeFromDeferred(Runtime::FunctionId id, int argc,
                                       LInstruction* instr, LOperand* context) {
  LoadContextFromDeferred(context);
  __ CallRuntimeSaveDoubles(id);
  RecordSafepointWithRegisters(instr->pointer_map(), argc,
                               Safepoint::kNoLazyDeopt);
}


void LCodeGen::RegisterEnvironmentForDeoptimization(LEnvironment* environment,
                                                    Safepoint::DeoptMode mode) {
  environment->set_has_been_used();
  if (!environment->HasBeenRegistered()) {
    // Physical stack frame layout:
    // -x ............. -4  0 ..................................... y
    // [incoming arguments] [spill slots] [pushed outgoing arguments]

    // Layout of the environment:
    // 0 ..................................................... size-1
    // [parameters] [locals] [expression stack including arguments]

    // Layout of the translation:
    // 0 ........................................................ size - 1 + 4
    // [expression stack including arguments] [locals] [4 words] [parameters]
    // |>------------ translation_size ------------<|

    int frame_count = 0;
    int jsframe_count = 0;
    for (LEnvironment* e = environment; e != NULL; e = e->outer()) {
      ++frame_count;
      if (e->frame_type() == JS_FUNCTION) {
        ++jsframe_count;
      }
    }
    Translation translation(&translations_, frame_count, jsframe_count, zone());
    WriteTranslation(environment, &translation);
    int deoptimization_index = deoptimizations_.length();
    int pc_offset = masm()->pc_offset();
    environment->Register(deoptimization_index, translation.index(),
                          (mode == Safepoint::kLazyDeopt) ? pc_offset : -1);
    deoptimizations_.Add(environment, zone());
  }
}


void LCodeGen::DeoptimizeIf(Condition cond, LInstruction* instr,
                            Deoptimizer::DeoptReason deopt_reason,
                            Deoptimizer::BailoutType bailout_type,
                            CRegister cr) {
  LEnvironment* environment = instr->environment();
  RegisterEnvironmentForDeoptimization(environment, Safepoint::kNoLazyDeopt);
  DCHECK(environment->HasBeenRegistered());
  int id = environment->deoptimization_index();
  Address entry =
      Deoptimizer::GetDeoptimizationEntry(isolate(), id, bailout_type);
  if (entry == NULL) {
    Abort(kBailoutWasNotPrepared);
    return;
  }

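  // Stress mode: decrement a global counter at every potential deopt site and
  // force an actual deopt each time it reaches zero, then reset the counter
  // to FLAG_deopt_every_n_times.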
  if (FLAG_deopt_every_n_times != 0 && !info()->IsStub()) {
    CRegister alt_cr = cr6;
    Register scratch = scratch0();
    ExternalReference count = ExternalReference::stress_deopt_count(isolate());
    Label no_deopt;
    DCHECK(!alt_cr.is(cr));
    __ Push(r4, scratch);
    __ mov(scratch, Operand(count));
    __ lwz(r4, MemOperand(scratch));
    __ subi(r4, r4, Operand(1));
    __ cmpi(r4, Operand::Zero(), alt_cr);
    __ bne(&no_deopt, alt_cr);
    __ li(r4, Operand(FLAG_deopt_every_n_times));
    __ stw(r4, MemOperand(scratch));
    __ Pop(r4, scratch);

    __ Call(entry, RelocInfo::RUNTIME_ENTRY);
    __ bind(&no_deopt);
    __ stw(r4, MemOperand(scratch));
    __ Pop(r4, scratch);
  }

  if (info()->ShouldTrapOnDeopt()) {
    __ stop("trap_on_deopt", cond, kDefaultStopCode, cr);
  }

  Deoptimizer::DeoptInfo deopt_info = MakeDeoptInfo(instr, deopt_reason);

  DCHECK(info()->IsStub() || frame_is_built_);
  // Go through jump table if we need to handle condition, build frame, or
  // restore caller doubles.
  if (cond == al && frame_is_built_ && !info()->saves_caller_doubles()) {
    DeoptComment(deopt_info);
    __ Call(entry, RelocInfo::RUNTIME_ENTRY);
    info()->LogDeoptCallPosition(masm()->pc_offset(), deopt_info.inlining_id);
  } else {
    Deoptimizer::JumpTableEntry table_entry(entry, deopt_info, bailout_type,
                                            !frame_is_built_);
    // We often have several deopts to the same entry; reuse the last
    // jump entry if this is the case.
    if (FLAG_trace_deopt || isolate()->cpu_profiler()->is_profiling() ||
        jump_table_.is_empty() ||
        !table_entry.IsEquivalentTo(jump_table_.last())) {
      jump_table_.Add(table_entry, zone());
    }
    __ b(cond, &jump_table_.last().label, cr);
  }
}


void LCodeGen::DeoptimizeIf(Condition condition, LInstruction* instr,
                            Deoptimizer::DeoptReason deopt_reason,
                            CRegister cr) {
  Deoptimizer::BailoutType bailout_type =
      info()->IsStub() ? Deoptimizer::LAZY : Deoptimizer::EAGER;
  DeoptimizeIf(condition, instr, deopt_reason, bailout_type, cr);
}


void LCodeGen::PopulateDeoptimizationData(Handle<Code> code) {
  int length = deoptimizations_.length();
  if (length == 0) return;
  Handle<DeoptimizationInputData> data =
      DeoptimizationInputData::New(isolate(), length, TENURED);

  Handle<ByteArray> translations =
      translations_.CreateByteArray(isolate()->factory());
  data->SetTranslationByteArray(*translations);
  data->SetInlinedFunctionCount(Smi::FromInt(inlined_function_count_));
  data->SetOptimizationId(Smi::FromInt(info_->optimization_id()));
  if (info_->IsOptimizing()) {
    // Reference to shared function info does not change between phases.
    AllowDeferredHandleDereference allow_handle_dereference;
    data->SetSharedFunctionInfo(*info_->shared_info());
  } else {
    data->SetSharedFunctionInfo(Smi::FromInt(0));
  }
  data->SetWeakCellCache(Smi::FromInt(0));

  Handle<FixedArray> literals =
      factory()->NewFixedArray(deoptimization_literals_.length(), TENURED);
  {
    AllowDeferredHandleDereference copy_handles;
    for (int i = 0; i < deoptimization_literals_.length(); i++) {
      literals->set(i, *deoptimization_literals_[i]);
    }
    data->SetLiteralArray(*literals);
  }

  data->SetOsrAstId(Smi::FromInt(info_->osr_ast_id().ToInt()));
  data->SetOsrPcOffset(Smi::FromInt(osr_pc_offset_));

  // Populate the deoptimization entries.
  for (int i = 0; i < length; i++) {
    LEnvironment* env = deoptimizations_[i];
    data->SetAstId(i, env->ast_id());
    data->SetTranslationIndex(i, Smi::FromInt(env->translation_index()));
    data->SetArgumentsStackHeight(i,
                                  Smi::FromInt(env->arguments_stack_height()));
    data->SetPc(i, Smi::FromInt(env->pc_offset()));
  }
  code->set_deoptimization_data(*data);
}


void LCodeGen::PopulateDeoptimizationLiteralsWithInlinedFunctions() {
  DCHECK_EQ(0, deoptimization_literals_.length());
  for (auto function : chunk()->inlined_functions()) {
    DefineDeoptimizationLiteral(function);
  }
  inlined_function_count_ = deoptimization_literals_.length();
}


void LCodeGen::RecordSafepointWithLazyDeopt(LInstruction* instr,
                                            SafepointMode safepoint_mode) {
  if (safepoint_mode == RECORD_SIMPLE_SAFEPOINT) {
    RecordSafepoint(instr->pointer_map(), Safepoint::kLazyDeopt);
  } else {
    DCHECK(safepoint_mode == RECORD_SAFEPOINT_WITH_REGISTERS_AND_NO_ARGUMENTS);
    RecordSafepointWithRegisters(instr->pointer_map(), 0,
                                 Safepoint::kLazyDeopt);
  }
}


void LCodeGen::RecordSafepoint(LPointerMap* pointers, Safepoint::Kind kind,
                               int arguments, Safepoint::DeoptMode deopt_mode) {
  DCHECK(expected_safepoint_kind_ == kind);

  const ZoneList<LOperand*>* operands = pointers->GetNormalizedOperands();
  Safepoint safepoint =
      safepoints_.DefineSafepoint(masm(), kind, arguments, deopt_mode);
  for (int i = 0; i < operands->length(); i++) {
    LOperand* pointer = operands->at(i);
    if (pointer->IsStackSlot()) {
      safepoint.DefinePointerSlot(pointer->index(), zone());
    } else if (pointer->IsRegister() && (kind & Safepoint::kWithRegisters)) {
      safepoint.DefinePointerRegister(ToRegister(pointer), zone());
    }
  }
}


void LCodeGen::RecordSafepoint(LPointerMap* pointers,
                               Safepoint::DeoptMode deopt_mode) {
  RecordSafepoint(pointers, Safepoint::kSimple, 0, deopt_mode);
}


void LCodeGen::RecordSafepoint(Safepoint::DeoptMode deopt_mode) {
  LPointerMap empty_pointers(zone());
  RecordSafepoint(&empty_pointers, deopt_mode);
}


void LCodeGen::RecordSafepointWithRegisters(LPointerMap* pointers,
                                            int arguments,
                                            Safepoint::DeoptMode deopt_mode) {
  RecordSafepoint(pointers, Safepoint::kWithRegisters, arguments, deopt_mode);
}


void LCodeGen::RecordAndWritePosition(int position) {
  if (position == RelocInfo::kNoPosition) return;
  masm()->positions_recorder()->RecordPosition(position);
  masm()->positions_recorder()->WriteRecordedPositions();
}


static const char* LabelType(LLabel* label) {
  if (label->is_loop_header()) return " (loop header)";
  if (label->is_osr_entry()) return " (OSR entry)";
  return "";
}


void LCodeGen::DoLabel(LLabel* label) {
  Comment(";;; <@%d,#%d> -------------------- B%d%s --------------------",
          current_instruction_, label->hydrogen_value()->id(),
          label->block_id(), LabelType(label));
  __ bind(label->label());
  current_block_ = label->block_id();
  DoGap(label);
}


void LCodeGen::DoParallelMove(LParallelMove* move) { resolver_.Resolve(move); }


void LCodeGen::DoGap(LGap* gap) {
  for (int i = LGap::FIRST_INNER_POSITION; i <= LGap::LAST_INNER_POSITION;
       i++) {
    LGap::InnerPosition inner_pos = static_cast<LGap::InnerPosition>(i);
    LParallelMove* move = gap->GetParallelMove(inner_pos);
    if (move != NULL) DoParallelMove(move);
  }
}


void LCodeGen::DoInstructionGap(LInstructionGap* instr) { DoGap(instr); }


void LCodeGen::DoParameter(LParameter* instr) {
  // Nothing to do.
}


void LCodeGen::DoCallStub(LCallStub* instr) {
  DCHECK(ToRegister(instr->context()).is(cp));
  DCHECK(ToRegister(instr->result()).is(r3));
  switch (instr->hydrogen()->major_key()) {
    case CodeStub::RegExpExec: {
      RegExpExecStub stub(isolate());
      CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
      break;
    }
    case CodeStub::SubString: {
      SubStringStub stub(isolate());
      CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
      break;
    }
    default:
      UNREACHABLE();
  }
}


void LCodeGen::DoUnknownOSRValue(LUnknownOSRValue* instr) {
  GenerateOsrPrologue();
}


void LCodeGen::DoModByPowerOf2I(LModByPowerOf2I* instr) {
  Register dividend = ToRegister(instr->dividend());
  int32_t divisor = instr->divisor();
  DCHECK(dividend.is(ToRegister(instr->result())));

  // Theoretically, a variation of the branch-free code for integer division by
  // a power of 2 (calculating the remainder via an additional multiplication
  // (which gets simplified to an 'and') and subtraction) should be faster, and
  // this is exactly what GCC and clang emit. Nevertheless, benchmarks seem to
  // indicate that positive dividends are heavily favored, so the branching
  // version performs better.
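  // For example, -13 % 8: negate to 13, keep the low three bits (5), then
  // negate again to get -5.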
  HMod* hmod = instr->hydrogen();
  int32_t shift = WhichPowerOf2Abs(divisor);
  Label dividend_is_not_negative, done;
  if (hmod->CheckFlag(HValue::kLeftCanBeNegative)) {
    __ cmpwi(dividend, Operand::Zero());
    __ bge(&dividend_is_not_negative);
    if (shift) {
      // Note that this is correct even for kMinInt operands.
      __ neg(dividend, dividend);
      __ ExtractBitRange(dividend, dividend, shift - 1, 0);
      __ neg(dividend, dividend, LeaveOE, SetRC);
      if (hmod->CheckFlag(HValue::kBailoutOnMinusZero)) {
        DeoptimizeIf(eq, instr, Deoptimizer::kMinusZero, cr0);
      }
    } else if (!hmod->CheckFlag(HValue::kBailoutOnMinusZero)) {
      __ li(dividend, Operand::Zero());
    } else {
      DeoptimizeIf(al, instr, Deoptimizer::kMinusZero);
    }
    __ b(&done);
  }

  __ bind(&dividend_is_not_negative);
  if (shift) {
    __ ExtractBitRange(dividend, dividend, shift - 1, 0);
  } else {
    __ li(dividend, Operand::Zero());
  }
  __ bind(&done);
}


void LCodeGen::DoModByConstI(LModByConstI* instr) {
  Register dividend = ToRegister(instr->dividend());
  int32_t divisor = instr->divisor();
  Register result = ToRegister(instr->result());
  DCHECK(!dividend.is(result));

  if (divisor == 0) {
    DeoptimizeIf(al, instr, Deoptimizer::kDivisionByZero);
    return;
  }

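  // Compute dividend - (dividend / |divisor|) * |divisor|. The remainder
  // carries the sign of the dividend, so dividing by |divisor| is sufficient.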
  __ TruncatingDiv(result, dividend, Abs(divisor));
  __ mov(ip, Operand(Abs(divisor)));
  __ mullw(result, result, ip);
  __ sub(result, dividend, result, LeaveOE, SetRC);

  // Check for negative zero.
  HMod* hmod = instr->hydrogen();
  if (hmod->CheckFlag(HValue::kBailoutOnMinusZero)) {
    Label remainder_not_zero;
    __ bne(&remainder_not_zero, cr0);
    __ cmpwi(dividend, Operand::Zero());
    DeoptimizeIf(lt, instr, Deoptimizer::kMinusZero);
    __ bind(&remainder_not_zero);
  }
}


void LCodeGen::DoModI(LModI* instr) {
  HMod* hmod = instr->hydrogen();
  Register left_reg = ToRegister(instr->left());
  Register right_reg = ToRegister(instr->right());
  Register result_reg = ToRegister(instr->result());
  Register scratch = scratch0();
  bool can_overflow = hmod->CheckFlag(HValue::kCanOverflow);
  Label done;

  if (can_overflow) {
    __ li(r0, Operand::Zero());  // clear xer
    __ mtxer(r0);
  }

  __ divw(scratch, left_reg, right_reg, SetOE, SetRC);

  // Check for x % 0.
  if (hmod->CheckFlag(HValue::kCanBeDivByZero)) {
    __ cmpwi(right_reg, Operand::Zero());
    DeoptimizeIf(eq, instr, Deoptimizer::kDivisionByZero);
  }

  // Check for kMinInt % -1; divw will return an undefined result, which is
  // not what we want. We have to deopt if we care about -0, because we can't
  // return that.
  if (can_overflow) {
    if (hmod->CheckFlag(HValue::kBailoutOnMinusZero)) {
      DeoptimizeIf(overflow, instr, Deoptimizer::kMinusZero, cr0);
    } else {
      if (CpuFeatures::IsSupported(ISELECT)) {
        __ isel(overflow, result_reg, r0, result_reg, cr0);
        __ boverflow(&done, cr0);
      } else {
        Label no_overflow_possible;
        __ bnooverflow(&no_overflow_possible, cr0);
        __ li(result_reg, Operand::Zero());
        __ b(&done);
        __ bind(&no_overflow_possible);
      }
    }
  }

  __ mullw(scratch, right_reg, scratch);
  __ sub(result_reg, left_reg, scratch, LeaveOE, SetRC);

  // If we care about -0, test if the dividend is <0 and the result is 0.
  if (hmod->CheckFlag(HValue::kBailoutOnMinusZero)) {
    __ bne(&done, cr0);
    __ cmpwi(left_reg, Operand::Zero());
    DeoptimizeIf(lt, instr, Deoptimizer::kMinusZero);
  }

  __ bind(&done);
}


void LCodeGen::DoDivByPowerOf2I(LDivByPowerOf2I* instr) {
  Register dividend = ToRegister(instr->dividend());
  int32_t divisor = instr->divisor();
  Register result = ToRegister(instr->result());
  DCHECK(divisor == kMinInt || base::bits::IsPowerOfTwo32(Abs(divisor)));
  DCHECK(!result.is(dividend));

  // Check for (0 / -x) that will produce negative zero.
  HDiv* hdiv = instr->hydrogen();
  if (hdiv->CheckFlag(HValue::kBailoutOnMinusZero) && divisor < 0) {
    __ cmpwi(dividend, Operand::Zero());
    DeoptimizeIf(eq, instr, Deoptimizer::kMinusZero);
  }
  // Check for (kMinInt / -1).
  if (hdiv->CheckFlag(HValue::kCanOverflow) && divisor == -1) {
    __ lis(r0, Operand(SIGN_EXT_IMM16(0x8000)));
    __ cmpw(dividend, r0);
    DeoptimizeIf(eq, instr, Deoptimizer::kOverflow);
  }

  int32_t shift = WhichPowerOf2Abs(divisor);

  // Deoptimize if remainder will not be 0.
  if (!hdiv->CheckFlag(HInstruction::kAllUsesTruncatingToInt32) && shift) {
    __ TestBitRange(dividend, shift - 1, 0, r0);
    DeoptimizeIf(ne, instr, Deoptimizer::kLostPrecision, cr0);
  }

  if (divisor == -1) {  // Nice shortcut, not needed for correctness.
    __ neg(result, dividend);
    return;
  }
  if (shift == 0) {
    __ mr(result, dividend);
  } else {
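    // Bias negative dividends by (2^shift - 1) before the arithmetic shift so
    // the division truncates toward zero, e.g. -9 / 4: -9 + 3 = -6 and
    // -6 >> 2 = -2 rather than -3.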
    if (shift == 1) {
      __ srwi(result, dividend, Operand(31));
    } else {
      __ srawi(result, dividend, 31);
      __ srwi(result, result, Operand(32 - shift));
    }
    __ add(result, dividend, result);
    __ srawi(result, result, shift);
  }
  if (divisor < 0) __ neg(result, result);
}


void LCodeGen::DoDivByConstI(LDivByConstI* instr) {
  Register dividend = ToRegister(instr->dividend());
  int32_t divisor = instr->divisor();
  Register result = ToRegister(instr->result());
  DCHECK(!dividend.is(result));

  if (divisor == 0) {
    DeoptimizeIf(al, instr, Deoptimizer::kDivisionByZero);
    return;
  }

  // Check for (0 / -x) that will produce negative zero.
  HDiv* hdiv = instr->hydrogen();
  if (hdiv->CheckFlag(HValue::kBailoutOnMinusZero) && divisor < 0) {
    __ cmpwi(dividend, Operand::Zero());
    DeoptimizeIf(eq, instr, Deoptimizer::kMinusZero);
  }

  __ TruncatingDiv(result, dividend, Abs(divisor));
  if (divisor < 0) __ neg(result, result);

  if (!hdiv->CheckFlag(HInstruction::kAllUsesTruncatingToInt32)) {
    Register scratch = scratch0();
    __ mov(ip, Operand(divisor));
    __ mullw(scratch, result, ip);
    __ cmpw(scratch, dividend);
    DeoptimizeIf(ne, instr, Deoptimizer::kLostPrecision);
  }
}


// TODO(svenpanne) Refactor this to avoid code duplication with DoFlooringDivI.
void LCodeGen::DoDivI(LDivI* instr) {
  HBinaryOperation* hdiv = instr->hydrogen();
  const Register dividend = ToRegister(instr->dividend());
  const Register divisor = ToRegister(instr->divisor());
  Register result = ToRegister(instr->result());
  bool can_overflow = hdiv->CheckFlag(HValue::kCanOverflow);

  DCHECK(!dividend.is(result));
  DCHECK(!divisor.is(result));

  if (can_overflow) {
    __ li(r0, Operand::Zero());  // clear xer
    __ mtxer(r0);
  }

  __ divw(result, dividend, divisor, SetOE, SetRC);

  // Check for x / 0.
  if (hdiv->CheckFlag(HValue::kCanBeDivByZero)) {
    __ cmpwi(divisor, Operand::Zero());
    DeoptimizeIf(eq, instr, Deoptimizer::kDivisionByZero);
  }

  // Check for (0 / -x) that will produce negative zero.
  if (hdiv->CheckFlag(HValue::kBailoutOnMinusZero)) {
    Label dividend_not_zero;
    __ cmpwi(dividend, Operand::Zero());
    __ bne(&dividend_not_zero);
    __ cmpwi(divisor, Operand::Zero());
    DeoptimizeIf(lt, instr, Deoptimizer::kMinusZero);
    __ bind(&dividend_not_zero);
  }

  // Check for (kMinInt / -1).
  if (can_overflow) {
    if (!hdiv->CheckFlag(HValue::kAllUsesTruncatingToInt32)) {
      DeoptimizeIf(overflow, instr, Deoptimizer::kOverflow, cr0);
    } else {
      // When truncating, we want kMinInt / -1 = kMinInt.
      if (CpuFeatures::IsSupported(ISELECT)) {
        __ isel(overflow, result, dividend, result, cr0);
      } else {
        Label no_overflow_possible;
        __ bnooverflow(&no_overflow_possible, cr0);
        __ mr(result, dividend);
        __ bind(&no_overflow_possible);
      }
    }
  }

  if (!hdiv->CheckFlag(HInstruction::kAllUsesTruncatingToInt32)) {
    // Deoptimize if remainder is not 0.
    Register scratch = scratch0();
    __ mullw(scratch, divisor, result);
    __ cmpw(dividend, scratch);
    DeoptimizeIf(ne, instr, Deoptimizer::kLostPrecision);
  }
}


void LCodeGen::DoFlooringDivByPowerOf2I(LFlooringDivByPowerOf2I* instr) {
  HBinaryOperation* hdiv = instr->hydrogen();
  Register dividend = ToRegister(instr->dividend());
  Register result = ToRegister(instr->result());
  int32_t divisor = instr->divisor();
  bool can_overflow = hdiv->CheckFlag(HValue::kLeftCanBeMinInt);

  // If the divisor is positive, things are easy: There can be no deopts and we
  // can simply do an arithmetic right shift.
  int32_t shift = WhichPowerOf2Abs(divisor);
  if (divisor > 0) {
    if (shift || !result.is(dividend)) {
      __ srawi(result, dividend, shift);
    }
    return;
  }

  // If the divisor is negative, we have to negate and handle edge cases.
  OEBit oe = LeaveOE;
#if V8_TARGET_ARCH_PPC64
  if (divisor == -1 && can_overflow) {
    __ lis(r0, Operand(SIGN_EXT_IMM16(0x8000)));
    __ cmpw(dividend, r0);
    DeoptimizeIf(eq, instr, Deoptimizer::kOverflow);
  }
#else
  if (can_overflow) {
    __ li(r0, Operand::Zero());  // clear xer
    __ mtxer(r0);
    oe = SetOE;
  }
#endif

  __ neg(result, dividend, oe, SetRC);
  if (hdiv->CheckFlag(HValue::kBailoutOnMinusZero)) {
    DeoptimizeIf(eq, instr, Deoptimizer::kMinusZero, cr0);
  }

  // If the negation could not overflow, simply shifting is OK.
#if !V8_TARGET_ARCH_PPC64
  if (!can_overflow) {
#endif
    if (shift) {
      __ ShiftRightArithImm(result, result, shift);
    }
    return;
#if !V8_TARGET_ARCH_PPC64
  }

  // Dividing by -1 is basically negation, unless we overflow.
  if (divisor == -1) {
    DeoptimizeIf(overflow, instr, Deoptimizer::kOverflow, cr0);
    return;
  }

  Label overflow, done;
  __ boverflow(&overflow, cr0);
  __ srawi(result, result, shift);
  __ b(&done);
  __ bind(&overflow);
  __ mov(result, Operand(kMinInt / divisor));
  __ bind(&done);
#endif
}


void LCodeGen::DoFlooringDivByConstI(LFlooringDivByConstI* instr) {
  Register dividend = ToRegister(instr->dividend());
  int32_t divisor = instr->divisor();
  Register result = ToRegister(instr->result());
  DCHECK(!dividend.is(result));

  if (divisor == 0) {
    DeoptimizeIf(al, instr, Deoptimizer::kDivisionByZero);
    return;
  }

  // Check for (0 / -x) that will produce negative zero.
  HMathFloorOfDiv* hdiv = instr->hydrogen();
  if (hdiv->CheckFlag(HValue::kBailoutOnMinusZero) && divisor < 0) {
    __ cmpwi(dividend, Operand::Zero());
    DeoptimizeIf(eq, instr, Deoptimizer::kMinusZero);
  }

  // Easy case: We need no dynamic check for the dividend and the flooring
  // division is the same as the truncating division.
  if ((divisor > 0 && !hdiv->CheckFlag(HValue::kLeftCanBeNegative)) ||
      (divisor < 0 && !hdiv->CheckFlag(HValue::kLeftCanBePositive))) {
    __ TruncatingDiv(result, dividend, Abs(divisor));
    if (divisor < 0) __ neg(result, result);
    return;
  }

  // In the general case we may need to adjust before and after the truncating
  // division to get a flooring division.
  Register temp = ToRegister(instr->temp());
  DCHECK(!temp.is(dividend) && !temp.is(result));
  Label needs_adjustment, done;
  __ cmpwi(dividend, Operand::Zero());
  __ b(divisor > 0 ? lt : gt, &needs_adjustment);
  __ TruncatingDiv(result, dividend, Abs(divisor));
  if (divisor < 0) __ neg(result, result);
  __ b(&done);
  __ bind(&needs_adjustment);
  __ addi(temp, dividend, Operand(divisor > 0 ? 1 : -1));
  __ TruncatingDiv(result, temp, Abs(divisor));
  if (divisor < 0) __ neg(result, result);
  __ subi(result, result, Operand(1));
  __ bind(&done);
}


// TODO(svenpanne) Refactor this to avoid code duplication with DoDivI.
void LCodeGen::DoFlooringDivI(LFlooringDivI* instr) {
  HBinaryOperation* hdiv = instr->hydrogen();
  const Register dividend = ToRegister(instr->dividend());
  const Register divisor = ToRegister(instr->divisor());
  Register result = ToRegister(instr->result());
  bool can_overflow = hdiv->CheckFlag(HValue::kCanOverflow);

  DCHECK(!dividend.is(result));
  DCHECK(!divisor.is(result));

  if (can_overflow) {
    __ li(r0, Operand::Zero());  // clear xer
    __ mtxer(r0);
  }

  __ divw(result, dividend, divisor, SetOE, SetRC);

  // Check for x / 0.
  if (hdiv->CheckFlag(HValue::kCanBeDivByZero)) {
    __ cmpwi(divisor, Operand::Zero());
    DeoptimizeIf(eq, instr, Deoptimizer::kDivisionByZero);
  }

  // Check for (0 / -x) that will produce negative zero.
  if (hdiv->CheckFlag(HValue::kBailoutOnMinusZero)) {
    Label dividend_not_zero;
    __ cmpwi(dividend, Operand::Zero());
    __ bne(&dividend_not_zero);
    __ cmpwi(divisor, Operand::Zero());
    DeoptimizeIf(lt, instr, Deoptimizer::kMinusZero);
    __ bind(&dividend_not_zero);
  }

  // Check for (kMinInt / -1).
  if (can_overflow) {
    if (!hdiv->CheckFlag(HValue::kAllUsesTruncatingToInt32)) {
      DeoptimizeIf(overflow, instr, Deoptimizer::kOverflow, cr0);
    } else {
      // When truncating, we want kMinInt / -1 = kMinInt.
      if (CpuFeatures::IsSupported(ISELECT)) {
        __ isel(overflow, result, dividend, result, cr0);
      } else {
        Label no_overflow_possible;
        __ bnooverflow(&no_overflow_possible, cr0);
        __ mr(result, dividend);
        __ bind(&no_overflow_possible);
      }
    }
  }

  Label done;
  Register scratch = scratch0();
  // If both operands have the same sign then we are done.
#if V8_TARGET_ARCH_PPC64
  __ xor_(scratch, dividend, divisor);
  __ cmpwi(scratch, Operand::Zero());
  __ bge(&done);
#else
  __ xor_(scratch, dividend, divisor, SetRC);
  __ bge(&done, cr0);
#endif

  // If there is no remainder then we are done.
  __ mullw(scratch, divisor, result);
  __ cmpw(dividend, scratch);
  __ beq(&done);

  // We performed a truncating division. Correct the result.
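  // e.g. -7 / 2 truncates to -3 but floors to -4.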
  __ subi(result, result, Operand(1));
  __ bind(&done);
}


void LCodeGen::DoMultiplyAddD(LMultiplyAddD* instr) {
  DoubleRegister addend = ToDoubleRegister(instr->addend());
  DoubleRegister multiplier = ToDoubleRegister(instr->multiplier());
  DoubleRegister multiplicand = ToDoubleRegister(instr->multiplicand());
  DoubleRegister result = ToDoubleRegister(instr->result());

  __ fmadd(result, multiplier, multiplicand, addend);
}


void LCodeGen::DoMultiplySubD(LMultiplySubD* instr) {
  DoubleRegister minuend = ToDoubleRegister(instr->minuend());
  DoubleRegister multiplier = ToDoubleRegister(instr->multiplier());
  DoubleRegister multiplicand = ToDoubleRegister(instr->multiplicand());
  DoubleRegister result = ToDoubleRegister(instr->result());

  __ fmsub(result, multiplier, multiplicand, minuend);
}


void LCodeGen::DoMulI(LMulI* instr) {
  Register scratch = scratch0();
  Register result = ToRegister(instr->result());
  // Note that result may alias left.
  Register left = ToRegister(instr->left());
  LOperand* right_op = instr->right();

  bool bailout_on_minus_zero =
      instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero);
  bool can_overflow = instr->hydrogen()->CheckFlag(HValue::kCanOverflow);

  if (right_op->IsConstantOperand()) {
    int32_t constant = ToInteger32(LConstantOperand::cast(right_op));

    if (bailout_on_minus_zero && (constant < 0)) {
      // The case of a zero constant will be handled separately.
      // If the constant is negative and the left operand is zero, the result
      // should be -0.
      __ cmpi(left, Operand::Zero());
      DeoptimizeIf(eq, instr, Deoptimizer::kMinusZero);
    }

    switch (constant) {
      case -1:
        if (can_overflow) {
#if V8_TARGET_ARCH_PPC64
          if (instr->hydrogen()->representation().IsSmi()) {
#endif
            __ li(r0, Operand::Zero());  // clear xer
            __ mtxer(r0);
            __ neg(result, left, SetOE, SetRC);
            DeoptimizeIf(overflow, instr, Deoptimizer::kOverflow, cr0);
#if V8_TARGET_ARCH_PPC64
          } else {
            __ neg(result, left);
            __ TestIfInt32(result, r0);
            DeoptimizeIf(ne, instr, Deoptimizer::kOverflow);
          }
#endif
        } else {
          __ neg(result, left);
        }
        break;
      case 0:
        if (bailout_on_minus_zero) {
          // If the left operand is strictly negative and the constant is zero,
          // the result is -0. Deoptimize if required, otherwise return 0.
1515 #if V8_TARGET_ARCH_PPC64 | |
1516 if (instr->hydrogen()->representation().IsSmi()) { | |
1517 #endif | |
1518 __ cmpi(left, Operand::Zero()); | |
1519 #if V8_TARGET_ARCH_PPC64 | |
1520 } else { | |
1521 __ cmpwi(left, Operand::Zero()); | |
1522 } | |
1523 #endif | |
1524 DeoptimizeIf(lt, instr, Deoptimizer::kMinusZero); | |
1525 } | |
1526 __ li(result, Operand::Zero()); | |
1527 break; | |
1528 case 1: | |
1529 __ Move(result, left); | |
1530 break; | |
1531 default: | |
1532 // Multiplying by powers of two and powers of two plus or minus | |
1533 // one can be done faster with shifted operands. | |
1534 // For other constants we emit standard code. | |
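// Example: constant = 5 hits the IsPowerOfTwo32(constant_abs - 1) case | |
// below and is emitted as (left << 2) + left; constant = -6 yields | |
// mask = -1 and constant_abs = 6. | |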
1535 int32_t mask = constant >> 31; | |
1536 uint32_t constant_abs = (constant + mask) ^ mask; | |
1537 | |
1538 if (base::bits::IsPowerOfTwo32(constant_abs)) { | |
1539 int32_t shift = WhichPowerOf2(constant_abs); | |
1540 __ ShiftLeftImm(result, left, Operand(shift)); | |
1541 // Correct the sign of the result if the constant is negative. | |
1542 if (constant < 0) __ neg(result, result); | |
1543 } else if (base::bits::IsPowerOfTwo32(constant_abs - 1)) { | |
1544 int32_t shift = WhichPowerOf2(constant_abs - 1); | |
1545 __ ShiftLeftImm(scratch, left, Operand(shift)); | |
1546 __ add(result, scratch, left); | |
1547 // Correct the sign of the result if the constant is negative. | |
1548 if (constant < 0) __ neg(result, result); | |
1549 } else if (base::bits::IsPowerOfTwo32(constant_abs + 1)) { | |
1550 int32_t shift = WhichPowerOf2(constant_abs + 1); | |
1551 __ ShiftLeftImm(scratch, left, Operand(shift)); | |
1552 __ sub(result, scratch, left); | |
1553 // Correct the sign of the result if the constant is negative. | |
1554 if (constant < 0) __ neg(result, result); | |
1555 } else { | |
1556 // Generate standard code. | |
1557 __ mov(ip, Operand(constant)); | |
1558 __ Mul(result, left, ip); | |
1559 } | |
1560 } | |
1561 | |
1562 } else { | |
1563 DCHECK(right_op->IsRegister()); | |
1564 Register right = ToRegister(right_op); | |
1565 | |
1566 if (can_overflow) { | |
1567 #if V8_TARGET_ARCH_PPC64 | |
1568 // result = left * right. | |
1569 if (instr->hydrogen()->representation().IsSmi()) { | |
1570 __ SmiUntag(result, left); | |
1571 __ SmiUntag(scratch, right); | |
1572 __ Mul(result, result, scratch); | |
1573 } else { | |
1574 __ Mul(result, left, right); | |
1575 } | |
1576 __ TestIfInt32(result, r0); | |
1577 DeoptimizeIf(ne, instr, Deoptimizer::kOverflow); | |
1578 if (instr->hydrogen()->representation().IsSmi()) { | |
1579 __ SmiTag(result); | |
1580 } | |
1581 #else | |
1582 // scratch:result = left * right. | |
1583 if (instr->hydrogen()->representation().IsSmi()) { | |
1584 __ SmiUntag(result, left); | |
1585 __ mulhw(scratch, result, right); | |
1586 __ mullw(result, result, right); | |
1587 } else { | |
1588 __ mulhw(scratch, left, right); | |
1589 __ mullw(result, left, right); | |
1590 } | |
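// The product fits in an int32 iff the high word (scratch) equals the | |
// sign extension of the low word (result), which is what TestIfInt32 | |
// checks below. | |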
1591 __ TestIfInt32(scratch, result, r0); | |
1592 DeoptimizeIf(ne, instr, Deoptimizer::kOverflow); | |
1593 #endif | |
1594 } else { | |
1595 if (instr->hydrogen()->representation().IsSmi()) { | |
1596 __ SmiUntag(result, left); | |
1597 __ Mul(result, result, right); | |
1598 } else { | |
1599 __ Mul(result, left, right); | |
1600 } | |
1601 } | |
1602 | |
1603 if (bailout_on_minus_zero) { | |
1604 Label done; | |
1605 #if V8_TARGET_ARCH_PPC64 | |
1606 if (instr->hydrogen()->representation().IsSmi()) { | |
1607 #endif | |
1608 __ xor_(r0, left, right, SetRC); | |
1609 __ bge(&done, cr0); | |
1610 #if V8_TARGET_ARCH_PPC64 | |
1611 } else { | |
1612 __ xor_(r0, left, right); | |
1613 __ cmpwi(r0, Operand::Zero()); | |
1614 __ bge(&done); | |
1615 } | |
1616 #endif | |
1617 // Bail out if the result is minus zero. | |
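// Only a zero result with operands of opposite sign represents -0; | |
// same-sign operands already branched to done above. | |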
1618 __ cmpi(result, Operand::Zero()); | |
1619 DeoptimizeIf(eq, instr, Deoptimizer::kMinusZero); | |
1620 __ bind(&done); | |
1621 } | |
1622 } | |
1623 } | |
1624 | |
1625 | |
1626 void LCodeGen::DoBitI(LBitI* instr) { | |
1627 LOperand* left_op = instr->left(); | |
1628 LOperand* right_op = instr->right(); | |
1629 DCHECK(left_op->IsRegister()); | |
1630 Register left = ToRegister(left_op); | |
1631 Register result = ToRegister(instr->result()); | |
1632 Operand right(no_reg); | |
1633 | |
1634 if (right_op->IsStackSlot()) { | |
1635 right = Operand(EmitLoadRegister(right_op, ip)); | |
1636 } else { | |
1637 DCHECK(right_op->IsRegister() || right_op->IsConstantOperand()); | |
1638 right = ToOperand(right_op); | |
1639 | |
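// andi/ori/xori encode a 16-bit unsigned immediate, so small constants | |
// can be applied without first materializing them in a register. | |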
1640 if (right_op->IsConstantOperand() && is_uint16(right.immediate())) { | |
1641 switch (instr->op()) { | |
1642 case Token::BIT_AND: | |
1643 __ andi(result, left, right); | |
1644 break; | |
1645 case Token::BIT_OR: | |
1646 __ ori(result, left, right); | |
1647 break; | |
1648 case Token::BIT_XOR: | |
1649 __ xori(result, left, right); | |
1650 break; | |
1651 default: | |
1652 UNREACHABLE(); | |
1653 break; | |
1654 } | |
1655 return; | |
1656 } | |
1657 } | |
1658 | |
1659 switch (instr->op()) { | |
1660 case Token::BIT_AND: | |
1661 __ And(result, left, right); | |
1662 break; | |
1663 case Token::BIT_OR: | |
1664 __ Or(result, left, right); | |
1665 break; | |
1666 case Token::BIT_XOR: | |
1667 if (right_op->IsConstantOperand() && right.immediate() == int32_t(~0)) { | |
1668 __ notx(result, left); | |
1669 } else { | |
1670 __ Xor(result, left, right); | |
1671 } | |
1672 break; | |
1673 default: | |
1674 UNREACHABLE(); | |
1675 break; | |
1676 } | |
1677 } | |
1678 | |
1679 | |
1680 void LCodeGen::DoShiftI(LShiftI* instr) { | |
1681 // Both 'left' and 'right' are "used at start" (see LCodeGen::DoShift), so | |
1682 // result may alias either of them. | |
1683 LOperand* right_op = instr->right(); | |
1684 Register left = ToRegister(instr->left()); | |
1685 Register result = ToRegister(instr->result()); | |
1686 Register scratch = scratch0(); | |
1687 if (right_op->IsRegister()) { | |
1688 // Mask the right_op operand. | |
1689 __ andi(scratch, ToRegister(right_op), Operand(0x1F)); | |
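// JavaScript shift counts are taken modulo 32, hence the 0x1F mask. | |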
1690 switch (instr->op()) { | |
1691 case Token::ROR: | |
1692 // rotate_right(a, b) == rotate_left(a, 32 - b) | |
1693 __ subfic(scratch, scratch, Operand(32)); | |
1694 __ rotlw(result, left, scratch); | |
1695 break; | |
1696 case Token::SAR: | |
1697 __ sraw(result, left, scratch); | |
1698 break; | |
1699 case Token::SHR: | |
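// A logical shift by zero can leave the sign bit set, and such a | |
// value is not representable as a signed int32 result; hence the | |
// deopt on a negative outcome below. | |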
1700 if (instr->can_deopt()) { | |
1701 __ srw(result, left, scratch, SetRC); | |
1702 #if V8_TARGET_ARCH_PPC64 | |
1703 __ extsw(result, result, SetRC); | |
1704 #endif | |
1705 DeoptimizeIf(lt, instr, Deoptimizer::kNegativeValue, cr0); | |
1706 } else { | |
1707 __ srw(result, left, scratch); | |
1708 } | |
1709 break; | |
1710 case Token::SHL: | |
1711 __ slw(result, left, scratch); | |
1712 #if V8_TARGET_ARCH_PPC64 | |
1713 __ extsw(result, result); | |
1714 #endif | |
1715 break; | |
1716 default: | |
1717 UNREACHABLE(); | |
1718 break; | |
1719 } | |
1720 } else { | |
1721 // Mask the right_op operand. | |
1722 int value = ToInteger32(LConstantOperand::cast(right_op)); | |
1723 uint8_t shift_count = static_cast<uint8_t>(value & 0x1F); | |
1724 switch (instr->op()) { | |
1725 case Token::ROR: | |
1726 if (shift_count != 0) { | |
1727 __ rotrwi(result, left, shift_count); | |
1728 } else { | |
1729 __ Move(result, left); | |
1730 } | |
1731 break; | |
1732 case Token::SAR: | |
1733 if (shift_count != 0) { | |
1734 __ srawi(result, left, shift_count); | |
1735 } else { | |
1736 __ Move(result, left); | |
1737 } | |
1738 break; | |
1739 case Token::SHR: | |
1740 if (shift_count != 0) { | |
1741 __ srwi(result, left, Operand(shift_count)); | |
1742 } else { | |
1743 if (instr->can_deopt()) { | |
1744 __ cmpwi(left, Operand::Zero()); | |
1745 DeoptimizeIf(lt, instr, Deoptimizer::kNegativeValue); | |
1746 } | |
1747 __ Move(result, left); | |
1748 } | |
1749 break; | |
1750 case Token::SHL: | |
1751 if (shift_count != 0) { | |
1752 #if V8_TARGET_ARCH_PPC64 | |
1753 if (instr->hydrogen_value()->representation().IsSmi()) { | |
1754 __ sldi(result, left, Operand(shift_count)); | |
1755 #else | |
1756 if (instr->hydrogen_value()->representation().IsSmi() && | |
1757 instr->can_deopt()) { | |
1758 if (shift_count != 1) { | |
1759 __ slwi(result, left, Operand(shift_count - 1)); | |
1760 __ SmiTagCheckOverflow(result, result, scratch); | |
1761 } else { | |
1762 __ SmiTagCheckOverflow(result, left, scratch); | |
1763 } | |
1764 DeoptimizeIf(lt, instr, Deoptimizer::kOverflow, cr0); | |
1765 #endif | |
1766 } else { | |
1767 __ slwi(result, left, Operand(shift_count)); | |
1768 #if V8_TARGET_ARCH_PPC64 | |
1769 __ extsw(result, result); | |
1770 #endif | |
1771 } | |
1772 } else { | |
1773 __ Move(result, left); | |
1774 } | |
1775 break; | |
1776 default: | |
1777 UNREACHABLE(); | |
1778 break; | |
1779 } | |
1780 } | |
1781 } | |
1782 | |
1783 | |
1784 void LCodeGen::DoSubI(LSubI* instr) { | |
1785 LOperand* right = instr->right(); | |
1786 Register left = ToRegister(instr->left()); | |
1787 Register result = ToRegister(instr->result()); | |
1788 bool can_overflow = instr->hydrogen()->CheckFlag(HValue::kCanOverflow); | |
1789 #if V8_TARGET_ARCH_PPC64 | |
1790 const bool isInteger = !instr->hydrogen()->representation().IsSmi(); | |
1791 #else | |
1792 const bool isInteger = false; | |
1793 #endif | |
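// On PPC64 the subtraction is performed in a 64-bit register, so | |
// overflow is detected afterwards by checking that the result still | |
// sign-extends from 32 bits. | |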
1794 if (!can_overflow || isInteger) { | |
1795 if (right->IsConstantOperand()) { | |
1796 __ Add(result, left, -(ToOperand(right).immediate()), r0); | |
1797 } else { | |
1798 __ sub(result, left, EmitLoadRegister(right, ip)); | |
1799 } | |
1800 #if V8_TARGET_ARCH_PPC64 | |
1801 if (can_overflow) { | |
1802 __ TestIfInt32(result, r0); | |
1803 DeoptimizeIf(ne, instr, Deoptimizer::kOverflow); | |
1804 } | |
1805 #endif | |
1806 } else { | |
1807 if (right->IsConstantOperand()) { | |
1808 __ AddAndCheckForOverflow(result, left, -(ToOperand(right).immediate()), | |
1809 scratch0(), r0); | |
1810 } else { | |
1811 __ SubAndCheckForOverflow(result, left, EmitLoadRegister(right, ip), | |
1812 scratch0(), r0); | |
1813 } | |
1814 DeoptimizeIf(lt, instr, Deoptimizer::kOverflow, cr0); | |
1815 } | |
1816 } | |
1817 | |
1818 | |
1819 void LCodeGen::DoRSubI(LRSubI* instr) { | |
1820 LOperand* left = instr->left(); | |
1821 LOperand* right = instr->right(); | |
1822 LOperand* result = instr->result(); | |
1823 | |
1824 DCHECK(!instr->hydrogen()->CheckFlag(HValue::kCanOverflow) && | |
1825 right->IsConstantOperand()); | |
1826 | |
1827 Operand right_operand = ToOperand(right); | |
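// subfic encodes a 16-bit signed immediate; larger constants are | |
// materialized in r0 and subtracted with a register-register sub. | |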
1828 if (is_int16(right_operand.immediate())) { | |
1829 __ subfic(ToRegister(result), ToRegister(left), right_operand); | |
1830 } else { | |
1831 __ mov(r0, right_operand); | |
1832 __ sub(ToRegister(result), r0, ToRegister(left)); | |
1833 } | |
1834 } | |
1835 | |
1836 | |
1837 void LCodeGen::DoConstantI(LConstantI* instr) { | |
1838 __ mov(ToRegister(instr->result()), Operand(instr->value())); | |
1839 } | |
1840 | |
1841 | |
1842 void LCodeGen::DoConstantS(LConstantS* instr) { | |
1843 __ LoadSmiLiteral(ToRegister(instr->result()), instr->value()); | |
1844 } | |
1845 | |
1846 | |
1847 void LCodeGen::DoConstantD(LConstantD* instr) { | |
1848 DCHECK(instr->result()->IsDoubleRegister()); | |
1849 DoubleRegister result = ToDoubleRegister(instr->result()); | |
1850 #if V8_HOST_ARCH_IA32 | |
1851 // Need some crappy work-around for x87 sNaN -> qNaN breakage in simulator | |
1852 // builds. | |
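// The mask test below matches bit patterns whose exponent is all ones | |
// and whose quiet bit is clear (signaling NaNs, plus infinities); | |
// these are moved via integer registers so the NaN cannot be silently | |
// quietened. | |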
1853 uint64_t bits = instr->bits(); | |
1854 if ((bits & V8_UINT64_C(0x7FF8000000000000)) == | |
1855 V8_UINT64_C(0x7FF0000000000000)) { | |
1856 uint32_t lo = static_cast<uint32_t>(bits); | |
1857 uint32_t hi = static_cast<uint32_t>(bits >> 32); | |
1858 __ mov(ip, Operand(lo)); | |
1859 __ mov(scratch0(), Operand(hi)); | |
1860 __ MovInt64ToDouble(result, scratch0(), ip); | |
1861 return; | |
1862 } | |
1863 #endif | |
1864 double v = instr->value(); | |
1865 __ LoadDoubleLiteral(result, v, scratch0()); | |
1866 } | |
1867 | |
1868 | |
1869 void LCodeGen::DoConstantE(LConstantE* instr) { | |
1870 __ mov(ToRegister(instr->result()), Operand(instr->value())); | |
1871 } | |
1872 | |
1873 | |
1874 void LCodeGen::DoConstantT(LConstantT* instr) { | |
1875 Handle<Object> object = instr->value(isolate()); | |
1876 AllowDeferredHandleDereference smi_check; | |
1877 __ Move(ToRegister(instr->result()), object); | |
1878 } | |
1879 | |
1880 | |
1881 void LCodeGen::DoMapEnumLength(LMapEnumLength* instr) { | |
1882 Register result = ToRegister(instr->result()); | |
1883 Register map = ToRegister(instr->value()); | |
1884 __ EnumLength(result, map); | |
1885 } | |
1886 | |
1887 | |
1888 void LCodeGen::DoDateField(LDateField* instr) { | |
1889 Register object = ToRegister(instr->date()); | |
1890 Register result = ToRegister(instr->result()); | |
1891 Register scratch = ToRegister(instr->temp()); | |
1892 Smi* index = instr->index(); | |
1893 DCHECK(object.is(result)); | |
1894 DCHECK(object.is(r3)); | |
1895 DCHECK(!scratch.is(scratch0())); | |
1896 DCHECK(!scratch.is(object)); | |
1897 | |
1898 if (index->value() == 0) { | |
1899 __ LoadP(result, FieldMemOperand(object, JSDate::kValueOffset)); | |
1900 } else { | |
1901 Label runtime, done; | |
1902 if (index->value() < JSDate::kFirstUncachedField) { | |
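// Cached date fields are valid only while the date cache stamp in the | |
// JSDate object matches the isolate's current stamp; otherwise fall | |
// through to the runtime call below. | |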
1903 ExternalReference stamp = ExternalReference::date_cache_stamp(isolate()); | |
1904 __ mov(scratch, Operand(stamp)); | |
1905 __ LoadP(scratch, MemOperand(scratch)); | |
1906 __ LoadP(scratch0(), FieldMemOperand(object, JSDate::kCacheStampOffset)); | |
1907 __ cmp(scratch, scratch0()); | |
1908 __ bne(&runtime); | |
1909 __ LoadP(result, | |
1910 FieldMemOperand(object, JSDate::kValueOffset + | |
1911 kPointerSize * index->value())); | |
1912 __ b(&done); | |
1913 } | |
1914 __ bind(&runtime); | |
1915 __ PrepareCallCFunction(2, scratch); | |
1916 __ LoadSmiLiteral(r4, index); | |
1917 __ CallCFunction(ExternalReference::get_date_field_function(isolate()), 2); | |
1918 __ bind(&done); | |
1919 } | |
1920 } | |
1921 | |
1922 | |
1923 MemOperand LCodeGen::BuildSeqStringOperand(Register string, LOperand* index, | |
1924 String::Encoding encoding) { | |
1925 if (index->IsConstantOperand()) { | |
1926 int offset = ToInteger32(LConstantOperand::cast(index)); | |
1927 if (encoding == String::TWO_BYTE_ENCODING) { | |
1928 offset *= kUC16Size; | |
1929 } | |
1930 STATIC_ASSERT(kCharSize == 1); | |
1931 return FieldMemOperand(string, SeqString::kHeaderSize + offset); | |
1932 } | |
1933 Register scratch = scratch0(); | |
1934 DCHECK(!scratch.is(string)); | |
1935 DCHECK(!scratch.is(ToRegister(index))); | |
1936 if (encoding == String::ONE_BYTE_ENCODING) { | |
1937 __ add(scratch, string, ToRegister(index)); | |
1938 } else { | |
1939 STATIC_ASSERT(kUC16Size == 2); | |
1940 __ ShiftLeftImm(scratch, ToRegister(index), Operand(1)); | |
1941 __ add(scratch, string, scratch); | |
1942 } | |
1943 return FieldMemOperand(scratch, SeqString::kHeaderSize); | |
1944 } | |
1945 | |
1946 | |
1947 void LCodeGen::DoSeqStringGetChar(LSeqStringGetChar* instr) { | |
1948 String::Encoding encoding = instr->hydrogen()->encoding(); | |
1949 Register string = ToRegister(instr->string()); | |
1950 Register result = ToRegister(instr->result()); | |
1951 | |
1952 if (FLAG_debug_code) { | |
1953 Register scratch = scratch0(); | |
1954 __ LoadP(scratch, FieldMemOperand(string, HeapObject::kMapOffset)); | |
1955 __ lbz(scratch, FieldMemOperand(scratch, Map::kInstanceTypeOffset)); | |
1956 | |
1957 __ andi(scratch, scratch, | |
1958 Operand(kStringRepresentationMask | kStringEncodingMask)); | |
1959 static const uint32_t one_byte_seq_type = kSeqStringTag | kOneByteStringTag; | |
1960 static const uint32_t two_byte_seq_type = kSeqStringTag | kTwoByteStringTag; | |
1961 __ cmpi(scratch, | |
1962 Operand(encoding == String::ONE_BYTE_ENCODING ? one_byte_seq_type | |
1963 : two_byte_seq_type)); | |
1964 __ Check(eq, kUnexpectedStringType); | |
1965 } | |
1966 | |
1967 MemOperand operand = BuildSeqStringOperand(string, instr->index(), encoding); | |
1968 if (encoding == String::ONE_BYTE_ENCODING) { | |
1969 __ lbz(result, operand); | |
1970 } else { | |
1971 __ lhz(result, operand); | |
1972 } | |
1973 } | |
1974 | |
1975 | |
1976 void LCodeGen::DoSeqStringSetChar(LSeqStringSetChar* instr) { | |
1977 String::Encoding encoding = instr->hydrogen()->encoding(); | |
1978 Register string = ToRegister(instr->string()); | |
1979 Register value = ToRegister(instr->value()); | |
1980 | |
1981 if (FLAG_debug_code) { | |
1982 Register index = ToRegister(instr->index()); | |
1983 static const uint32_t one_byte_seq_type = kSeqStringTag | kOneByteStringTag; | |
1984 static const uint32_t two_byte_seq_type = kSeqStringTag | kTwoByteStringTag; | |
1985 int encoding_mask = | |
1986 instr->hydrogen()->encoding() == String::ONE_BYTE_ENCODING | |
1987 ? one_byte_seq_type | |
1988 : two_byte_seq_type; | |
1989 __ EmitSeqStringSetCharCheck(string, index, value, encoding_mask); | |
1990 } | |
1991 | |
1992 MemOperand operand = BuildSeqStringOperand(string, instr->index(), encoding); | |
1993 if (encoding == String::ONE_BYTE_ENCODING) { | |
1994 __ stb(value, operand); | |
1995 } else { | |
1996 __ sth(value, operand); | |
1997 } | |
1998 } | |
1999 | |
2000 | |
2001 void LCodeGen::DoAddI(LAddI* instr) { | |
2002 LOperand* right = instr->right(); | |
2003 Register left = ToRegister(instr->left()); | |
2004 Register result = ToRegister(instr->result()); | |
2005 bool can_overflow = instr->hydrogen()->CheckFlag(HValue::kCanOverflow); | |
2006 #if V8_TARGET_ARCH_PPC64 | |
2007 const bool isInteger = !(instr->hydrogen()->representation().IsSmi() || | |
2008 instr->hydrogen()->representation().IsExternal()); | |
2009 #else | |
2010 const bool isInteger = false; | |
2011 #endif | |
2012 | |
2013 if (!can_overflow || isInteger) { | |
2014 if (right->IsConstantOperand()) { | |
2015 __ Add(result, left, ToOperand(right).immediate(), r0); | |
2016 } else { | |
2017 __ add(result, left, EmitLoadRegister(right, ip)); | |
2018 } | |
2019 #if V8_TARGET_ARCH_PPC64 | |
2020 if (can_overflow) { | |
2021 __ TestIfInt32(result, r0); | |
2022 DeoptimizeIf(ne, instr, Deoptimizer::kOverflow); | |
2023 } | |
2024 #endif | |
2025 } else { | |
2026 if (right->IsConstantOperand()) { | |
2027 __ AddAndCheckForOverflow(result, left, ToOperand(right).immediate(), | |
2028 scratch0(), r0); | |
2029 } else { | |
2030 __ AddAndCheckForOverflow(result, left, EmitLoadRegister(right, ip), | |
2031 scratch0(), r0); | |
2032 } | |
2033 DeoptimizeIf(lt, instr, Deoptimizer::kOverflow, cr0); | |
2034 } | |
2035 } | |
2036 | |
2037 | |
2038 void LCodeGen::DoMathMinMax(LMathMinMax* instr) { | |
2039 LOperand* left = instr->left(); | |
2040 LOperand* right = instr->right(); | |
2041 HMathMinMax::Operation operation = instr->hydrogen()->operation(); | |
2042 Condition cond = (operation == HMathMinMax::kMathMin) ? le : ge; | |
2043 if (instr->hydrogen()->representation().IsSmiOrInteger32()) { | |
2044 Register left_reg = ToRegister(left); | |
2045 Register right_reg = EmitLoadRegister(right, ip); | |
2046 Register result_reg = ToRegister(instr->result()); | |
2047 Label return_left, done; | |
2048 #if V8_TARGET_ARCH_PPC64 | |
2049 if (instr->hydrogen_value()->representation().IsSmi()) { | |
2050 #endif | |
2051 __ cmp(left_reg, right_reg); | |
2052 #if V8_TARGET_ARCH_PPC64 | |
2053 } else { | |
2054 __ cmpw(left_reg, right_reg); | |
2055 } | |
2056 #endif | |
2057 if (CpuFeatures::IsSupported(ISELECT)) { | |
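// isel selects between two source registers based on a condition bit, | |
// avoiding a branch on processors that support it. | |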
2058 __ isel(cond, result_reg, left_reg, right_reg); | |
2059 } else { | |
2060 __ b(cond, &return_left); | |
2061 __ Move(result_reg, right_reg); | |
2062 __ b(&done); | |
2063 __ bind(&return_left); | |
2064 __ Move(result_reg, left_reg); | |
2065 __ bind(&done); | |
2066 } | |
2067 } else { | |
2068 DCHECK(instr->hydrogen()->representation().IsDouble()); | |
2069 DoubleRegister left_reg = ToDoubleRegister(left); | |
2070 DoubleRegister right_reg = ToDoubleRegister(right); | |
2071 DoubleRegister result_reg = ToDoubleRegister(instr->result()); | |
2072 Label check_nan_left, check_zero, return_left, return_right, done; | |
2073 __ fcmpu(left_reg, right_reg); | |
2074 __ bunordered(&check_nan_left); | |
2075 __ beq(&check_zero); | |
2076 __ b(cond, &return_left); | |
2077 __ b(&return_right); | |
2078 | |
2079 __ bind(&check_zero); | |
2080 __ fcmpu(left_reg, kDoubleRegZero); | |
2081 __ bne(&return_left); // left == right != 0. | |
2082 | |
2083 // At this point, both left and right are either 0 or -0. | |
2084 // N.B. The following works because +0 + -0 == +0 | |
2085 if (operation == HMathMinMax::kMathMin) { | |
2086 // For min we want logical-or of sign bit: -(-L + -R) | |
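// e.g. min(-0, +0): -((-(-0)) - (+0)) = -(+0 - +0) = -(+0) = -0. | |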
2087 __ fneg(left_reg, left_reg); | |
2088 __ fsub(result_reg, left_reg, right_reg); | |
2089 __ fneg(result_reg, result_reg); | |
2090 } else { | |
2091 // For max we want logical-and of sign bit: (L + R) | |
2092 __ fadd(result_reg, left_reg, right_reg); | |
2093 } | |
2094 __ b(&done); | |
2095 | |
2096 __ bind(&check_nan_left); | |
2097 __ fcmpu(left_reg, left_reg); | |
2098 __ bunordered(&return_left); // left == NaN. | |
2099 | |
2100 __ bind(&return_right); | |
2101 if (!right_reg.is(result_reg)) { | |
2102 __ fmr(result_reg, right_reg); | |
2103 } | |
2104 __ b(&done); | |
2105 | |
2106 __ bind(&return_left); | |
2107 if (!left_reg.is(result_reg)) { | |
2108 __ fmr(result_reg, left_reg); | |
2109 } | |
2110 __ bind(&done); | |
2111 } | |
2112 } | |
2113 | |
2114 | |
2115 void LCodeGen::DoArithmeticD(LArithmeticD* instr) { | |
2116 DoubleRegister left = ToDoubleRegister(instr->left()); | |
2117 DoubleRegister right = ToDoubleRegister(instr->right()); | |
2118 DoubleRegister result = ToDoubleRegister(instr->result()); | |
2119 switch (instr->op()) { | |
2120 case Token::ADD: | |
2121 __ fadd(result, left, right); | |
2122 break; | |
2123 case Token::SUB: | |
2124 __ fsub(result, left, right); | |
2125 break; | |
2126 case Token::MUL: | |
2127 __ fmul(result, left, right); | |
2128 break; | |
2129 case Token::DIV: | |
2130 __ fdiv(result, left, right); | |
2131 break; | |
2132 case Token::MOD: { | |
2133 __ PrepareCallCFunction(0, 2, scratch0()); | |
2134 __ MovToFloatParameters(left, right); | |
2135 __ CallCFunction(ExternalReference::mod_two_doubles_operation(isolate()), | |
2136 0, 2); | |
2137 // Move the result into the double result register. | |
2138 __ MovFromFloatResult(result); | |
2139 break; | |
2140 } | |
2141 default: | |
2142 UNREACHABLE(); | |
2143 break; | |
2144 } | |
2145 } | |
2146 | |
2147 | |
2148 void LCodeGen::DoArithmeticT(LArithmeticT* instr) { | |
2149 DCHECK(ToRegister(instr->context()).is(cp)); | |
2150 DCHECK(ToRegister(instr->left()).is(r4)); | |
2151 DCHECK(ToRegister(instr->right()).is(r3)); | |
2152 DCHECK(ToRegister(instr->result()).is(r3)); | |
2153 | |
2154 Handle<Code> code = | |
2155 CodeFactory::BinaryOpIC(isolate(), instr->op(), instr->strength()).code(); | |
2156 CallCode(code, RelocInfo::CODE_TARGET, instr); | |
2157 } | |
2158 | |
2159 | |
2160 template <class InstrType> | |
2161 void LCodeGen::EmitBranch(InstrType instr, Condition cond, CRegister cr) { | |
2162 int left_block = instr->TrueDestination(chunk_); | |
2163 int right_block = instr->FalseDestination(chunk_); | |
2164 | |
2165 int next_block = GetNextEmittedBlock(); | |
2166 | |
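// Pick the cheapest branch shape: fall through when one destination is | |
// the next emitted block; otherwise branch to both targets explicitly. | |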
2167 if (right_block == left_block || cond == al) { | |
2168 EmitGoto(left_block); | |
2169 } else if (left_block == next_block) { | |
2170 __ b(NegateCondition(cond), chunk_->GetAssemblyLabel(right_block), cr); | |
2171 } else if (right_block == next_block) { | |
2172 __ b(cond, chunk_->GetAssemblyLabel(left_block), cr); | |
2173 } else { | |
2174 __ b(cond, chunk_->GetAssemblyLabel(left_block), cr); | |
2175 __ b(chunk_->GetAssemblyLabel(right_block)); | |
2176 } | |
2177 } | |
2178 | |
2179 | |
2180 template <class InstrType> | |
2181 void LCodeGen::EmitTrueBranch(InstrType instr, Condition cond, CRegister cr) { | |
2182 int true_block = instr->TrueDestination(chunk_); | |
2183 __ b(cond, chunk_->GetAssemblyLabel(true_block), cr); | |
2184 } | |
2185 | |
2186 | |
2187 template <class InstrType> | |
2188 void LCodeGen::EmitFalseBranch(InstrType instr, Condition cond, CRegister cr) { | |
2189 int false_block = instr->FalseDestination(chunk_); | |
2190 __ b(cond, chunk_->GetAssemblyLabel(false_block), cr); | |
2191 } | |
2192 | |
2193 | |
2194 void LCodeGen::DoDebugBreak(LDebugBreak* instr) { __ stop("LBreak"); } | |
2195 | |
2196 | |
2197 void LCodeGen::DoBranch(LBranch* instr) { | |
2198 Representation r = instr->hydrogen()->value()->representation(); | |
2199 DoubleRegister dbl_scratch = double_scratch0(); | |
2200 const uint32_t crZOrNaNBits = (1 << (31 - Assembler::encode_crbit(cr7, CR_EQ)) | | |
2201 1 << (31 - Assembler::encode_crbit(cr7, CR_FU))); | |
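// After fcmpu against zero in cr7, CR_EQ is set for +/-0 and CR_FU | |
// (unordered) for NaN; either bit means the value is falsy. | |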
2202 | |
2203 if (r.IsInteger32()) { | |
2204 DCHECK(!info()->IsStub()); | |
2205 Register reg = ToRegister(instr->value()); | |
2206 __ cmpwi(reg, Operand::Zero()); | |
2207 EmitBranch(instr, ne); | |
2208 } else if (r.IsSmi()) { | |
2209 DCHECK(!info()->IsStub()); | |
2210 Register reg = ToRegister(instr->value()); | |
2211 __ cmpi(reg, Operand::Zero()); | |
2212 EmitBranch(instr, ne); | |
2213 } else if (r.IsDouble()) { | |
2214 DCHECK(!info()->IsStub()); | |
2215 DoubleRegister reg = ToDoubleRegister(instr->value()); | |
2216 // Test the double value. Zero and NaN are false. | |
2217 __ fcmpu(reg, kDoubleRegZero, cr7); | |
2218 __ mfcr(r0); | |
2219 __ andi(r0, r0, Operand(crZOrNaNBits)); | |
2220 EmitBranch(instr, eq, cr0); | |
2221 } else { | |
2222 DCHECK(r.IsTagged()); | |
2223 Register reg = ToRegister(instr->value()); | |
2224 HType type = instr->hydrogen()->value()->type(); | |
2225 if (type.IsBoolean()) { | |
2226 DCHECK(!info()->IsStub()); | |
2227 __ CompareRoot(reg, Heap::kTrueValueRootIndex); | |
2228 EmitBranch(instr, eq); | |
2229 } else if (type.IsSmi()) { | |
2230 DCHECK(!info()->IsStub()); | |
2231 __ cmpi(reg, Operand::Zero()); | |
2232 EmitBranch(instr, ne); | |
2233 } else if (type.IsJSArray()) { | |
2234 DCHECK(!info()->IsStub()); | |
2235 EmitBranch(instr, al); | |
2236 } else if (type.IsHeapNumber()) { | |
2237 DCHECK(!info()->IsStub()); | |
2238 __ lfd(dbl_scratch, FieldMemOperand(reg, HeapNumber::kValueOffset)); | |
2239 // Test the double value. Zero and NaN are false. | |
2240 __ fcmpu(dbl_scratch, kDoubleRegZero, cr7); | |
2241 __ mfcr(r0); | |
2242 __ andi(r0, r0, Operand(crZOrNaNBits)); | |
2243 EmitBranch(instr, eq, cr0); | |
2244 } else if (type.IsString()) { | |
2245 DCHECK(!info()->IsStub()); | |
2246 __ LoadP(ip, FieldMemOperand(reg, String::kLengthOffset)); | |
2247 __ cmpi(ip, Operand::Zero()); | |
2248 EmitBranch(instr, ne); | |
2249 } else { | |
2250 ToBooleanStub::Types expected = instr->hydrogen()->expected_input_types(); | |
2251 // Avoid deopts in the case where we've never executed this path before. | |
2252 if (expected.IsEmpty()) expected = ToBooleanStub::Types::Generic(); | |
2253 | |
2254 if (expected.Contains(ToBooleanStub::UNDEFINED)) { | |
2255 // undefined -> false. | |
2256 __ CompareRoot(reg, Heap::kUndefinedValueRootIndex); | |
2257 __ beq(instr->FalseLabel(chunk_)); | |
2258 } | |
2259 if (expected.Contains(ToBooleanStub::BOOLEAN)) { | |
2260 // Boolean -> its value. | |
2261 __ CompareRoot(reg, Heap::kTrueValueRootIndex); | |
2262 __ beq(instr->TrueLabel(chunk_)); | |
2263 __ CompareRoot(reg, Heap::kFalseValueRootIndex); | |
2264 __ beq(instr->FalseLabel(chunk_)); | |
2265 } | |
2266 if (expected.Contains(ToBooleanStub::NULL_TYPE)) { | |
2267 // 'null' -> false. | |
2268 __ CompareRoot(reg, Heap::kNullValueRootIndex); | |
2269 __ beq(instr->FalseLabel(chunk_)); | |
2270 } | |
2271 | |
2272 if (expected.Contains(ToBooleanStub::SMI)) { | |
2273 // Smis: 0 -> false, all other -> true. | |
2274 __ cmpi(reg, Operand::Zero()); | |
2275 __ beq(instr->FalseLabel(chunk_)); | |
2276 __ JumpIfSmi(reg, instr->TrueLabel(chunk_)); | |
2277 } else if (expected.NeedsMap()) { | |
2278 // If we need a map later and have a Smi -> deopt. | |
2279 __ TestIfSmi(reg, r0); | |
2280 DeoptimizeIf(eq, instr, Deoptimizer::kSmi, cr0); | |
2281 } | |
2282 | |
2283 const Register map = scratch0(); | |
2284 if (expected.NeedsMap()) { | |
2285 __ LoadP(map, FieldMemOperand(reg, HeapObject::kMapOffset)); | |
2286 | |
2287 if (expected.CanBeUndetectable()) { | |
2288 // Undetectable -> false. | |
2289 __ lbz(ip, FieldMemOperand(map, Map::kBitFieldOffset)); | |
2290 __ TestBit(ip, Map::kIsUndetectable, r0); | |
2291 __ bne(instr->FalseLabel(chunk_), cr0); | |
2292 } | |
2293 } | |
2294 | |
2295 if (expected.Contains(ToBooleanStub::SPEC_OBJECT)) { | |
2296 // spec object -> true. | |
2297 __ CompareInstanceType(map, ip, FIRST_SPEC_OBJECT_TYPE); | |
2298 __ bge(instr->TrueLabel(chunk_)); | |
2299 } | |
2300 | |
2301 if (expected.Contains(ToBooleanStub::STRING)) { | |
2302 // String value -> false iff empty. | |
2303 Label not_string; | |
2304 __ CompareInstanceType(map, ip, FIRST_NONSTRING_TYPE); | |
2305 __ bge(¬_string); | |
2306 __ LoadP(ip, FieldMemOperand(reg, String::kLengthOffset)); | |
2307 __ cmpi(ip, Operand::Zero()); | |
2308 __ bne(instr->TrueLabel(chunk_)); | |
2309 __ b(instr->FalseLabel(chunk_)); | |
2310 __ bind(¬_string); | |
2311 } | |
2312 | |
2313 if (expected.Contains(ToBooleanStub::SYMBOL)) { | |
2314 // Symbol value -> true. | |
2315 __ CompareInstanceType(map, ip, SYMBOL_TYPE); | |
2316 __ beq(instr->TrueLabel(chunk_)); | |
2317 } | |
2318 | |
2319 if (expected.Contains(ToBooleanStub::SIMD_VALUE)) { | |
2320 // SIMD value -> true. | |
2322 __ CompareInstanceType(map, ip, SIMD128_VALUE_TYPE); | |
2323 __ beq(instr->TrueLabel(chunk_)); | |
2324 } | |
2325 | |
2326 if (expected.Contains(ToBooleanStub::HEAP_NUMBER)) { | |
2327 // heap number -> false iff +0, -0, or NaN. | |
2328 Label not_heap_number; | |
2329 __ CompareRoot(map, Heap::kHeapNumberMapRootIndex); | |
2330 __ bne(¬_heap_number); | |
2331 __ lfd(dbl_scratch, FieldMemOperand(reg, HeapNumber::kValueOffset)); | |
2332 // Test the double value. Zero and NaN are false. | |
2333 __ fcmpu(dbl_scratch, kDoubleRegZero, cr7); | |
2334 __ mfcr(r0); | |
2335 __ andi(r0, r0, Operand(crZOrNaNBits)); | |
2336 __ bne(instr->FalseLabel(chunk_), cr0); | |
2337 __ b(instr->TrueLabel(chunk_)); | |
2338 __ bind(¬_heap_number); | |
2339 } | |
2340 | |
2341 if (!expected.IsGeneric()) { | |
2342 // We've seen something for the first time -> deopt. | |
2343 // This can only happen if we are not generic already. | |
2344 DeoptimizeIf(al, instr, Deoptimizer::kUnexpectedObject); | |
2345 } | |
2346 } | |
2347 } | |
2348 } | |
2349 | |
2350 | |
2351 void LCodeGen::EmitGoto(int block) { | |
2352 if (!IsNextEmittedBlock(block)) { | |
2353 __ b(chunk_->GetAssemblyLabel(LookupDestination(block))); | |
2354 } | |
2355 } | |
2356 | |
2357 | |
2358 void LCodeGen::DoGoto(LGoto* instr) { EmitGoto(instr->block_id()); } | |
2359 | |
2360 | |
2361 Condition LCodeGen::TokenToCondition(Token::Value op) { | |
2362 Condition cond = kNoCondition; | |
2363 switch (op) { | |
2364 case Token::EQ: | |
2365 case Token::EQ_STRICT: | |
2366 cond = eq; | |
2367 break; | |
2368 case Token::NE: | |
2369 case Token::NE_STRICT: | |
2370 cond = ne; | |
2371 break; | |
2372 case Token::LT: | |
2373 cond = lt; | |
2374 break; | |
2375 case Token::GT: | |
2376 cond = gt; | |
2377 break; | |
2378 case Token::LTE: | |
2379 cond = le; | |
2380 break; | |
2381 case Token::GTE: | |
2382 cond = ge; | |
2383 break; | |
2384 case Token::IN: | |
2385 case Token::INSTANCEOF: | |
2386 default: | |
2387 UNREACHABLE(); | |
2388 } | |
2389 return cond; | |
2390 } | |
2391 | |
2392 | |
2393 void LCodeGen::DoCompareNumericAndBranch(LCompareNumericAndBranch* instr) { | |
2394 LOperand* left = instr->left(); | |
2395 LOperand* right = instr->right(); | |
2396 bool is_unsigned = | |
2397 instr->hydrogen()->left()->CheckFlag(HInstruction::kUint32) || | |
2398 instr->hydrogen()->right()->CheckFlag(HInstruction::kUint32); | |
2399 Condition cond = TokenToCondition(instr->op()); | |
2400 | |
2401 if (left->IsConstantOperand() && right->IsConstantOperand()) { | |
2402 // We can statically evaluate the comparison. | |
2403 double left_val = ToDouble(LConstantOperand::cast(left)); | |
2404 double right_val = ToDouble(LConstantOperand::cast(right)); | |
2405 int next_block = EvalComparison(instr->op(), left_val, right_val) | |
2406 ? instr->TrueDestination(chunk_) | |
2407 : instr->FalseDestination(chunk_); | |
2408 EmitGoto(next_block); | |
2409 } else { | |
2410 if (instr->is_double()) { | |
2411 // Compare left and right operands as doubles and load the | |
2412 // resulting flags into the normal status register. | |
2413 __ fcmpu(ToDoubleRegister(left), ToDoubleRegister(right)); | |
2414 // If a NaN is involved, i.e. the result is unordered, | |
2415 // jump to false block label. | |
2416 __ bunordered(instr->FalseLabel(chunk_)); | |
2417 } else { | |
2418 if (right->IsConstantOperand()) { | |
2419 int32_t value = ToInteger32(LConstantOperand::cast(right)); | |
2420 if (instr->hydrogen_value()->representation().IsSmi()) { | |
2421 if (is_unsigned) { | |
2422 __ CmplSmiLiteral(ToRegister(left), Smi::FromInt(value), r0); | |
2423 } else { | |
2424 __ CmpSmiLiteral(ToRegister(left), Smi::FromInt(value), r0); | |
2425 } | |
2426 } else { | |
2427 if (is_unsigned) { | |
2428 __ Cmplwi(ToRegister(left), Operand(value), r0); | |
2429 } else { | |
2430 __ Cmpwi(ToRegister(left), Operand(value), r0); | |
2431 } | |
2432 } | |
2433 } else if (left->IsConstantOperand()) { | |
2434 int32_t value = ToInteger32(LConstantOperand::cast(left)); | |
2435 if (instr->hydrogen_value()->representation().IsSmi()) { | |
2436 if (is_unsigned) { | |
2437 __ CmplSmiLiteral(ToRegister(right), Smi::FromInt(value), r0); | |
2438 } else { | |
2439 __ CmpSmiLiteral(ToRegister(right), Smi::FromInt(value), r0); | |
2440 } | |
2441 } else { | |
2442 if (is_unsigned) { | |
2443 __ Cmplwi(ToRegister(right), Operand(value), r0); | |
2444 } else { | |
2445 __ Cmpwi(ToRegister(right), Operand(value), r0); | |
2446 } | |
2447 } | |
2448 // We commuted the operands, so commute the condition. | |
2449 cond = CommuteCondition(cond); | |
2450 } else if (instr->hydrogen_value()->representation().IsSmi()) { | |
2451 if (is_unsigned) { | |
2452 __ cmpl(ToRegister(left), ToRegister(right)); | |
2453 } else { | |
2454 __ cmp(ToRegister(left), ToRegister(right)); | |
2455 } | |
2456 } else { | |
2457 if (is_unsigned) { | |
2458 __ cmplw(ToRegister(left), ToRegister(right)); | |
2459 } else { | |
2460 __ cmpw(ToRegister(left), ToRegister(right)); | |
2461 } | |
2462 } | |
2463 } | |
2464 EmitBranch(instr, cond); | |
2465 } | |
2466 } | |
2467 | |
2468 | |
2469 void LCodeGen::DoCmpObjectEqAndBranch(LCmpObjectEqAndBranch* instr) { | |
2470 Register left = ToRegister(instr->left()); | |
2471 Register right = ToRegister(instr->right()); | |
2472 | |
2473 __ cmp(left, right); | |
2474 EmitBranch(instr, eq); | |
2475 } | |
2476 | |
2477 | |
2478 void LCodeGen::DoCmpHoleAndBranch(LCmpHoleAndBranch* instr) { | |
2479 if (instr->hydrogen()->representation().IsTagged()) { | |
2480 Register input_reg = ToRegister(instr->object()); | |
2481 __ mov(ip, Operand(factory()->the_hole_value())); | |
2482 __ cmp(input_reg, ip); | |
2483 EmitBranch(instr, eq); | |
2484 return; | |
2485 } | |
2486 | |
2487 DoubleRegister input_reg = ToDoubleRegister(instr->object()); | |
2488 __ fcmpu(input_reg, input_reg); | |
2489 EmitFalseBranch(instr, ordered); | |
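// The hole is encoded as a NaN with a distinguished upper word, so an | |
// ordered (non-NaN) value cannot be the hole and only the upper 32 | |
// bits need to be inspected afterwards. | |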
2490 | |
2491 Register scratch = scratch0(); | |
2492 __ MovDoubleHighToInt(scratch, input_reg); | |
2493 __ Cmpi(scratch, Operand(kHoleNanUpper32), r0); | |
2494 EmitBranch(instr, eq); | |
2495 } | |
2496 | |
2497 | |
2498 void LCodeGen::DoCompareMinusZeroAndBranch(LCompareMinusZeroAndBranch* instr) { | |
2499 Representation rep = instr->hydrogen()->value()->representation(); | |
2500 DCHECK(!rep.IsInteger32()); | |
2501 Register scratch = ToRegister(instr->temp()); | |
2502 | |
2503 if (rep.IsDouble()) { | |
2504 DoubleRegister value = ToDoubleRegister(instr->value()); | |
2505 __ fcmpu(value, kDoubleRegZero); | |
2506 EmitFalseBranch(instr, ne); | |
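// The value compares equal to zero; -0 is distinguished from +0 by | |
// the sign bit of the raw IEEE bit pattern, tested below. | |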
2507 #if V8_TARGET_ARCH_PPC64 | |
2508 __ MovDoubleToInt64(scratch, value); | |
2509 #else | |
2510 __ MovDoubleHighToInt(scratch, value); | |
2511 #endif | |
2512 __ cmpi(scratch, Operand::Zero()); | |
2513 EmitBranch(instr, lt); | |
2514 } else { | |
2515 Register value = ToRegister(instr->value()); | |
2516 __ CheckMap(value, scratch, Heap::kHeapNumberMapRootIndex, | |
2517 instr->FalseLabel(chunk()), DO_SMI_CHECK); | |
2518 #if V8_TARGET_ARCH_PPC64 | |
2519 __ LoadP(scratch, FieldMemOperand(value, HeapNumber::kValueOffset)); | |
2520 __ li(ip, Operand(1)); | |
2521 __ rotrdi(ip, ip, 1); // ip = 0x80000000_00000000 | |
2522 __ cmp(scratch, ip); | |
2523 #else | |
2524 __ lwz(scratch, FieldMemOperand(value, HeapNumber::kExponentOffset)); | |
2525 __ lwz(ip, FieldMemOperand(value, HeapNumber::kMantissaOffset)); | |
2526 Label skip; | |
2527 __ lis(r0, Operand(SIGN_EXT_IMM16(0x8000))); | |
2528 __ cmp(scratch, r0); | |
2529 __ bne(&skip); | |
2530 __ cmpi(ip, Operand::Zero()); | |
2531 __ bind(&skip); | |
2532 #endif | |
2533 EmitBranch(instr, eq); | |
2534 } | |
2535 } | |
2536 | |
2537 | |
2538 Condition LCodeGen::EmitIsString(Register input, Register temp1, | |
2539 Label* is_not_string, | |
2540 SmiCheck check_needed = INLINE_SMI_CHECK) { | |
2541 if (check_needed == INLINE_SMI_CHECK) { | |
2542 __ JumpIfSmi(input, is_not_string); | |
2543 } | |
2544 __ CompareObjectType(input, temp1, temp1, FIRST_NONSTRING_TYPE); | |
2545 | |
2546 return lt; | |
2547 } | |
2548 | |
2549 | |
2550 void LCodeGen::DoIsStringAndBranch(LIsStringAndBranch* instr) { | |
2551 Register reg = ToRegister(instr->value()); | |
2552 Register temp1 = ToRegister(instr->temp()); | |
2553 | |
2554 SmiCheck check_needed = instr->hydrogen()->value()->type().IsHeapObject() | |
2555 ? OMIT_SMI_CHECK | |
2556 : INLINE_SMI_CHECK; | |
2557 Condition true_cond = | |
2558 EmitIsString(reg, temp1, instr->FalseLabel(chunk_), check_needed); | |
2559 | |
2560 EmitBranch(instr, true_cond); | |
2561 } | |
2562 | |
2563 | |
2564 void LCodeGen::DoIsSmiAndBranch(LIsSmiAndBranch* instr) { | |
2565 Register input_reg = EmitLoadRegister(instr->value(), ip); | |
2566 __ TestIfSmi(input_reg, r0); | |
2567 EmitBranch(instr, eq, cr0); | |
2568 } | |
2569 | |
2570 | |
2571 void LCodeGen::DoIsUndetectableAndBranch(LIsUndetectableAndBranch* instr) { | |
2572 Register input = ToRegister(instr->value()); | |
2573 Register temp = ToRegister(instr->temp()); | |
2574 | |
2575 if (!instr->hydrogen()->value()->type().IsHeapObject()) { | |
2576 __ JumpIfSmi(input, instr->FalseLabel(chunk_)); | |
2577 } | |
2578 __ LoadP(temp, FieldMemOperand(input, HeapObject::kMapOffset)); | |
2579 __ lbz(temp, FieldMemOperand(temp, Map::kBitFieldOffset)); | |
2580 __ TestBit(temp, Map::kIsUndetectable, r0); | |
2581 EmitBranch(instr, ne, cr0); | |
2582 } | |
2583 | |
2584 | |
2585 static Condition ComputeCompareCondition(Token::Value op) { | |
2586 switch (op) { | |
2587 case Token::EQ_STRICT: | |
2588 case Token::EQ: | |
2589 return eq; | |
2590 case Token::LT: | |
2591 return lt; | |
2592 case Token::GT: | |
2593 return gt; | |
2594 case Token::LTE: | |
2595 return le; | |
2596 case Token::GTE: | |
2597 return ge; | |
2598 default: | |
2599 UNREACHABLE(); | |
2600 return kNoCondition; | |
2601 } | |
2602 } | |
2603 | |
2604 | |
2605 void LCodeGen::DoStringCompareAndBranch(LStringCompareAndBranch* instr) { | |
2606 DCHECK(ToRegister(instr->context()).is(cp)); | |
2607 DCHECK(ToRegister(instr->left()).is(r4)); | |
2608 DCHECK(ToRegister(instr->right()).is(r3)); | |
2609 | |
2610 Handle<Code> code = CodeFactory::StringCompare(isolate()).code(); | |
2611 CallCode(code, RelocInfo::CODE_TARGET, instr); | |
2612 __ cmpi(r3, Operand::Zero()); | |
2613 | |
2614 EmitBranch(instr, ComputeCompareCondition(instr->op())); | |
2615 } | |
2616 | |
2617 | |
2618 static InstanceType TestType(HHasInstanceTypeAndBranch* instr) { | |
2619 InstanceType from = instr->from(); | |
2620 InstanceType to = instr->to(); | |
2621 if (from == FIRST_TYPE) return to; | |
2622 DCHECK(from == to || to == LAST_TYPE); | |
2623 return from; | |
2624 } | |
2625 | |
2626 | |
2627 static Condition BranchCondition(HHasInstanceTypeAndBranch* instr) { | |
2628 InstanceType from = instr->from(); | |
2629 InstanceType to = instr->to(); | |
2630 if (from == to) return eq; | |
2631 if (to == LAST_TYPE) return ge; | |
2632 if (from == FIRST_TYPE) return le; | |
2633 UNREACHABLE(); | |
2634 return eq; | |
2635 } | |
2636 | |
2637 | |
2638 void LCodeGen::DoHasInstanceTypeAndBranch(LHasInstanceTypeAndBranch* instr) { | |
2639 Register scratch = scratch0(); | |
2640 Register input = ToRegister(instr->value()); | |
2641 | |
2642 if (!instr->hydrogen()->value()->type().IsHeapObject()) { | |
2643 __ JumpIfSmi(input, instr->FalseLabel(chunk_)); | |
2644 } | |
2645 | |
2646 __ CompareObjectType(input, scratch, scratch, TestType(instr->hydrogen())); | |
2647 EmitBranch(instr, BranchCondition(instr->hydrogen())); | |
2648 } | |
2649 | |
2650 | |
2651 void LCodeGen::DoGetCachedArrayIndex(LGetCachedArrayIndex* instr) { | |
2652 Register input = ToRegister(instr->value()); | |
2653 Register result = ToRegister(instr->result()); | |
2654 | |
2655 __ AssertString(input); | |
2656 | |
2657 __ lwz(result, FieldMemOperand(input, String::kHashFieldOffset)); | |
2658 __ IndexFromHash(result, result); | |
2659 } | |
2660 | |
2661 | |
2662 void LCodeGen::DoHasCachedArrayIndexAndBranch( | |
2663 LHasCachedArrayIndexAndBranch* instr) { | |
2664 Register input = ToRegister(instr->value()); | |
2665 Register scratch = scratch0(); | |
2666 | |
2667 __ lwz(scratch, FieldMemOperand(input, String::kHashFieldOffset)); | |
2668 __ mov(r0, Operand(String::kContainsCachedArrayIndexMask)); | |
2669 __ and_(r0, scratch, r0, SetRC); | |
2670 EmitBranch(instr, eq, cr0); | |
2671 } | |
2672 | |
2673 | |
2674 // Branches to a label or falls through with the answer in flags. Trashes | |
2675 // the temp registers, but not the input. | |
2676 void LCodeGen::EmitClassOfTest(Label* is_true, Label* is_false, | |
2677 Handle<String> class_name, Register input, | |
2678 Register temp, Register temp2) { | |
2679 DCHECK(!input.is(temp)); | |
2680 DCHECK(!input.is(temp2)); | |
2681 DCHECK(!temp.is(temp2)); | |
2682 | |
2683 __ JumpIfSmi(input, is_false); | |
2684 | |
2685 if (String::Equals(isolate()->factory()->Function_string(), class_name)) { | |
2686 // Assuming the following assertions, we can use the same compares to test | |
2687 // for both being a function type and being in the object type range. | |
2688 STATIC_ASSERT(NUM_OF_CALLABLE_SPEC_OBJECT_TYPES == 2); | |
2689 STATIC_ASSERT(FIRST_NONCALLABLE_SPEC_OBJECT_TYPE == | |
2690 FIRST_SPEC_OBJECT_TYPE + 1); | |
2691 STATIC_ASSERT(LAST_NONCALLABLE_SPEC_OBJECT_TYPE == | |
2692 LAST_SPEC_OBJECT_TYPE - 1); | |
2693 STATIC_ASSERT(LAST_SPEC_OBJECT_TYPE == LAST_TYPE); | |
2694 __ CompareObjectType(input, temp, temp2, FIRST_SPEC_OBJECT_TYPE); | |
2695 __ blt(is_false); | |
2696 __ beq(is_true); | |
2697 __ cmpi(temp2, Operand(LAST_SPEC_OBJECT_TYPE)); | |
2698 __ beq(is_true); | |
2699 } else { | |
2700 // Faster code path to avoid two compares: subtract lower bound from the | |
2701 // actual type and do a signed compare with the width of the type range. | |
2702 __ LoadP(temp, FieldMemOperand(input, HeapObject::kMapOffset)); | |
2703 __ lbz(temp2, FieldMemOperand(temp, Map::kInstanceTypeOffset)); | |
2704 __ subi(temp2, temp2, Operand(FIRST_NONCALLABLE_SPEC_OBJECT_TYPE)); | |
2705 __ cmpi(temp2, Operand(LAST_NONCALLABLE_SPEC_OBJECT_TYPE - | |
2706 FIRST_NONCALLABLE_SPEC_OBJECT_TYPE)); | |
2707 __ bgt(is_false); | |
2708 } | |
2709 | |
2710 // Now we are in the FIRST-LAST_NONCALLABLE_SPEC_OBJECT_TYPE range. | |
2711 // Check if the constructor in the map is a function. | |
2712 Register instance_type = ip; | |
2713 __ GetMapConstructor(temp, temp, temp2, instance_type); | |
2714 | |
2715 // Objects with a non-function constructor have class 'Object'. | |
2716 __ cmpi(instance_type, Operand(JS_FUNCTION_TYPE)); | |
2717 if (class_name->IsOneByteEqualTo(STATIC_CHAR_VECTOR("Object"))) { | |
2718 __ bne(is_true); | |
2719 } else { | |
2720 __ bne(is_false); | |
2721 } | |
2722 | |
2723 // temp now contains the constructor function. Grab the | |
2724 // instance class name from there. | |
2725 __ LoadP(temp, FieldMemOperand(temp, JSFunction::kSharedFunctionInfoOffset)); | |
2726 __ LoadP(temp, | |
2727 FieldMemOperand(temp, SharedFunctionInfo::kInstanceClassNameOffset)); | |
2728 // The class name we are testing against is internalized since it's a literal. | |
2729 // The name in the constructor is internalized because of the way the context | |
2730 // is booted. This routine isn't expected to work for random API-created | |
2731 // classes and it doesn't have to because you can't access it with natives | |
2732 // syntax. Since both sides are internalized it is sufficient to use an | |
2733 // identity comparison. | |
2734 __ Cmpi(temp, Operand(class_name), r0); | |
2735 // End with the answer in flags. | |
2736 } | |
2737 | |
2738 | |
2739 void LCodeGen::DoClassOfTestAndBranch(LClassOfTestAndBranch* instr) { | |
2740 Register input = ToRegister(instr->value()); | |
2741 Register temp = scratch0(); | |
2742 Register temp2 = ToRegister(instr->temp()); | |
2743 Handle<String> class_name = instr->hydrogen()->class_name(); | |
2744 | |
2745 EmitClassOfTest(instr->TrueLabel(chunk_), instr->FalseLabel(chunk_), | |
2746 class_name, input, temp, temp2); | |
2747 | |
2748 EmitBranch(instr, eq); | |
2749 } | |
2750 | |
2751 | |
2752 void LCodeGen::DoCmpMapAndBranch(LCmpMapAndBranch* instr) { | |
2753 Register reg = ToRegister(instr->value()); | |
2754 Register temp = ToRegister(instr->temp()); | |
2755 | |
2756 __ LoadP(temp, FieldMemOperand(reg, HeapObject::kMapOffset)); | |
2757 __ Cmpi(temp, Operand(instr->map()), r0); | |
2758 EmitBranch(instr, eq); | |
2759 } | |
2760 | |
2761 | |
2762 void LCodeGen::DoInstanceOf(LInstanceOf* instr) { | |
2763 DCHECK(ToRegister(instr->context()).is(cp)); | |
2764 DCHECK(ToRegister(instr->left()).is(InstanceOfDescriptor::LeftRegister())); | |
2765 DCHECK(ToRegister(instr->right()).is(InstanceOfDescriptor::RightRegister())); | |
2766 DCHECK(ToRegister(instr->result()).is(r3)); | |
2767 InstanceOfStub stub(isolate()); | |
2768 CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr); | |
2769 } | |
2770 | |
2771 | |
2772 void LCodeGen::DoHasInPrototypeChainAndBranch( | |
2773 LHasInPrototypeChainAndBranch* instr) { | |
2774 Register const object = ToRegister(instr->object()); | |
2775 Register const object_map = scratch0(); | |
2776 Register const object_prototype = object_map; | |
2777 Register const prototype = ToRegister(instr->prototype()); | |
2778 | |
2779 // The {object} must be a spec object. It's sufficient to know that {object} | |
2780 // is not a smi, since all other non-spec objects have {null} prototypes and | |
2781 // will be ruled out below. | |
2782 if (instr->hydrogen()->ObjectNeedsSmiCheck()) { | |
2783 __ TestIfSmi(object, r0); | |
2784 EmitFalseBranch(instr, eq, cr0); | |
2785 } | |
2786 | |
2787 // Loop through the {object}s prototype chain looking for the {prototype}. | |
2788 __ LoadP(object_map, FieldMemOperand(object, HeapObject::kMapOffset)); | |
2789 Label loop; | |
2790 __ bind(&loop); | |
2791 __ LoadP(object_prototype, | |
2792 FieldMemOperand(object_map, Map::kPrototypeOffset)); | |
2793 __ cmp(object_prototype, prototype); | |
2794 EmitTrueBranch(instr, eq); | |
2795 __ CompareRoot(object_prototype, Heap::kNullValueRootIndex); | |
2796 EmitFalseBranch(instr, eq); | |
2797 __ LoadP(object_map, | |
2798 FieldMemOperand(object_prototype, HeapObject::kMapOffset)); | |
2799 __ b(&loop); | |
2800 } | |
2801 | |
2802 | |
2803 void LCodeGen::DoCmpT(LCmpT* instr) { | |
2804 DCHECK(ToRegister(instr->context()).is(cp)); | |
2805 Token::Value op = instr->op(); | |
2806 | |
2807 Handle<Code> ic = | |
2808 CodeFactory::CompareIC(isolate(), op, instr->strength()).code(); | |
2809 CallCode(ic, RelocInfo::CODE_TARGET, instr); | |
2810 // This instruction also signals that no smi code was inlined. | |
2811 __ cmpi(r3, Operand::Zero()); | |
2812 | |
2813 Condition condition = ComputeCompareCondition(op); | |
2814 if (CpuFeatures::IsSupported(ISELECT)) { | |
2815 __ LoadRoot(r4, Heap::kTrueValueRootIndex); | |
2816 __ LoadRoot(r5, Heap::kFalseValueRootIndex); | |
2817 __ isel(condition, ToRegister(instr->result()), r4, r5); | |
2818 } else { | |
2819 Label true_value, done; | |
2820 | |
2821 __ b(condition, &true_value); | |
2822 | |
2823 __ LoadRoot(ToRegister(instr->result()), Heap::kFalseValueRootIndex); | |
2824 __ b(&done); | |
2825 | |
2826 __ bind(&true_value); | |
2827 __ LoadRoot(ToRegister(instr->result()), Heap::kTrueValueRootIndex); | |
2828 | |
2829 __ bind(&done); | |
2830 } | |
2831 } | |
2832 | |
2833 | |
2834 void LCodeGen::DoReturn(LReturn* instr) { | |
2835 if (FLAG_trace && info()->IsOptimizing()) { | |
2836 // Push the return value on the stack as the parameter. | |
2837 // Runtime::TraceExit returns its parameter in r3. We're leaving the code | |
2838 // managed by the register allocator and tearing down the frame, so it's | |
2839 // safe to write to the context register. | |
2840 __ push(r3); | |
2841 __ LoadP(cp, MemOperand(fp, StandardFrameConstants::kContextOffset)); | |
2842 __ CallRuntime(Runtime::kTraceExit, 1); | |
2843 } | |
2844 if (info()->saves_caller_doubles()) { | |
2845 RestoreCallerDoubles(); | |
2846 } | |
2847 if (instr->has_constant_parameter_count()) { | |
2848 int parameter_count = ToInteger32(instr->constant_parameter_count()); | |
2849 int32_t sp_delta = (parameter_count + 1) * kPointerSize; | |
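// The +1 accounts for the receiver slot popped along with the | |
// declared parameters. | |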
2850 if (NeedsEagerFrame()) { | |
2851 masm_->LeaveFrame(StackFrame::JAVA_SCRIPT, sp_delta); | |
2852 } else if (sp_delta != 0) { | |
2853 __ addi(sp, sp, Operand(sp_delta)); | |
2854 } | |
2855 } else { | |
2856 DCHECK(info()->IsStub()); // Functions would need to drop one more value. | |
2857 Register reg = ToRegister(instr->parameter_count()); | |
2858 // The argument count parameter is a smi. | |
2859 if (NeedsEagerFrame()) { | |
2860 masm_->LeaveFrame(StackFrame::JAVA_SCRIPT); | |
2861 } | |
2862 __ SmiToPtrArrayOffset(r0, reg); | |
2863 __ add(sp, sp, r0); | |
2864 } | |
2865 | |
2866 __ blr(); | |
2867 } | |
2868 | |
2869 | |
2870 template <class T> | |
2871 void LCodeGen::EmitVectorLoadICRegisters(T* instr) { | |
2872 Register vector_register = ToRegister(instr->temp_vector()); | |
2873 Register slot_register = LoadDescriptor::SlotRegister(); | |
2874 DCHECK(vector_register.is(LoadWithVectorDescriptor::VectorRegister())); | |
2875 DCHECK(slot_register.is(r3)); | |
2876 | |
2877 AllowDeferredHandleDereference vector_structure_check; | |
2878 Handle<TypeFeedbackVector> vector = instr->hydrogen()->feedback_vector(); | |
2879 __ Move(vector_register, vector); | |
2880 // No need to allocate this register. | |
2881 FeedbackVectorSlot slot = instr->hydrogen()->slot(); | |
2882 int index = vector->GetIndex(slot); | |
2883 __ LoadSmiLiteral(slot_register, Smi::FromInt(index)); | |
2884 } | |
2885 | |
2886 | |
2887 template <class T> | |
2888 void LCodeGen::EmitVectorStoreICRegisters(T* instr) { | |
2889 Register vector_register = ToRegister(instr->temp_vector()); | |
2890 Register slot_register = ToRegister(instr->temp_slot()); | |
2891 | |
2892 AllowDeferredHandleDereference vector_structure_check; | |
2893 Handle<TypeFeedbackVector> vector = instr->hydrogen()->feedback_vector(); | |
2894 __ Move(vector_register, vector); | |
2895 FeedbackVectorSlot slot = instr->hydrogen()->slot(); | |
2896 int index = vector->GetIndex(slot); | |
2897 __ LoadSmiLiteral(slot_register, Smi::FromInt(index)); | |
2898 } | |
2899 | |
2900 | |
2901 void LCodeGen::DoLoadGlobalGeneric(LLoadGlobalGeneric* instr) { | |
2902 DCHECK(ToRegister(instr->context()).is(cp)); | |
2903 DCHECK(ToRegister(instr->global_object()) | |
2904 .is(LoadDescriptor::ReceiverRegister())); | |
2905 DCHECK(ToRegister(instr->result()).is(r3)); | |
2906 | |
2907 __ mov(LoadDescriptor::NameRegister(), Operand(instr->name())); | |
2908 EmitVectorLoadICRegisters<LLoadGlobalGeneric>(instr); | |
2909 Handle<Code> ic = | |
2910 CodeFactory::LoadICInOptimizedCode(isolate(), instr->typeof_mode(), | |
2911 SLOPPY, PREMONOMORPHIC).code(); | |
2912 CallCode(ic, RelocInfo::CODE_TARGET, instr); | |
2913 } | |
2914 | |
2915 | |
2916 void LCodeGen::DoLoadGlobalViaContext(LLoadGlobalViaContext* instr) { | |
2917 DCHECK(ToRegister(instr->context()).is(cp)); | |
2918 DCHECK(ToRegister(instr->result()).is(r3)); | |
2919 | |
2920 int const slot = instr->slot_index(); | |
2921 int const depth = instr->depth(); | |
2922 if (depth <= LoadGlobalViaContextStub::kMaximumDepth) { | |
2923 __ mov(LoadGlobalViaContextDescriptor::SlotRegister(), Operand(slot)); | |
2924 Handle<Code> stub = | |
2925 CodeFactory::LoadGlobalViaContext(isolate(), depth).code(); | |
2926 CallCode(stub, RelocInfo::CODE_TARGET, instr); | |
2927 } else { | |
2928 __ Push(Smi::FromInt(slot)); | |
2929 __ CallRuntime(Runtime::kLoadGlobalViaContext, 1); | |
2930 } | |
2931 } | |
2932 | |
2933 | |
2934 void LCodeGen::DoLoadContextSlot(LLoadContextSlot* instr) { | |
2935 Register context = ToRegister(instr->context()); | |
2936 Register result = ToRegister(instr->result()); | |
2937 __ LoadP(result, ContextOperand(context, instr->slot_index())); | |
2938 if (instr->hydrogen()->RequiresHoleCheck()) { | |
2939 __ LoadRoot(ip, Heap::kTheHoleValueRootIndex); | |
2940 if (instr->hydrogen()->DeoptimizesOnHole()) { | |
2941 __ cmp(result, ip); | |
2942 DeoptimizeIf(eq, instr, Deoptimizer::kHole); | |
2943 } else { | |
2944 if (CpuFeatures::IsSupported(ISELECT)) { | |
2945 Register scratch = scratch0(); | |
2946 __ mov(scratch, Operand(factory()->undefined_value())); | |
2947 __ cmp(result, ip); | |
2948 __ isel(eq, result, scratch, result); | |
2949 } else { | |
2950 Label skip; | |
2951 __ cmp(result, ip); | |
2952 __ bne(&skip); | |
2953 __ mov(result, Operand(factory()->undefined_value())); | |
2954 __ bind(&skip); | |
2955 } | |
2956 } | |
2957 } | |
2958 } | |
2959 | |
2960 | |
2961 void LCodeGen::DoStoreContextSlot(LStoreContextSlot* instr) { | |
2962 Register context = ToRegister(instr->context()); | |
2963 Register value = ToRegister(instr->value()); | |
2964 Register scratch = scratch0(); | |
2965 MemOperand target = ContextOperand(context, instr->slot_index()); | |
2966 | |
2967 Label skip_assignment; | |
2968 | |
2969 if (instr->hydrogen()->RequiresHoleCheck()) { | |
2970 __ LoadP(scratch, target); | |
2971 __ LoadRoot(ip, Heap::kTheHoleValueRootIndex); | |
2972 __ cmp(scratch, ip); | |
2973 if (instr->hydrogen()->DeoptimizesOnHole()) { | |
2974 DeoptimizeIf(eq, instr, Deoptimizer::kHole); | |
2975 } else { | |
2976 __ bne(&skip_assignment); | |
2977 } | |
2978 } | |
2979 | |
2980 __ StoreP(value, target, r0); | |
2981 if (instr->hydrogen()->NeedsWriteBarrier()) { | |
2982 SmiCheck check_needed = instr->hydrogen()->value()->type().IsHeapObject() | |
2983 ? OMIT_SMI_CHECK | |
2984 : INLINE_SMI_CHECK; | |
2985 __ RecordWriteContextSlot(context, target.offset(), value, scratch, | |
2986 GetLinkRegisterState(), kSaveFPRegs, | |
2987 EMIT_REMEMBERED_SET, check_needed); | |
2988 } | |
2989 | |
2990 __ bind(&skip_assignment); | |
2991 } | |
2992 | |
2993 | |
2994 void LCodeGen::DoLoadNamedField(LLoadNamedField* instr) { | |
2995 HObjectAccess access = instr->hydrogen()->access(); | |
2996 int offset = access.offset(); | |
2997 Register object = ToRegister(instr->object()); | |
2998 | |
2999 if (access.IsExternalMemory()) { | |
3000 Register result = ToRegister(instr->result()); | |
3001 MemOperand operand = MemOperand(object, offset); | |
3002 __ LoadRepresentation(result, operand, access.representation(), r0); | |
3003 return; | |
3004 } | |
3005 | |
3006 if (instr->hydrogen()->representation().IsDouble()) { | |
3007 DCHECK(access.IsInobject()); | |
3008 DoubleRegister result = ToDoubleRegister(instr->result()); | |
3009 __ lfd(result, FieldMemOperand(object, offset)); | |
3010 return; | |
3011 } | |
3012 | |
3013 Register result = ToRegister(instr->result()); | |
3014 if (!access.IsInobject()) { | |
3015 __ LoadP(result, FieldMemOperand(object, JSObject::kPropertiesOffset)); | |
3016 object = result; | |
3017 } | |
3018 | |
3019 Representation representation = access.representation(); | |
3020 | |
3021 #if V8_TARGET_ARCH_PPC64 | |
3022 // 64-bit Smi optimization | |
3023 if (representation.IsSmi() && | |
3024 instr->hydrogen()->representation().IsInteger32()) { | |
3025 // Read int value directly from upper half of the smi. | |
3026 offset = SmiWordOffset(offset); | |
3027 representation = Representation::Integer32(); | |
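// A smi keeps its 32-bit payload in a single word of the 64-bit | |
// field, so a 32-bit load at SmiWordOffset(offset) yields the | |
// untagged value directly. | |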
3028 } | |
3029 #endif | |
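  // A 64-bit Smi keeps its payload in the upper 32 bits (value << 32), so
  // the untagged integer can be read with a single word load; SmiWordOffset
  // picks the half of the field that holds the payload for the target's
  // endianness.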

  __ LoadRepresentation(result, FieldMemOperand(object, offset), representation,
                        r0);
}


void LCodeGen::DoLoadNamedGeneric(LLoadNamedGeneric* instr) {
  DCHECK(ToRegister(instr->context()).is(cp));
  DCHECK(ToRegister(instr->object()).is(LoadDescriptor::ReceiverRegister()));
  DCHECK(ToRegister(instr->result()).is(r3));

  // Name is always in r5.
  __ mov(LoadDescriptor::NameRegister(), Operand(instr->name()));
  EmitVectorLoadICRegisters<LLoadNamedGeneric>(instr);
  Handle<Code> ic =
      CodeFactory::LoadICInOptimizedCode(
          isolate(), NOT_INSIDE_TYPEOF, instr->hydrogen()->language_mode(),
          instr->hydrogen()->initialization_state()).code();
  CallCode(ic, RelocInfo::CODE_TARGET, instr);
}


void LCodeGen::DoLoadFunctionPrototype(LLoadFunctionPrototype* instr) {
  Register scratch = scratch0();
  Register function = ToRegister(instr->function());
  Register result = ToRegister(instr->result());

  // Get the prototype or initial map from the function.
  __ LoadP(result,
           FieldMemOperand(function, JSFunction::kPrototypeOrInitialMapOffset));

  // Check that the function has a prototype or an initial map.
  __ LoadRoot(ip, Heap::kTheHoleValueRootIndex);
  __ cmp(result, ip);
  DeoptimizeIf(eq, instr, Deoptimizer::kHole);

  // If the function does not have an initial map, we're done.
  if (CpuFeatures::IsSupported(ISELECT)) {
    // Get the prototype from the initial map (optimistic).
    __ LoadP(ip, FieldMemOperand(result, Map::kPrototypeOffset));
    __ CompareObjectType(result, scratch, scratch, MAP_TYPE);
    __ isel(eq, result, ip, result);
  } else {
    Label done;
    __ CompareObjectType(result, scratch, scratch, MAP_TYPE);
    __ bne(&done);

    // Get the prototype from the initial map.
    __ LoadP(result, FieldMemOperand(result, Map::kPrototypeOffset));

    // All done.
    __ bind(&done);
  }
}


void LCodeGen::DoLoadRoot(LLoadRoot* instr) {
  Register result = ToRegister(instr->result());
  __ LoadRoot(result, instr->index());
}


void LCodeGen::DoAccessArgumentsAt(LAccessArgumentsAt* instr) {
  Register arguments = ToRegister(instr->arguments());
  Register result = ToRegister(instr->result());
  // There are two words between the frame pointer and the last argument.
  // Subtracting from length accounts for one of them; add one more.
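  // For example, with length == 3 and index == 0 (the first argument),
  // the slot is at arguments + (3 - 0 + 1) * kPointerSize.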
  if (instr->length()->IsConstantOperand()) {
    int const_length = ToInteger32(LConstantOperand::cast(instr->length()));
    if (instr->index()->IsConstantOperand()) {
      int const_index = ToInteger32(LConstantOperand::cast(instr->index()));
      int index = (const_length - const_index) + 1;
      __ LoadP(result, MemOperand(arguments, index * kPointerSize), r0);
    } else {
      Register index = ToRegister(instr->index());
      __ subfic(result, index, Operand(const_length + 1));
      __ ShiftLeftImm(result, result, Operand(kPointerSizeLog2));
      __ LoadPX(result, MemOperand(arguments, result));
    }
  } else if (instr->index()->IsConstantOperand()) {
    Register length = ToRegister(instr->length());
    int const_index = ToInteger32(LConstantOperand::cast(instr->index()));
    int loc = const_index - 1;
    if (loc != 0) {
      __ subi(result, length, Operand(loc));
      __ ShiftLeftImm(result, result, Operand(kPointerSizeLog2));
      __ LoadPX(result, MemOperand(arguments, result));
    } else {
      __ ShiftLeftImm(result, length, Operand(kPointerSizeLog2));
      __ LoadPX(result, MemOperand(arguments, result));
    }
  } else {
    Register length = ToRegister(instr->length());
    Register index = ToRegister(instr->index());
    __ sub(result, length, index);
    __ addi(result, result, Operand(1));
    __ ShiftLeftImm(result, result, Operand(kPointerSizeLog2));
    __ LoadPX(result, MemOperand(arguments, result));
  }
}


void LCodeGen::DoLoadKeyedExternalArray(LLoadKeyed* instr) {
  Register external_pointer = ToRegister(instr->elements());
  Register key = no_reg;
  ElementsKind elements_kind = instr->elements_kind();
  bool key_is_constant = instr->key()->IsConstantOperand();
  int constant_key = 0;
  if (key_is_constant) {
    constant_key = ToInteger32(LConstantOperand::cast(instr->key()));
    if (constant_key & 0xF0000000) {
      Abort(kArrayIndexConstantValueTooBig);
    }
  } else {
    key = ToRegister(instr->key());
  }
  int element_size_shift = ElementsKindToShiftSize(elements_kind);
  bool key_is_smi = instr->hydrogen()->key()->representation().IsSmi();
  int base_offset = instr->base_offset();

  if (elements_kind == FLOAT32_ELEMENTS || elements_kind == FLOAT64_ELEMENTS) {
    DoubleRegister result = ToDoubleRegister(instr->result());
    if (key_is_constant) {
      __ Add(scratch0(), external_pointer, constant_key << element_size_shift,
             r0);
    } else {
      __ IndexToArrayOffset(r0, key, element_size_shift, key_is_smi);
      __ add(scratch0(), external_pointer, r0);
    }
    if (elements_kind == FLOAT32_ELEMENTS) {
      __ lfs(result, MemOperand(scratch0(), base_offset));
    } else {  // i.e. elements_kind == FLOAT64_ELEMENTS
      __ lfd(result, MemOperand(scratch0(), base_offset));
    }
  } else {
    Register result = ToRegister(instr->result());
    MemOperand mem_operand =
        PrepareKeyedOperand(key, external_pointer, key_is_constant, key_is_smi,
                            constant_key, element_size_shift, base_offset);
    switch (elements_kind) {
      case INT8_ELEMENTS:
        if (key_is_constant) {
          __ LoadByte(result, mem_operand, r0);
        } else {
          __ lbzx(result, mem_operand);
        }
        __ extsb(result, result);
        break;
      case UINT8_ELEMENTS:
      case UINT8_CLAMPED_ELEMENTS:
        if (key_is_constant) {
          __ LoadByte(result, mem_operand, r0);
        } else {
          __ lbzx(result, mem_operand);
        }
        break;
      case INT16_ELEMENTS:
        if (key_is_constant) {
          __ LoadHalfWordArith(result, mem_operand, r0);
        } else {
          __ lhax(result, mem_operand);
        }
        break;
      case UINT16_ELEMENTS:
        if (key_is_constant) {
          __ LoadHalfWord(result, mem_operand, r0);
        } else {
          __ lhzx(result, mem_operand);
        }
        break;
      case INT32_ELEMENTS:
        if (key_is_constant) {
          __ LoadWordArith(result, mem_operand, r0);
        } else {
          __ lwax(result, mem_operand);
        }
        break;
      case UINT32_ELEMENTS:
        if (key_is_constant) {
          __ LoadWord(result, mem_operand, r0);
        } else {
          __ lwzx(result, mem_operand);
        }
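        // A loaded value with the sign bit set does not fit in an int32;
        // deopt unless the uses are known to treat the value as uint32.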
        if (!instr->hydrogen()->CheckFlag(HInstruction::kUint32)) {
          __ lis(r0, Operand(SIGN_EXT_IMM16(0x8000)));
          __ cmplw(result, r0);
          DeoptimizeIf(ge, instr, Deoptimizer::kNegativeValue);
        }
        break;
      case FLOAT32_ELEMENTS:
      case FLOAT64_ELEMENTS:
      case FAST_HOLEY_DOUBLE_ELEMENTS:
      case FAST_HOLEY_ELEMENTS:
      case FAST_HOLEY_SMI_ELEMENTS:
      case FAST_DOUBLE_ELEMENTS:
      case FAST_ELEMENTS:
      case FAST_SMI_ELEMENTS:
      case DICTIONARY_ELEMENTS:
      case FAST_SLOPPY_ARGUMENTS_ELEMENTS:
      case SLOW_SLOPPY_ARGUMENTS_ELEMENTS:
        UNREACHABLE();
        break;
    }
  }
}


void LCodeGen::DoLoadKeyedFixedDoubleArray(LLoadKeyed* instr) {
  Register elements = ToRegister(instr->elements());
  bool key_is_constant = instr->key()->IsConstantOperand();
  Register key = no_reg;
  DoubleRegister result = ToDoubleRegister(instr->result());
  Register scratch = scratch0();

  int element_size_shift = ElementsKindToShiftSize(FAST_DOUBLE_ELEMENTS);
  bool key_is_smi = instr->hydrogen()->key()->representation().IsSmi();
  int constant_key = 0;
  if (key_is_constant) {
    constant_key = ToInteger32(LConstantOperand::cast(instr->key()));
    if (constant_key & 0xF0000000) {
      Abort(kArrayIndexConstantValueTooBig);
    }
  } else {
    key = ToRegister(instr->key());
  }

  int base_offset = instr->base_offset() + constant_key * kDoubleSize;
  if (!key_is_constant) {
    __ IndexToArrayOffset(r0, key, element_size_shift, key_is_smi);
    __ add(scratch, elements, r0);
    elements = scratch;
  }
  if (!is_int16(base_offset)) {
    __ Add(scratch, elements, base_offset, r0);
    base_offset = 0;
    elements = scratch;
  }
  __ lfd(result, MemOperand(elements, base_offset));

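  // The hole in a double array is a NaN with a fixed bit pattern, so
  // comparing the upper 32 bits against kHoleNanUpper32 is enough to
  // detect it.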
  if (instr->hydrogen()->RequiresHoleCheck()) {
    if (is_int16(base_offset + Register::kExponentOffset)) {
      __ lwz(scratch,
             MemOperand(elements, base_offset + Register::kExponentOffset));
    } else {
      __ addi(scratch, elements, Operand(base_offset));
      __ lwz(scratch, MemOperand(scratch, Register::kExponentOffset));
    }
    __ Cmpi(scratch, Operand(kHoleNanUpper32), r0);
    DeoptimizeIf(eq, instr, Deoptimizer::kHole);
  }
}


void LCodeGen::DoLoadKeyedFixedArray(LLoadKeyed* instr) {
  HLoadKeyed* hinstr = instr->hydrogen();
  Register elements = ToRegister(instr->elements());
  Register result = ToRegister(instr->result());
  Register scratch = scratch0();
  Register store_base = scratch;
  int offset = instr->base_offset();

  if (instr->key()->IsConstantOperand()) {
    LConstantOperand* const_operand = LConstantOperand::cast(instr->key());
    offset += ToInteger32(const_operand) * kPointerSize;
    store_base = elements;
  } else {
    Register key = ToRegister(instr->key());
    // Even though the HLoadKeyed instruction forces the input
    // representation for the key to be an integer, the input gets replaced
    // during bounds check elimination with the index argument to the bounds
    // check, which can be tagged, so that case must be handled here, too.
    if (hinstr->key()->representation().IsSmi()) {
      __ SmiToPtrArrayOffset(r0, key);
    } else {
      __ ShiftLeftImm(r0, key, Operand(kPointerSizeLog2));
    }
    __ add(scratch, elements, r0);
  }

  bool requires_hole_check = hinstr->RequiresHoleCheck();
  Representation representation = hinstr->representation();

#if V8_TARGET_ARCH_PPC64
  // 64-bit Smi optimization
  if (representation.IsInteger32() &&
      hinstr->elements_kind() == FAST_SMI_ELEMENTS) {
    DCHECK(!requires_hole_check);
    // Read int value directly from upper half of the smi.
    offset = SmiWordOffset(offset);
  }
#endif

  __ LoadRepresentation(result, MemOperand(store_base, offset), representation,
                        r0);

  // Check for the hole value.
  if (requires_hole_check) {
    if (IsFastSmiElementsKind(hinstr->elements_kind())) {
      __ TestIfSmi(result, r0);
      DeoptimizeIf(ne, instr, Deoptimizer::kNotASmi, cr0);
    } else {
      __ LoadRoot(scratch, Heap::kTheHoleValueRootIndex);
      __ cmp(result, scratch);
      DeoptimizeIf(eq, instr, Deoptimizer::kHole);
    }
  } else if (instr->hydrogen()->hole_mode() == CONVERT_HOLE_TO_UNDEFINED) {
    DCHECK(instr->hydrogen()->elements_kind() == FAST_HOLEY_ELEMENTS);
    Label done;
    __ LoadRoot(scratch, Heap::kTheHoleValueRootIndex);
    __ cmp(result, scratch);
    __ bne(&done);
    if (info()->IsStub()) {
      // A stub can safely convert the hole to undefined only if the array
      // protector cell contains (Smi) Isolate::kArrayProtectorValid. Otherwise
      // it needs to bail out.
      __ LoadRoot(result, Heap::kArrayProtectorRootIndex);
      __ LoadP(result, FieldMemOperand(result, Cell::kValueOffset));
      __ CmpSmiLiteral(result, Smi::FromInt(Isolate::kArrayProtectorValid), r0);
      DeoptimizeIf(ne, instr, Deoptimizer::kHole);
    }
    __ LoadRoot(result, Heap::kUndefinedValueRootIndex);
    __ bind(&done);
  }
}


void LCodeGen::DoLoadKeyed(LLoadKeyed* instr) {
  if (instr->is_fixed_typed_array()) {
    DoLoadKeyedExternalArray(instr);
  } else if (instr->hydrogen()->representation().IsDouble()) {
    DoLoadKeyedFixedDoubleArray(instr);
  } else {
    DoLoadKeyedFixedArray(instr);
  }
}


MemOperand LCodeGen::PrepareKeyedOperand(Register key, Register base,
                                         bool key_is_constant, bool key_is_smi,
                                         int constant_key,
                                         int element_size_shift,
                                         int base_offset) {
  Register scratch = scratch0();

  if (key_is_constant) {
    return MemOperand(base, (constant_key << element_size_shift) + base_offset);
  }

  bool needs_shift =
      (element_size_shift != (key_is_smi ? kSmiTagSize + kSmiShiftSize : 0));
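  // A smi key is already shifted left by (kSmiTagSize + kSmiShiftSize);
  // when that matches the element size shift, the tagged key can serve as
  // the byte offset directly.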

  if (!(base_offset || needs_shift)) {
    return MemOperand(base, key);
  }

  if (needs_shift) {
    __ IndexToArrayOffset(scratch, key, element_size_shift, key_is_smi);
    key = scratch;
  }

  if (base_offset) {
    __ Add(scratch, key, base_offset, r0);
  }

  return MemOperand(base, scratch);
}


void LCodeGen::DoLoadKeyedGeneric(LLoadKeyedGeneric* instr) {
  DCHECK(ToRegister(instr->context()).is(cp));
  DCHECK(ToRegister(instr->object()).is(LoadDescriptor::ReceiverRegister()));
  DCHECK(ToRegister(instr->key()).is(LoadDescriptor::NameRegister()));

  if (instr->hydrogen()->HasVectorAndSlot()) {
    EmitVectorLoadICRegisters<LLoadKeyedGeneric>(instr);
  }

  Handle<Code> ic = CodeFactory::KeyedLoadICInOptimizedCode(
                        isolate(), instr->hydrogen()->language_mode(),
                        instr->hydrogen()->initialization_state()).code();
  CallCode(ic, RelocInfo::CODE_TARGET, instr);
}


void LCodeGen::DoArgumentsElements(LArgumentsElements* instr) {
  Register scratch = scratch0();
  Register result = ToRegister(instr->result());

  if (instr->hydrogen()->from_inlined()) {
    __ subi(result, sp, Operand(2 * kPointerSize));
  } else {
    // Check if the calling frame is an arguments adaptor frame.
    __ LoadP(scratch, MemOperand(fp, StandardFrameConstants::kCallerFPOffset));
    __ LoadP(result,
             MemOperand(scratch, StandardFrameConstants::kContextOffset));
    __ CmpSmiLiteral(result, Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR), r0);

    // Result is the frame pointer for the frame if not adapted and for the
    // real frame below the adaptor frame if adapted.
    if (CpuFeatures::IsSupported(ISELECT)) {
      __ isel(eq, result, scratch, fp);
    } else {
      Label done, adapted;
      __ beq(&adapted);
      __ mr(result, fp);
      __ b(&done);

      __ bind(&adapted);
      __ mr(result, scratch);
      __ bind(&done);
    }
  }
}


void LCodeGen::DoArgumentsLength(LArgumentsLength* instr) {
  Register elem = ToRegister(instr->elements());
  Register result = ToRegister(instr->result());

  Label done;

  // If there is no arguments adaptor frame, the number of arguments is fixed.
  __ cmp(fp, elem);
  __ mov(result, Operand(scope()->num_parameters()));
  __ beq(&done);

  // Arguments adaptor frame present. Get argument length from there.
  __ LoadP(result, MemOperand(fp, StandardFrameConstants::kCallerFPOffset));
  __ LoadP(result,
           MemOperand(result, ArgumentsAdaptorFrameConstants::kLengthOffset));
  __ SmiUntag(result);

  // Argument length is in result register.
  __ bind(&done);
}


void LCodeGen::DoWrapReceiver(LWrapReceiver* instr) {
  Register receiver = ToRegister(instr->receiver());
  Register function = ToRegister(instr->function());
  Register result = ToRegister(instr->result());
  Register scratch = scratch0();

  // If the receiver is null or undefined, we have to pass the global
  // object as a receiver to normal functions. Values have to be
  // passed unchanged to builtins and strict-mode functions.
  Label global_object, result_in_receiver;

  if (!instr->hydrogen()->known_function()) {
    // Do not transform the receiver to object for strict mode
    // functions.
    __ LoadP(scratch,
             FieldMemOperand(function, JSFunction::kSharedFunctionInfoOffset));
    __ lwz(scratch,
           FieldMemOperand(scratch, SharedFunctionInfo::kCompilerHintsOffset));
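    // The compiler hints are stored as a Smi; on 32-bit targets the flag's
    // bit index is therefore offset by kSmiTagSize, while on 64-bit the
    // load reads the untagged payload word directly.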
    __ TestBit(scratch,
#if V8_TARGET_ARCH_PPC64
               SharedFunctionInfo::kStrictModeFunction,
#else
               SharedFunctionInfo::kStrictModeFunction + kSmiTagSize,
#endif
               r0);
    __ bne(&result_in_receiver, cr0);

    // Do not transform the receiver to object for builtins.
    __ TestBit(scratch,
#if V8_TARGET_ARCH_PPC64
               SharedFunctionInfo::kNative,
#else
               SharedFunctionInfo::kNative + kSmiTagSize,
#endif
               r0);
    __ bne(&result_in_receiver, cr0);
  }

  // Normal function. Replace undefined or null with global receiver.
  __ LoadRoot(scratch, Heap::kNullValueRootIndex);
  __ cmp(receiver, scratch);
  __ beq(&global_object);
  __ LoadRoot(scratch, Heap::kUndefinedValueRootIndex);
  __ cmp(receiver, scratch);
  __ beq(&global_object);

  // Deoptimize if the receiver is not a JS object.
  __ TestIfSmi(receiver, r0);
  DeoptimizeIf(eq, instr, Deoptimizer::kSmi, cr0);
  __ CompareObjectType(receiver, scratch, scratch, FIRST_SPEC_OBJECT_TYPE);
  DeoptimizeIf(lt, instr, Deoptimizer::kNotAJavaScriptObject);

  __ b(&result_in_receiver);
  __ bind(&global_object);
  __ LoadP(result, FieldMemOperand(function, JSFunction::kContextOffset));
  __ LoadP(result, ContextOperand(result, Context::GLOBAL_OBJECT_INDEX));
  __ LoadP(result, FieldMemOperand(result, GlobalObject::kGlobalProxyOffset));
  if (result.is(receiver)) {
    __ bind(&result_in_receiver);
  } else {
    Label result_ok;
    __ b(&result_ok);
    __ bind(&result_in_receiver);
    __ mr(result, receiver);
    __ bind(&result_ok);
  }
}


void LCodeGen::DoApplyArguments(LApplyArguments* instr) {
  Register receiver = ToRegister(instr->receiver());
  Register function = ToRegister(instr->function());
  Register length = ToRegister(instr->length());
  Register elements = ToRegister(instr->elements());
  Register scratch = scratch0();
  DCHECK(receiver.is(r3));  // Used for parameter count.
  DCHECK(function.is(r4));  // Required by InvokeFunction.
  DCHECK(ToRegister(instr->result()).is(r3));

  // Copy the arguments to this function possibly from the
  // adaptor frame below it.
  const uint32_t kArgumentsLimit = 1 * KB;
  __ cmpli(length, Operand(kArgumentsLimit));
  DeoptimizeIf(gt, instr, Deoptimizer::kTooManyArguments);

  // Push the receiver and use the register to keep the original
  // number of arguments.
  __ push(receiver);
  __ mr(receiver, length);
  // The arguments start one pointer size past elements.
  __ addi(elements, elements, Operand(1 * kPointerSize));

  // Loop through the arguments pushing them onto the execution
  // stack.
  Label invoke, loop;
  // length is a small non-negative integer, due to the test above.
  __ cmpi(length, Operand::Zero());
  __ beq(&invoke);
  __ mtctr(length);
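  // The CTR register drives the loop below: bdnz decrements it and
  // branches back while it is still non-zero.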
  __ bind(&loop);
  __ ShiftLeftImm(r0, length, Operand(kPointerSizeLog2));
  __ LoadPX(scratch, MemOperand(elements, r0));
  __ push(scratch);
  __ addi(length, length, Operand(-1));
  __ bdnz(&loop);

  __ bind(&invoke);
  DCHECK(instr->HasPointerMap());
  LPointerMap* pointers = instr->pointer_map();
  SafepointGenerator safepoint_generator(this, pointers, Safepoint::kLazyDeopt);
  // The number of arguments is stored in receiver which is r3, as expected
  // by InvokeFunction.
  ParameterCount actual(receiver);
  __ InvokeFunction(function, actual, CALL_FUNCTION, safepoint_generator);
}


void LCodeGen::DoPushArgument(LPushArgument* instr) {
  LOperand* argument = instr->value();
  if (argument->IsDoubleRegister() || argument->IsDoubleStackSlot()) {
    Abort(kDoPushArgumentNotImplementedForDoubleType);
  } else {
    Register argument_reg = EmitLoadRegister(argument, ip);
    __ push(argument_reg);
  }
}


void LCodeGen::DoDrop(LDrop* instr) { __ Drop(instr->count()); }


void LCodeGen::DoThisFunction(LThisFunction* instr) {
  Register result = ToRegister(instr->result());
  __ LoadP(result, MemOperand(fp, JavaScriptFrameConstants::kFunctionOffset));
}


void LCodeGen::DoContext(LContext* instr) {
  // If there is a non-return use, the context must be moved to a register.
  Register result = ToRegister(instr->result());
  if (info()->IsOptimizing()) {
    __ LoadP(result, MemOperand(fp, StandardFrameConstants::kContextOffset));
  } else {
    // If there is no frame, the context must be in cp.
    DCHECK(result.is(cp));
  }
}


void LCodeGen::DoDeclareGlobals(LDeclareGlobals* instr) {
  DCHECK(ToRegister(instr->context()).is(cp));
  __ Move(scratch0(), instr->hydrogen()->pairs());
  __ push(scratch0());
  __ LoadSmiLiteral(scratch0(), Smi::FromInt(instr->hydrogen()->flags()));
  __ push(scratch0());
  CallRuntime(Runtime::kDeclareGlobals, 2, instr);
}


void LCodeGen::CallKnownFunction(Handle<JSFunction> function,
                                 int formal_parameter_count, int arity,
                                 LInstruction* instr) {
  bool dont_adapt_arguments =
      formal_parameter_count == SharedFunctionInfo::kDontAdaptArgumentsSentinel;
  bool can_invoke_directly =
      dont_adapt_arguments || formal_parameter_count == arity;
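  // A direct invoke skips the arguments adaptor: it is safe when the
  // callee opted out of adaptation or when the actual argument count
  // already matches the formal parameter count.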

  Register function_reg = r4;

  LPointerMap* pointers = instr->pointer_map();

  if (can_invoke_directly) {
    // Change context.
    __ LoadP(cp, FieldMemOperand(function_reg, JSFunction::kContextOffset));

    // Always initialize r3 to the number of actual arguments.
    __ mov(r3, Operand(arity));

    bool is_self_call = function.is_identical_to(info()->closure());

    // Invoke function.
    if (is_self_call) {
      __ CallSelf();
    } else {
      __ LoadP(ip, FieldMemOperand(function_reg, JSFunction::kCodeEntryOffset));
      __ CallJSEntry(ip);
    }

    // Set up deoptimization.
    RecordSafepointWithLazyDeopt(instr, RECORD_SIMPLE_SAFEPOINT);
  } else {
    SafepointGenerator generator(this, pointers, Safepoint::kLazyDeopt);
    ParameterCount count(arity);
    ParameterCount expected(formal_parameter_count);
    __ InvokeFunction(function_reg, expected, count, CALL_FUNCTION, generator);
  }
}


void LCodeGen::DoDeferredMathAbsTaggedHeapNumber(LMathAbs* instr) {
  DCHECK(instr->context() != NULL);
  DCHECK(ToRegister(instr->context()).is(cp));
  Register input = ToRegister(instr->value());
  Register result = ToRegister(instr->result());
  Register scratch = scratch0();

  // Deoptimize if not a heap number.
  __ LoadP(scratch, FieldMemOperand(input, HeapObject::kMapOffset));
  __ LoadRoot(ip, Heap::kHeapNumberMapRootIndex);
  __ cmp(scratch, ip);
  DeoptimizeIf(ne, instr, Deoptimizer::kNotAHeapNumber);

  Label done;
  Register exponent = scratch0();
  scratch = no_reg;
  __ lwz(exponent, FieldMemOperand(input, HeapNumber::kExponentOffset));
  // Check the sign of the argument. If the argument is positive, just
  // return it.
  __ cmpwi(exponent, Operand::Zero());
  // Move the input to the result if necessary.
  __ Move(result, input);
  __ bge(&done);

  // Input is negative. Reverse its sign.
  // Preserve the value of all registers.
  {
    PushSafepointRegistersScope scope(this);

    // Registers were saved at the safepoint, so we can use
    // many scratch registers.
    Register tmp1 = input.is(r4) ? r3 : r4;
    Register tmp2 = input.is(r5) ? r3 : r5;
    Register tmp3 = input.is(r6) ? r3 : r6;
    Register tmp4 = input.is(r7) ? r3 : r7;

    // exponent: floating point exponent value.

    Label allocated, slow;
    __ LoadRoot(tmp4, Heap::kHeapNumberMapRootIndex);
    __ AllocateHeapNumber(tmp1, tmp2, tmp3, tmp4, &slow);
    __ b(&allocated);

    // Slow case: Call the runtime system to do the number allocation.
    __ bind(&slow);

    CallRuntimeFromDeferred(Runtime::kAllocateHeapNumber, 0, instr,
                            instr->context());
    // Set the pointer to the new heap number in tmp.
    if (!tmp1.is(r3)) __ mr(tmp1, r3);
    // Restore input_reg after call to runtime.
    __ LoadFromSafepointRegisterSlot(input, input);
    __ lwz(exponent, FieldMemOperand(input, HeapNumber::kExponentOffset));

    __ bind(&allocated);
    // exponent: floating point exponent value.
    // tmp1: allocated heap number.
    STATIC_ASSERT(HeapNumber::kSignMask == 0x80000000u);
    __ clrlwi(exponent, exponent, Operand(1));  // clear sign bit
    __ stw(exponent, FieldMemOperand(tmp1, HeapNumber::kExponentOffset));
    __ lwz(tmp2, FieldMemOperand(input, HeapNumber::kMantissaOffset));
    __ stw(tmp2, FieldMemOperand(tmp1, HeapNumber::kMantissaOffset));

    __ StoreToSafepointRegisterSlot(tmp1, result);
  }

  __ bind(&done);
}


void LCodeGen::EmitMathAbs(LMathAbs* instr) {
  Register input = ToRegister(instr->value());
  Register result = ToRegister(instr->result());
  Label done;
  __ cmpi(input, Operand::Zero());
  __ Move(result, input);
  __ bge(&done);
  __ li(r0, Operand::Zero());  // clear xer
  __ mtxer(r0);
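  // XER's summary overflow bit is sticky; clearing it first ensures the
  // overflow recorded by SetOE reflects only this negation, which can
  // overflow solely for the most negative representable value.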
  __ neg(result, result, SetOE, SetRC);
  // Deoptimize on overflow.
  DeoptimizeIf(overflow, instr, Deoptimizer::kOverflow, cr0);
  __ bind(&done);
}


#if V8_TARGET_ARCH_PPC64
void LCodeGen::EmitInteger32MathAbs(LMathAbs* instr) {
  Register input = ToRegister(instr->value());
  Register result = ToRegister(instr->result());
  Label done;
  __ cmpwi(input, Operand::Zero());
  __ Move(result, input);
  __ bge(&done);

  // Deoptimize on overflow.
  __ lis(r0, Operand(SIGN_EXT_IMM16(0x8000)));
  __ cmpw(input, r0);
  DeoptimizeIf(eq, instr, Deoptimizer::kOverflow);

  __ neg(result, result);
  __ bind(&done);
}
#endif


void LCodeGen::DoMathAbs(LMathAbs* instr) {
  // Class for deferred case.
  class DeferredMathAbsTaggedHeapNumber final : public LDeferredCode {
   public:
    DeferredMathAbsTaggedHeapNumber(LCodeGen* codegen, LMathAbs* instr)
        : LDeferredCode(codegen), instr_(instr) {}
    void Generate() override {
      codegen()->DoDeferredMathAbsTaggedHeapNumber(instr_);
    }
    LInstruction* instr() override { return instr_; }

   private:
    LMathAbs* instr_;
  };

  Representation r = instr->hydrogen()->value()->representation();
  if (r.IsDouble()) {
    DoubleRegister input = ToDoubleRegister(instr->value());
    DoubleRegister result = ToDoubleRegister(instr->result());
    __ fabs(result, input);
#if V8_TARGET_ARCH_PPC64
  } else if (r.IsInteger32()) {
    EmitInteger32MathAbs(instr);
  } else if (r.IsSmi()) {
#else
  } else if (r.IsSmiOrInteger32()) {
#endif
    EmitMathAbs(instr);
  } else {
    // Representation is tagged.
    DeferredMathAbsTaggedHeapNumber* deferred =
        new (zone()) DeferredMathAbsTaggedHeapNumber(this, instr);
    Register input = ToRegister(instr->value());
    // Smi check.
    __ JumpIfNotSmi(input, deferred->entry());
    // If smi, handle it directly.
    EmitMathAbs(instr);
    __ bind(deferred->exit());
  }
}


void LCodeGen::DoMathFloor(LMathFloor* instr) {
  DoubleRegister input = ToDoubleRegister(instr->value());
  Register result = ToRegister(instr->result());
  Register input_high = scratch0();
  Register scratch = ip;
  Label done, exact;

  __ TryInt32Floor(result, input, input_high, scratch, double_scratch0(), &done,
                   &exact);
  DeoptimizeIf(al, instr, Deoptimizer::kLostPrecisionOrNaN);

  __ bind(&exact);
  if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) {
    // Test for -0.
    __ cmpi(result, Operand::Zero());
    __ bne(&done);
    __ cmpwi(input_high, Operand::Zero());
    DeoptimizeIf(lt, instr, Deoptimizer::kMinusZero);
  }
  __ bind(&done);
}


void LCodeGen::DoMathRound(LMathRound* instr) {
  DoubleRegister input = ToDoubleRegister(instr->value());
  Register result = ToRegister(instr->result());
  DoubleRegister double_scratch1 = ToDoubleRegister(instr->temp());
  DoubleRegister input_plus_dot_five = double_scratch1;
  Register scratch1 = scratch0();
  Register scratch2 = ip;
  DoubleRegister dot_five = double_scratch0();
  Label convert, done;

  __ LoadDoubleLiteral(dot_five, 0.5, r0);
  __ fabs(double_scratch1, input);
  __ fcmpu(double_scratch1, dot_five);
  DeoptimizeIf(unordered, instr, Deoptimizer::kLostPrecisionOrNaN);
  // If input is in [-0.5, -0], the result is -0.
  // If input is in [+0, +0.5), the result is +0.
  // If the input is +0.5, the result is 1.
  __ bgt(&convert);  // Out of [-0.5, +0.5].
  if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) {
#if V8_TARGET_ARCH_PPC64
    __ MovDoubleToInt64(scratch1, input);
#else
    __ MovDoubleHighToInt(scratch1, input);
#endif
    __ cmpi(scratch1, Operand::Zero());
    // [-0.5, -0].
    DeoptimizeIf(lt, instr, Deoptimizer::kMinusZero);
  }
  __ fcmpu(input, dot_five);
  if (CpuFeatures::IsSupported(ISELECT)) {
    __ li(result, Operand(1));
    __ isel(lt, result, r0, result);
    __ b(&done);
  } else {
    Label return_zero;
    __ bne(&return_zero);
    __ li(result, Operand(1));  // +0.5.
    __ b(&done);
    // Remaining cases: [+0, +0.5) or [-0.5, +0.5), depending on
    // flag kBailoutOnMinusZero.
    __ bind(&return_zero);
    __ li(result, Operand::Zero());
    __ b(&done);
  }

  __ bind(&convert);
  __ fadd(input_plus_dot_five, input, dot_five);
  // Reuse dot_five (double_scratch0) as we no longer need this value.
  __ TryInt32Floor(result, input_plus_dot_five, scratch1, scratch2,
                   double_scratch0(), &done, &done);
  DeoptimizeIf(al, instr, Deoptimizer::kLostPrecisionOrNaN);
  __ bind(&done);
}


void LCodeGen::DoMathFround(LMathFround* instr) {
  DoubleRegister input_reg = ToDoubleRegister(instr->value());
  DoubleRegister output_reg = ToDoubleRegister(instr->result());
  __ frsp(output_reg, input_reg);
}


void LCodeGen::DoMathSqrt(LMathSqrt* instr) {
  DoubleRegister input = ToDoubleRegister(instr->value());
  DoubleRegister result = ToDoubleRegister(instr->result());
  __ fsqrt(result, input);
}


void LCodeGen::DoMathPowHalf(LMathPowHalf* instr) {
  DoubleRegister input = ToDoubleRegister(instr->value());
  DoubleRegister result = ToDoubleRegister(instr->result());
  DoubleRegister temp = double_scratch0();

  // Note that according to ECMA-262 15.8.2.13:
  // Math.pow(-Infinity, 0.5) == Infinity
  // Math.sqrt(-Infinity) == NaN
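  // Hence -Infinity is special-cased before falling through to fsqrt, and
  // +0 is added first so a -0 input yields +0 (fsqrt(-0) would return -0).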
  Label skip, done;

  __ LoadDoubleLiteral(temp, -V8_INFINITY, scratch0());
  __ fcmpu(input, temp);
  __ bne(&skip);
  __ fneg(result, temp);
  __ b(&done);

  // Add +0 to convert -0 to +0.
  __ bind(&skip);
  __ fadd(result, input, kDoubleRegZero);
  __ fsqrt(result, result);
  __ bind(&done);
}


void LCodeGen::DoPower(LPower* instr) {
  Representation exponent_type = instr->hydrogen()->right()->representation();
  // Having marked this as a call, we can use any registers.
  // Just make sure that the input/output registers are the expected ones.
  Register tagged_exponent = MathPowTaggedDescriptor::exponent();
  DCHECK(!instr->right()->IsDoubleRegister() ||
         ToDoubleRegister(instr->right()).is(d2));
  DCHECK(!instr->right()->IsRegister() ||
         ToRegister(instr->right()).is(tagged_exponent));
  DCHECK(ToDoubleRegister(instr->left()).is(d1));
  DCHECK(ToDoubleRegister(instr->result()).is(d3));

  if (exponent_type.IsSmi()) {
    MathPowStub stub(isolate(), MathPowStub::TAGGED);
    __ CallStub(&stub);
  } else if (exponent_type.IsTagged()) {
    Label no_deopt;
    __ JumpIfSmi(tagged_exponent, &no_deopt);
    DCHECK(!r10.is(tagged_exponent));
    __ LoadP(r10, FieldMemOperand(tagged_exponent, HeapObject::kMapOffset));
    __ LoadRoot(ip, Heap::kHeapNumberMapRootIndex);
    __ cmp(r10, ip);
    DeoptimizeIf(ne, instr, Deoptimizer::kNotAHeapNumber);
    __ bind(&no_deopt);
    MathPowStub stub(isolate(), MathPowStub::TAGGED);
    __ CallStub(&stub);
  } else if (exponent_type.IsInteger32()) {
    MathPowStub stub(isolate(), MathPowStub::INTEGER);
    __ CallStub(&stub);
  } else {
    DCHECK(exponent_type.IsDouble());
    MathPowStub stub(isolate(), MathPowStub::DOUBLE);
    __ CallStub(&stub);
  }
}


void LCodeGen::DoMathExp(LMathExp* instr) {
  DoubleRegister input = ToDoubleRegister(instr->value());
  DoubleRegister result = ToDoubleRegister(instr->result());
  DoubleRegister double_scratch1 = ToDoubleRegister(instr->double_temp());
  DoubleRegister double_scratch2 = double_scratch0();
  Register temp1 = ToRegister(instr->temp1());
  Register temp2 = ToRegister(instr->temp2());

  MathExpGenerator::EmitMathExp(masm(), input, result, double_scratch1,
                                double_scratch2, temp1, temp2, scratch0());
}


void LCodeGen::DoMathLog(LMathLog* instr) {
  __ PrepareCallCFunction(0, 1, scratch0());
  __ MovToFloatParameter(ToDoubleRegister(instr->value()));
  __ CallCFunction(ExternalReference::math_log_double_function(isolate()), 0,
                   1);
  __ MovFromFloatResult(ToDoubleRegister(instr->result()));
}


void LCodeGen::DoMathClz32(LMathClz32* instr) {
  Register input = ToRegister(instr->value());
  Register result = ToRegister(instr->result());
  __ cntlzw_(result, input);
}


void LCodeGen::DoInvokeFunction(LInvokeFunction* instr) {
  DCHECK(ToRegister(instr->context()).is(cp));
  DCHECK(ToRegister(instr->function()).is(r4));
  DCHECK(instr->HasPointerMap());

  Handle<JSFunction> known_function = instr->hydrogen()->known_function();
  if (known_function.is_null()) {
    LPointerMap* pointers = instr->pointer_map();
    SafepointGenerator generator(this, pointers, Safepoint::kLazyDeopt);
    ParameterCount count(instr->arity());
    __ InvokeFunction(r4, count, CALL_FUNCTION, generator);
  } else {
    CallKnownFunction(known_function,
                      instr->hydrogen()->formal_parameter_count(),
                      instr->arity(), instr);
  }
}


void LCodeGen::DoCallWithDescriptor(LCallWithDescriptor* instr) {
  DCHECK(ToRegister(instr->result()).is(r3));

  if (instr->hydrogen()->IsTailCall()) {
    if (NeedsEagerFrame()) __ LeaveFrame(StackFrame::INTERNAL);

    if (instr->target()->IsConstantOperand()) {
      LConstantOperand* target = LConstantOperand::cast(instr->target());
      Handle<Code> code = Handle<Code>::cast(ToHandle(target));
      __ Jump(code, RelocInfo::CODE_TARGET);
    } else {
      DCHECK(instr->target()->IsRegister());
      Register target = ToRegister(instr->target());
      __ addi(ip, target, Operand(Code::kHeaderSize - kHeapObjectTag));
      __ JumpToJSEntry(ip);
    }
  } else {
    LPointerMap* pointers = instr->pointer_map();
    SafepointGenerator generator(this, pointers, Safepoint::kLazyDeopt);

    if (instr->target()->IsConstantOperand()) {
      LConstantOperand* target = LConstantOperand::cast(instr->target());
      Handle<Code> code = Handle<Code>::cast(ToHandle(target));
      generator.BeforeCall(__ CallSize(code, RelocInfo::CODE_TARGET));
      __ Call(code, RelocInfo::CODE_TARGET);
    } else {
      DCHECK(instr->target()->IsRegister());
      Register target = ToRegister(instr->target());
      generator.BeforeCall(__ CallSize(target));
      __ addi(ip, target, Operand(Code::kHeaderSize - kHeapObjectTag));
      __ CallJSEntry(ip);
    }
    generator.AfterCall();
  }
}


void LCodeGen::DoCallJSFunction(LCallJSFunction* instr) {
  DCHECK(ToRegister(instr->function()).is(r4));
  DCHECK(ToRegister(instr->result()).is(r3));

  __ mov(r3, Operand(instr->arity()));

  // Change context.
  __ LoadP(cp, FieldMemOperand(r4, JSFunction::kContextOffset));

  bool is_self_call = false;
  if (instr->hydrogen()->function()->IsConstant()) {
    HConstant* fun_const = HConstant::cast(instr->hydrogen()->function());
    Handle<JSFunction> jsfun =
        Handle<JSFunction>::cast(fun_const->handle(isolate()));
    is_self_call = jsfun.is_identical_to(info()->closure());
  }

  if (is_self_call) {
    __ CallSelf();
  } else {
    __ LoadP(ip, FieldMemOperand(r4, JSFunction::kCodeEntryOffset));
    __ CallJSEntry(ip);
  }

  RecordSafepointWithLazyDeopt(instr, RECORD_SIMPLE_SAFEPOINT);
}


void LCodeGen::DoCallFunction(LCallFunction* instr) {
  DCHECK(ToRegister(instr->context()).is(cp));
  DCHECK(ToRegister(instr->function()).is(r4));
  DCHECK(ToRegister(instr->result()).is(r3));

  int arity = instr->arity();
  CallFunctionFlags flags = instr->hydrogen()->function_flags();
  if (instr->hydrogen()->HasVectorAndSlot()) {
    Register slot_register = ToRegister(instr->temp_slot());
    Register vector_register = ToRegister(instr->temp_vector());
    DCHECK(slot_register.is(r6));
    DCHECK(vector_register.is(r5));

    AllowDeferredHandleDereference vector_structure_check;
    Handle<TypeFeedbackVector> vector = instr->hydrogen()->feedback_vector();
    int index = vector->GetIndex(instr->hydrogen()->slot());

    __ Move(vector_register, vector);
    __ LoadSmiLiteral(slot_register, Smi::FromInt(index));

    CallICState::CallType call_type =
        (flags & CALL_AS_METHOD) ? CallICState::METHOD : CallICState::FUNCTION;

    Handle<Code> ic =
        CodeFactory::CallICInOptimizedCode(isolate(), arity, call_type).code();
    CallCode(ic, RelocInfo::CODE_TARGET, instr);
  } else {
    CallFunctionStub stub(isolate(), arity, flags);
    CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
  }
}


void LCodeGen::DoCallNew(LCallNew* instr) {
  DCHECK(ToRegister(instr->context()).is(cp));
  DCHECK(ToRegister(instr->constructor()).is(r4));
  DCHECK(ToRegister(instr->result()).is(r3));

  __ mov(r3, Operand(instr->arity()));
  // No cell in r5 for construct type feedback in optimized code.
  __ LoadRoot(r5, Heap::kUndefinedValueRootIndex);
  CallConstructStub stub(isolate(), NO_CALL_CONSTRUCTOR_FLAGS);
  CallCode(stub.GetCode(), RelocInfo::CONSTRUCT_CALL, instr);
}


void LCodeGen::DoCallNewArray(LCallNewArray* instr) {
  DCHECK(ToRegister(instr->context()).is(cp));
  DCHECK(ToRegister(instr->constructor()).is(r4));
  DCHECK(ToRegister(instr->result()).is(r3));

  __ mov(r3, Operand(instr->arity()));
  if (instr->arity() == 1) {
    // We only need the allocation site when there is a length argument;
    // that case may bail out to the runtime, which determines the correct
    // elements kind from the site.
    __ Move(r5, instr->hydrogen()->site());
  } else {
    __ LoadRoot(r5, Heap::kUndefinedValueRootIndex);
  }
  ElementsKind kind = instr->hydrogen()->elements_kind();
  AllocationSiteOverrideMode override_mode =
      (AllocationSite::GetMode(kind) == TRACK_ALLOCATION_SITE)
          ? DISABLE_ALLOCATION_SITES
          : DONT_OVERRIDE;

  if (instr->arity() == 0) {
    ArrayNoArgumentConstructorStub stub(isolate(), kind, override_mode);
    CallCode(stub.GetCode(), RelocInfo::CONSTRUCT_CALL, instr);
  } else if (instr->arity() == 1) {
    Label done;
    if (IsFastPackedElementsKind(kind)) {
      Label packed_case;
      // The elements kind may need to change to the holey variant; look at
      // the first argument (the length) to decide.
      __ LoadP(r8, MemOperand(sp, 0));
      __ cmpi(r8, Operand::Zero());
      __ beq(&packed_case);

      ElementsKind holey_kind = GetHoleyElementsKind(kind);
      ArraySingleArgumentConstructorStub stub(isolate(), holey_kind,
                                              override_mode);
      CallCode(stub.GetCode(), RelocInfo::CONSTRUCT_CALL, instr);
      __ b(&done);
      __ bind(&packed_case);
    }

    ArraySingleArgumentConstructorStub stub(isolate(), kind, override_mode);
    CallCode(stub.GetCode(), RelocInfo::CONSTRUCT_CALL, instr);
    __ bind(&done);
  } else {
    ArrayNArgumentsConstructorStub stub(isolate(), kind, override_mode);
    CallCode(stub.GetCode(), RelocInfo::CONSTRUCT_CALL, instr);
  }
}


void LCodeGen::DoCallRuntime(LCallRuntime* instr) {
  CallRuntime(instr->function(), instr->arity(), instr);
}


void LCodeGen::DoStoreCodeEntry(LStoreCodeEntry* instr) {
  Register function = ToRegister(instr->function());
  Register code_object = ToRegister(instr->code_object());
  __ addi(code_object, code_object,
          Operand(Code::kHeaderSize - kHeapObjectTag));
  __ StoreP(code_object,
            FieldMemOperand(function, JSFunction::kCodeEntryOffset), r0);
}


void LCodeGen::DoInnerAllocatedObject(LInnerAllocatedObject* instr) {
  Register result = ToRegister(instr->result());
  Register base = ToRegister(instr->base_object());
  if (instr->offset()->IsConstantOperand()) {
    LConstantOperand* offset = LConstantOperand::cast(instr->offset());
    __ Add(result, base, ToInteger32(offset), r0);
  } else {
    Register offset = ToRegister(instr->offset());
    __ add(result, base, offset);
  }
}


void LCodeGen::DoStoreNamedField(LStoreNamedField* instr) {
  HStoreNamedField* hinstr = instr->hydrogen();
  Representation representation = instr->representation();

  Register object = ToRegister(instr->object());
  Register scratch = scratch0();
  HObjectAccess access = hinstr->access();
  int offset = access.offset();

  if (access.IsExternalMemory()) {
    Register value = ToRegister(instr->value());
    MemOperand operand = MemOperand(object, offset);
    __ StoreRepresentation(value, operand, representation, r0);
    return;
  }

  __ AssertNotSmi(object);

#if V8_TARGET_ARCH_PPC64
  DCHECK(!representation.IsSmi() || !instr->value()->IsConstantOperand() ||
         IsInteger32(LConstantOperand::cast(instr->value())));
#else
  DCHECK(!representation.IsSmi() || !instr->value()->IsConstantOperand() ||
         IsSmi(LConstantOperand::cast(instr->value())));
#endif
  if (!FLAG_unbox_double_fields && representation.IsDouble()) {
    DCHECK(access.IsInobject());
    DCHECK(!hinstr->has_transition());
    DCHECK(!hinstr->NeedsWriteBarrier());
    DoubleRegister value = ToDoubleRegister(instr->value());
    __ stfd(value, FieldMemOperand(object, offset));
    return;
  }

  if (hinstr->has_transition()) {
    Handle<Map> transition = hinstr->transition_map();
    AddDeprecationDependency(transition);
    __ mov(scratch, Operand(transition));
    __ StoreP(scratch, FieldMemOperand(object, HeapObject::kMapOffset), r0);
    if (hinstr->NeedsWriteBarrierForMap()) {
      Register temp = ToRegister(instr->temp());
      // Update the write barrier for the map field.
      __ RecordWriteForMap(object, scratch, temp, GetLinkRegisterState(),
                           kSaveFPRegs);
    }
  }

  // Do the store.
  Register record_dest = object;
  Register record_value = no_reg;
  Register record_scratch = scratch;
#if V8_TARGET_ARCH_PPC64
  if (FLAG_unbox_double_fields && representation.IsDouble()) {
    DCHECK(access.IsInobject());
    DoubleRegister value = ToDoubleRegister(instr->value());
    __ stfd(value, FieldMemOperand(object, offset));
    if (hinstr->NeedsWriteBarrier()) {
      record_value = ToRegister(instr->value());
    }
  } else {
    if (representation.IsSmi() &&
        hinstr->value()->representation().IsInteger32()) {
      DCHECK(hinstr->store_mode() == STORE_TO_INITIALIZED_ENTRY);
      // 64-bit Smi optimization
      // Store int value directly to upper half of the smi.
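      // This is safe only for stores to already-initialized Smi fields:
      // the other half of the word already holds the Smi tag bits (zero),
      // so writing just the payload half leaves a valid Smi.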
      offset = SmiWordOffset(offset);
      representation = Representation::Integer32();
    }
#endif
    if (access.IsInobject()) {
      Register value = ToRegister(instr->value());
      MemOperand operand = FieldMemOperand(object, offset);
      __ StoreRepresentation(value, operand, representation, r0);
      record_value = value;
    } else {
      Register value = ToRegister(instr->value());
      __ LoadP(scratch, FieldMemOperand(object, JSObject::kPropertiesOffset));
      MemOperand operand = FieldMemOperand(scratch, offset);
      __ StoreRepresentation(value, operand, representation, r0);
      record_dest = scratch;
      record_value = value;
      record_scratch = object;
    }
#if V8_TARGET_ARCH_PPC64
  }
#endif

  if (hinstr->NeedsWriteBarrier()) {
    __ RecordWriteField(record_dest, offset, record_value, record_scratch,
                        GetLinkRegisterState(), kSaveFPRegs,
                        EMIT_REMEMBERED_SET, hinstr->SmiCheckForWriteBarrier(),
                        hinstr->PointersToHereCheckForValue());
  }
}


void LCodeGen::DoStoreNamedGeneric(LStoreNamedGeneric* instr) {
  DCHECK(ToRegister(instr->context()).is(cp));
  DCHECK(ToRegister(instr->object()).is(StoreDescriptor::ReceiverRegister()));
  DCHECK(ToRegister(instr->value()).is(StoreDescriptor::ValueRegister()));

  if (instr->hydrogen()->HasVectorAndSlot()) {
    EmitVectorStoreICRegisters<LStoreNamedGeneric>(instr);
  }

  __ mov(StoreDescriptor::NameRegister(), Operand(instr->name()));
  Handle<Code> ic = CodeFactory::StoreICInOptimizedCode(
                        isolate(), instr->language_mode(),
                        instr->hydrogen()->initialization_state()).code();
  CallCode(ic, RelocInfo::CODE_TARGET, instr);
}


void LCodeGen::DoStoreGlobalViaContext(LStoreGlobalViaContext* instr) {
  DCHECK(ToRegister(instr->context()).is(cp));
  DCHECK(ToRegister(instr->value())
             .is(StoreGlobalViaContextDescriptor::ValueRegister()));

  int const slot = instr->slot_index();
  int const depth = instr->depth();
  if (depth <= StoreGlobalViaContextStub::kMaximumDepth) {
    __ mov(StoreGlobalViaContextDescriptor::SlotRegister(), Operand(slot));
    Handle<Code> stub = CodeFactory::StoreGlobalViaContext(
                            isolate(), depth, instr->language_mode()).code();
    CallCode(stub, RelocInfo::CODE_TARGET, instr);
  } else {
    __ Push(Smi::FromInt(slot));
    __ push(StoreGlobalViaContextDescriptor::ValueRegister());
    __ CallRuntime(is_strict(instr->language_mode())
                       ? Runtime::kStoreGlobalViaContext_Strict
                       : Runtime::kStoreGlobalViaContext_Sloppy,
                   2);
  }
}


void LCodeGen::DoBoundsCheck(LBoundsCheck* instr) {
  Representation representation = instr->hydrogen()->length()->representation();
  DCHECK(representation.Equals(instr->hydrogen()->index()->representation()));
  DCHECK(representation.IsSmiOrInteger32());

  Condition cc = instr->hydrogen()->allow_equality() ? lt : le;
  if (instr->length()->IsConstantOperand()) {
    int32_t length = ToInteger32(LConstantOperand::cast(instr->length()));
    Register index = ToRegister(instr->index());
    if (representation.IsSmi()) {
      __ Cmpli(index, Operand(Smi::FromInt(length)), r0);
    } else {
      __ Cmplwi(index, Operand(length), r0);
    }
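    // The operands above were compared in reverse order (index against
    // length), so commute the condition before using it.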
    cc = CommuteCondition(cc);
  } else if (instr->index()->IsConstantOperand()) {
    int32_t index = ToInteger32(LConstantOperand::cast(instr->index()));
    Register length = ToRegister(instr->length());
    if (representation.IsSmi()) {
      __ Cmpli(length, Operand(Smi::FromInt(index)), r0);
    } else {
      __ Cmplwi(length, Operand(index), r0);
    }
  } else {
    Register index = ToRegister(instr->index());
    Register length = ToRegister(instr->length());
    if (representation.IsSmi()) {
      __ cmpl(length, index);
    } else {
      __ cmplw(length, index);
    }
  }
  if (FLAG_debug_code && instr->hydrogen()->skip_check()) {
    Label done;
    __ b(NegateCondition(cc), &done);
    __ stop("eliminated bounds check failed");
    __ bind(&done);
  } else {
    DeoptimizeIf(cc, instr, Deoptimizer::kOutOfBounds);
  }
}


void LCodeGen::DoStoreKeyedExternalArray(LStoreKeyed* instr) {
  Register external_pointer = ToRegister(instr->elements());
  Register key = no_reg;
  ElementsKind elements_kind = instr->elements_kind();
  bool key_is_constant = instr->key()->IsConstantOperand();
  int constant_key = 0;
  if (key_is_constant) {
    constant_key = ToInteger32(LConstantOperand::cast(instr->key()));
    if (constant_key & 0xF0000000) {
      Abort(kArrayIndexConstantValueTooBig);
    }
  } else {
    key = ToRegister(instr->key());
  }
  int element_size_shift = ElementsKindToShiftSize(elements_kind);
  bool key_is_smi = instr->hydrogen()->key()->representation().IsSmi();
  int base_offset = instr->base_offset();

  if (elements_kind == FLOAT32_ELEMENTS || elements_kind == FLOAT64_ELEMENTS) {
    Register address = scratch0();
    DoubleRegister value(ToDoubleRegister(instr->value()));
    if (key_is_constant) {
      if (constant_key != 0) {
        __ Add(address, external_pointer, constant_key << element_size_shift,
               r0);
      } else {
        address = external_pointer;
      }
    } else {
      __ IndexToArrayOffset(r0, key, element_size_shift, key_is_smi);
      __ add(address, external_pointer, r0);
    }
    if (elements_kind == FLOAT32_ELEMENTS) {
      __ frsp(double_scratch0(), value);
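      // frsp performs the double-to-single rounding explicitly; stfs below
      // assumes its operand is already representable in single precision.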
4418 __ stfs(double_scratch0(), MemOperand(address, base_offset)); | |
4419 } else { // Storing doubles, not floats. | |
4420 __ stfd(value, MemOperand(address, base_offset)); | |
4421 } | |
4422 } else { | |
4423 Register value(ToRegister(instr->value())); | |
4424 MemOperand mem_operand = | |
4425 PrepareKeyedOperand(key, external_pointer, key_is_constant, key_is_smi, | |
4426 constant_key, element_size_shift, base_offset); | |
4427 switch (elements_kind) { | |
4428 case UINT8_ELEMENTS: | |
4429 case UINT8_CLAMPED_ELEMENTS: | |
4430 case INT8_ELEMENTS: | |
4431 if (key_is_constant) { | |
4432 __ StoreByte(value, mem_operand, r0); | |
4433 } else { | |
4434 __ stbx(value, mem_operand); | |
4435 } | |
4436 break; | |
4437 case INT16_ELEMENTS: | |
4438 case UINT16_ELEMENTS: | |
4439 if (key_is_constant) { | |
4440 __ StoreHalfWord(value, mem_operand, r0); | |
4441 } else { | |
4442 __ sthx(value, mem_operand); | |
4443 } | |
4444 break; | |
4445 case INT32_ELEMENTS: | |
4446 case UINT32_ELEMENTS: | |
4447 if (key_is_constant) { | |
4448 __ StoreWord(value, mem_operand, r0); | |
4449 } else { | |
4450 __ stwx(value, mem_operand); | |
4451 } | |
4452 break; | |
4453 case FLOAT32_ELEMENTS: | |
4454 case FLOAT64_ELEMENTS: | |
4455 case FAST_DOUBLE_ELEMENTS: | |
4456 case FAST_ELEMENTS: | |
4457 case FAST_SMI_ELEMENTS: | |
4458 case FAST_HOLEY_DOUBLE_ELEMENTS: | |
4459 case FAST_HOLEY_ELEMENTS: | |
4460 case FAST_HOLEY_SMI_ELEMENTS: | |
4461 case DICTIONARY_ELEMENTS: | |
4462 case FAST_SLOPPY_ARGUMENTS_ELEMENTS: | |
4463 case SLOW_SLOPPY_ARGUMENTS_ELEMENTS: | |
4464 UNREACHABLE(); | |
4465 break; | |
4466 } | |
4467 } | |
4468 } | |
4469 | |
4470 | |
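// Stores a double value into a FAST_DOUBLE_ELEMENTS backing store,
// canonicalizing NaNs first when the instruction requires it.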
void LCodeGen::DoStoreKeyedFixedDoubleArray(LStoreKeyed* instr) {
  DoubleRegister value = ToDoubleRegister(instr->value());
  Register elements = ToRegister(instr->elements());
  Register key = no_reg;
  Register scratch = scratch0();
  DoubleRegister double_scratch = double_scratch0();
  bool key_is_constant = instr->key()->IsConstantOperand();
  int constant_key = 0;

  // Calculate the effective address of the slot in the array to store the
  // double value.
  if (key_is_constant) {
    constant_key = ToInteger32(LConstantOperand::cast(instr->key()));
    if (constant_key & 0xF0000000) {
      Abort(kArrayIndexConstantValueTooBig);
    }
  } else {
    key = ToRegister(instr->key());
  }
  int element_size_shift = ElementsKindToShiftSize(FAST_DOUBLE_ELEMENTS);
  bool key_is_smi = instr->hydrogen()->key()->representation().IsSmi();
  int base_offset = instr->base_offset() + constant_key * kDoubleSize;
  if (!key_is_constant) {
    __ IndexToArrayOffset(scratch, key, element_size_shift, key_is_smi);
    __ add(scratch, elements, scratch);
    elements = scratch;
  }
  if (!is_int16(base_offset)) {
    __ Add(scratch, elements, base_offset, r0);
    base_offset = 0;
    elements = scratch;
  }

  if (instr->NeedsCanonicalization()) {
    // Turn potential sNaN value into qNaN.
    __ CanonicalizeNaN(double_scratch, value);
    __ stfd(double_scratch, MemOperand(elements, base_offset));
  } else {
    __ stfd(value, MemOperand(elements, base_offset));
  }
}


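// Stores a tagged value (or, on 64-bit targets, an int32 written directly
// into an existing Smi slot) into a fixed array, emitting a write barrier
// when the store may create a new reference.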
void LCodeGen::DoStoreKeyedFixedArray(LStoreKeyed* instr) {
  HStoreKeyed* hinstr = instr->hydrogen();
  Register value = ToRegister(instr->value());
  Register elements = ToRegister(instr->elements());
  Register key = instr->key()->IsRegister() ? ToRegister(instr->key()) : no_reg;
  Register scratch = scratch0();
  Register store_base = scratch;
  int offset = instr->base_offset();

  // Do the store.
  if (instr->key()->IsConstantOperand()) {
    DCHECK(!hinstr->NeedsWriteBarrier());
    LConstantOperand* const_operand = LConstantOperand::cast(instr->key());
    offset += ToInteger32(const_operand) * kPointerSize;
    store_base = elements;
  } else {
    // Even though the HStoreKeyed instruction forces the input
    // representation for the key to be an integer, the input gets replaced
    // during bounds check elimination with the index argument to the bounds
    // check, which can be tagged, so that case must be handled here, too.
    if (hinstr->key()->representation().IsSmi()) {
      __ SmiToPtrArrayOffset(scratch, key);
    } else {
      __ ShiftLeftImm(scratch, key, Operand(kPointerSizeLog2));
    }
    __ add(scratch, elements, scratch);
  }

  Representation representation = hinstr->value()->representation();

#if V8_TARGET_ARCH_PPC64
  // 64-bit Smi optimization
  if (representation.IsInteger32()) {
    DCHECK(hinstr->store_mode() == STORE_TO_INITIALIZED_ENTRY);
    DCHECK(hinstr->elements_kind() == FAST_SMI_ELEMENTS);
    // Store int value directly to upper half of the smi.
    offset = SmiWordOffset(offset);
  }
#endif

  __ StoreRepresentation(value, MemOperand(store_base, offset), representation,
                         r0);

  if (hinstr->NeedsWriteBarrier()) {
    SmiCheck check_needed = hinstr->value()->type().IsHeapObject()
                                ? OMIT_SMI_CHECK
                                : INLINE_SMI_CHECK;
    // Compute address of modified element and store it into key register.
    __ Add(key, store_base, offset, r0);
    __ RecordWrite(elements, key, value, GetLinkRegisterState(), kSaveFPRegs,
                   EMIT_REMEMBERED_SET, check_needed,
                   hinstr->PointersToHereCheckForValue());
  }
}


void LCodeGen::DoStoreKeyed(LStoreKeyed* instr) {
  // Dispatch by case: external (typed) array, fast double array, or
  // fast (tagged) array.
  if (instr->is_fixed_typed_array()) {
    DoStoreKeyedExternalArray(instr);
  } else if (instr->hydrogen()->value()->representation().IsDouble()) {
    DoStoreKeyedFixedDoubleArray(instr);
  } else {
    DoStoreKeyedFixedArray(instr);
  }
}


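// Generic keyed store through the KeyedStoreIC; the operands must already
// be in the registers mandated by StoreDescriptor.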
void LCodeGen::DoStoreKeyedGeneric(LStoreKeyedGeneric* instr) {
  DCHECK(ToRegister(instr->context()).is(cp));
  DCHECK(ToRegister(instr->object()).is(StoreDescriptor::ReceiverRegister()));
  DCHECK(ToRegister(instr->key()).is(StoreDescriptor::NameRegister()));
  DCHECK(ToRegister(instr->value()).is(StoreDescriptor::ValueRegister()));

  if (instr->hydrogen()->HasVectorAndSlot()) {
    EmitVectorStoreICRegisters<LStoreKeyedGeneric>(instr);
  }

  Handle<Code> ic = CodeFactory::KeyedStoreICInOptimizedCode(
                        isolate(), instr->language_mode(),
                        instr->hydrogen()->initialization_state()).code();
  CallCode(ic, RelocInfo::CODE_TARGET, instr);
}


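// Checks whether the key falls outside the current capacity and, if so,
// grows the elements backing store in deferred code.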
void LCodeGen::DoMaybeGrowElements(LMaybeGrowElements* instr) {
  class DeferredMaybeGrowElements final : public LDeferredCode {
   public:
    DeferredMaybeGrowElements(LCodeGen* codegen, LMaybeGrowElements* instr)
        : LDeferredCode(codegen), instr_(instr) {}
    void Generate() override { codegen()->DoDeferredMaybeGrowElements(instr_); }
    LInstruction* instr() override { return instr_; }

   private:
    LMaybeGrowElements* instr_;
  };

  Register result = r3;
  DeferredMaybeGrowElements* deferred =
      new (zone()) DeferredMaybeGrowElements(this, instr);
  LOperand* key = instr->key();
  LOperand* current_capacity = instr->current_capacity();

  DCHECK(instr->hydrogen()->key()->representation().IsInteger32());
  DCHECK(instr->hydrogen()->current_capacity()->representation().IsInteger32());
  DCHECK(key->IsConstantOperand() || key->IsRegister());
  DCHECK(current_capacity->IsConstantOperand() ||
         current_capacity->IsRegister());

  if (key->IsConstantOperand() && current_capacity->IsConstantOperand()) {
    int32_t constant_key = ToInteger32(LConstantOperand::cast(key));
    int32_t constant_capacity =
        ToInteger32(LConstantOperand::cast(current_capacity));
    if (constant_key >= constant_capacity) {
      // Deferred case.
      __ b(deferred->entry());
    }
  } else if (key->IsConstantOperand()) {
    int32_t constant_key = ToInteger32(LConstantOperand::cast(key));
    __ Cmpwi(ToRegister(current_capacity), Operand(constant_key), r0);
    __ ble(deferred->entry());
  } else if (current_capacity->IsConstantOperand()) {
    int32_t constant_capacity =
        ToInteger32(LConstantOperand::cast(current_capacity));
    __ Cmpwi(ToRegister(key), Operand(constant_capacity), r0);
    __ bge(deferred->entry());
  } else {
    __ cmpw(ToRegister(key), ToRegister(current_capacity));
    __ bge(deferred->entry());
  }

  if (instr->elements()->IsRegister()) {
    __ Move(result, ToRegister(instr->elements()));
  } else {
    __ LoadP(result, ToMemOperand(instr->elements()));
  }

  __ bind(deferred->exit());
}


void LCodeGen::DoDeferredMaybeGrowElements(LMaybeGrowElements* instr) {
  // TODO(3095996): Get rid of this. For now, we need to make the
  // result register contain a valid pointer because it is already
  // contained in the register pointer map.
  Register result = r3;
  __ li(result, Operand::Zero());

  // We have to call a stub.
  {
    PushSafepointRegistersScope scope(this);
    if (instr->object()->IsRegister()) {
      __ Move(result, ToRegister(instr->object()));
    } else {
      __ LoadP(result, ToMemOperand(instr->object()));
    }

    LOperand* key = instr->key();
    if (key->IsConstantOperand()) {
      __ LoadSmiLiteral(r6, ToSmi(LConstantOperand::cast(key)));
    } else {
      __ SmiTag(r6, ToRegister(key));
    }

    GrowArrayElementsStub stub(isolate(), instr->hydrogen()->is_js_array(),
                               instr->hydrogen()->kind());
    __ CallStub(&stub);
    RecordSafepointWithLazyDeopt(
        instr, RECORD_SAFEPOINT_WITH_REGISTERS_AND_NO_ARGUMENTS);
    __ StoreToSafepointRegisterSlot(result, result);
  }

  // Deopt on smi, which means the elements array changed to dictionary mode.
  __ TestIfSmi(result, r0);
  DeoptimizeIf(eq, instr, Deoptimizer::kSmi, cr0);
}


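// Transitions the object's elements kind: a simple transition just stores
// the new map (with a write barrier for maps); anything else goes through
// TransitionElementsKindStub.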
void LCodeGen::DoTransitionElementsKind(LTransitionElementsKind* instr) {
  Register object_reg = ToRegister(instr->object());
  Register scratch = scratch0();

  Handle<Map> from_map = instr->original_map();
  Handle<Map> to_map = instr->transitioned_map();
  ElementsKind from_kind = instr->from_kind();
  ElementsKind to_kind = instr->to_kind();

  Label not_applicable;
  __ LoadP(scratch, FieldMemOperand(object_reg, HeapObject::kMapOffset));
  __ Cmpi(scratch, Operand(from_map), r0);
  __ bne(&not_applicable);

  if (IsSimpleMapChangeTransition(from_kind, to_kind)) {
    Register new_map_reg = ToRegister(instr->new_map_temp());
    __ mov(new_map_reg, Operand(to_map));
    __ StoreP(new_map_reg, FieldMemOperand(object_reg, HeapObject::kMapOffset),
              r0);
    // Write barrier.
    __ RecordWriteForMap(object_reg, new_map_reg, scratch,
                         GetLinkRegisterState(), kDontSaveFPRegs);
  } else {
    DCHECK(ToRegister(instr->context()).is(cp));
    DCHECK(object_reg.is(r3));
    PushSafepointRegistersScope scope(this);
    __ Move(r4, to_map);
    bool is_js_array = from_map->instance_type() == JS_ARRAY_TYPE;
    TransitionElementsKindStub stub(isolate(), from_kind, to_kind, is_js_array);
    __ CallStub(&stub);
    RecordSafepointWithRegisters(instr->pointer_map(), 0,
                                 Safepoint::kLazyDeopt);
  }
  __ bind(&not_applicable);
}


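// Deoptimizes if an AllocationMemento is found directly after the array.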
void LCodeGen::DoTrapAllocationMemento(LTrapAllocationMemento* instr) {
  Register object = ToRegister(instr->object());
  Register temp = ToRegister(instr->temp());
  Label no_memento_found;
  __ TestJSArrayForAllocationMemento(object, temp, &no_memento_found);
  DeoptimizeIf(eq, instr, Deoptimizer::kMementoFound);
  __ bind(&no_memento_found);
}


void LCodeGen::DoStringAdd(LStringAdd* instr) {
  DCHECK(ToRegister(instr->context()).is(cp));
  DCHECK(ToRegister(instr->left()).is(r4));
  DCHECK(ToRegister(instr->right()).is(r3));
  StringAddStub stub(isolate(), instr->hydrogen()->flags(),
                     instr->hydrogen()->pretenure_flag());
  CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
}


void LCodeGen::DoStringCharCodeAt(LStringCharCodeAt* instr) {
  class DeferredStringCharCodeAt final : public LDeferredCode {
   public:
    DeferredStringCharCodeAt(LCodeGen* codegen, LStringCharCodeAt* instr)
        : LDeferredCode(codegen), instr_(instr) {}
    void Generate() override { codegen()->DoDeferredStringCharCodeAt(instr_); }
    LInstruction* instr() override { return instr_; }

   private:
    LStringCharCodeAt* instr_;
  };

  DeferredStringCharCodeAt* deferred =
      new (zone()) DeferredStringCharCodeAt(this, instr);

  StringCharLoadGenerator::Generate(
      masm(), ToRegister(instr->string()), ToRegister(instr->index()),
      ToRegister(instr->result()), deferred->entry());
  __ bind(deferred->exit());
}


void LCodeGen::DoDeferredStringCharCodeAt(LStringCharCodeAt* instr) {
  Register string = ToRegister(instr->string());
  Register result = ToRegister(instr->result());
  Register scratch = scratch0();

  // TODO(3095996): Get rid of this. For now, we need to make the
  // result register contain a valid pointer because it is already
  // contained in the register pointer map.
  __ li(result, Operand::Zero());

  PushSafepointRegistersScope scope(this);
  __ push(string);
  // Push the index as a smi. This is safe because of the checks in
  // DoStringCharCodeAt above.
  if (instr->index()->IsConstantOperand()) {
    int const_index = ToInteger32(LConstantOperand::cast(instr->index()));
    __ LoadSmiLiteral(scratch, Smi::FromInt(const_index));
    __ push(scratch);
  } else {
    Register index = ToRegister(instr->index());
    __ SmiTag(index);
    __ push(index);
  }
  CallRuntimeFromDeferred(Runtime::kStringCharCodeAtRT, 2, instr,
                          instr->context());
  __ AssertSmi(r3);
  __ SmiUntag(r3);
  __ StoreToSafepointRegisterSlot(r3, result);
}


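// Converts a char code to a one-character string via the single-character
// string cache; codes above the one-byte range and cache misses are handled
// in deferred code.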
void LCodeGen::DoStringCharFromCode(LStringCharFromCode* instr) {
  class DeferredStringCharFromCode final : public LDeferredCode {
   public:
    DeferredStringCharFromCode(LCodeGen* codegen, LStringCharFromCode* instr)
        : LDeferredCode(codegen), instr_(instr) {}
    void Generate() override {
      codegen()->DoDeferredStringCharFromCode(instr_);
    }
    LInstruction* instr() override { return instr_; }

   private:
    LStringCharFromCode* instr_;
  };

  DeferredStringCharFromCode* deferred =
      new (zone()) DeferredStringCharFromCode(this, instr);

  DCHECK(instr->hydrogen()->value()->representation().IsInteger32());
  Register char_code = ToRegister(instr->char_code());
  Register result = ToRegister(instr->result());
  DCHECK(!char_code.is(result));

  __ cmpli(char_code, Operand(String::kMaxOneByteCharCode));
  __ bgt(deferred->entry());
  __ LoadRoot(result, Heap::kSingleCharacterStringCacheRootIndex);
  __ ShiftLeftImm(r0, char_code, Operand(kPointerSizeLog2));
  __ add(result, result, r0);
  __ LoadP(result, FieldMemOperand(result, FixedArray::kHeaderSize));
  __ LoadRoot(ip, Heap::kUndefinedValueRootIndex);
  __ cmp(result, ip);
  __ beq(deferred->entry());
  __ bind(deferred->exit());
}


void LCodeGen::DoDeferredStringCharFromCode(LStringCharFromCode* instr) {
  Register char_code = ToRegister(instr->char_code());
  Register result = ToRegister(instr->result());

  // TODO(3095996): Get rid of this. For now, we need to make the
  // result register contain a valid pointer because it is already
  // contained in the register pointer map.
  __ li(result, Operand::Zero());

  PushSafepointRegistersScope scope(this);
  __ SmiTag(char_code);
  __ push(char_code);
  CallRuntimeFromDeferred(Runtime::kCharFromCode, 1, instr, instr->context());
  __ StoreToSafepointRegisterSlot(r3, result);
}


void LCodeGen::DoInteger32ToDouble(LInteger32ToDouble* instr) {
  LOperand* input = instr->value();
  DCHECK(input->IsRegister() || input->IsStackSlot());
  LOperand* output = instr->result();
  DCHECK(output->IsDoubleRegister());
  if (input->IsStackSlot()) {
    Register scratch = scratch0();
    __ LoadP(scratch, ToMemOperand(input));
    __ ConvertIntToDouble(scratch, ToDoubleRegister(output));
  } else {
    __ ConvertIntToDouble(ToRegister(input), ToDoubleRegister(output));
  }
}


void LCodeGen::DoUint32ToDouble(LUint32ToDouble* instr) {
  LOperand* input = instr->value();
  LOperand* output = instr->result();
  __ ConvertUnsignedIntToDouble(ToRegister(input), ToDoubleRegister(output));
}


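// Tags an int32 as a Smi. On PPC64 a 32-bit value always fits, so tagging
// cannot overflow; on 32-bit targets, overflow falls through to deferred
// heap-number allocation.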
void LCodeGen::DoNumberTagI(LNumberTagI* instr) {
  class DeferredNumberTagI final : public LDeferredCode {
   public:
    DeferredNumberTagI(LCodeGen* codegen, LNumberTagI* instr)
        : LDeferredCode(codegen), instr_(instr) {}
    void Generate() override {
      codegen()->DoDeferredNumberTagIU(instr_, instr_->value(), instr_->temp1(),
                                       instr_->temp2(), SIGNED_INT32);
    }
    LInstruction* instr() override { return instr_; }

   private:
    LNumberTagI* instr_;
  };

  Register src = ToRegister(instr->value());
  Register dst = ToRegister(instr->result());

  DeferredNumberTagI* deferred = new (zone()) DeferredNumberTagI(this, instr);
#if V8_TARGET_ARCH_PPC64
  __ SmiTag(dst, src);
#else
  __ SmiTagCheckOverflow(dst, src, r0);
  __ BranchOnOverflow(deferred->entry());
#endif
  __ bind(deferred->exit());
}


void LCodeGen::DoNumberTagU(LNumberTagU* instr) {
  class DeferredNumberTagU final : public LDeferredCode {
   public:
    DeferredNumberTagU(LCodeGen* codegen, LNumberTagU* instr)
        : LDeferredCode(codegen), instr_(instr) {}
    void Generate() override {
      codegen()->DoDeferredNumberTagIU(instr_, instr_->value(), instr_->temp1(),
                                       instr_->temp2(), UNSIGNED_INT32);
    }
    LInstruction* instr() override { return instr_; }

   private:
    LNumberTagU* instr_;
  };

  Register input = ToRegister(instr->value());
  Register result = ToRegister(instr->result());

  DeferredNumberTagU* deferred = new (zone()) DeferredNumberTagU(this, instr);
  __ Cmpli(input, Operand(Smi::kMaxValue), r0);
  __ bgt(deferred->entry());
  __ SmiTag(result, input);
  __ bind(deferred->exit());
}


void LCodeGen::DoDeferredNumberTagIU(LInstruction* instr, LOperand* value,
                                     LOperand* temp1, LOperand* temp2,
                                     IntegerSignedness signedness) {
  Label done, slow;
  Register src = ToRegister(value);
  Register dst = ToRegister(instr->result());
  Register tmp1 = scratch0();
  Register tmp2 = ToRegister(temp1);
  Register tmp3 = ToRegister(temp2);
  DoubleRegister dbl_scratch = double_scratch0();

  if (signedness == SIGNED_INT32) {
    // There was overflow, so bits 30 and 31 of the original integer
    // disagree. Try to allocate a heap number in new space and store
    // the value in there. If that fails, call the runtime system.
    if (dst.is(src)) {
      __ SmiUntag(src, dst);
      __ xoris(src, src, Operand(HeapNumber::kSignMask >> 16));
    }
    __ ConvertIntToDouble(src, dbl_scratch);
  } else {
    __ ConvertUnsignedIntToDouble(src, dbl_scratch);
  }

  if (FLAG_inline_new) {
    __ LoadRoot(tmp3, Heap::kHeapNumberMapRootIndex);
    __ AllocateHeapNumber(dst, tmp1, tmp2, tmp3, &slow);
    __ b(&done);
  }

  // Slow case: Call the runtime system to do the number allocation.
  __ bind(&slow);
  {
    // TODO(3095996): Put a valid pointer value in the stack slot where the
    // result register is stored, as this register is in the pointer map, but
    // contains an integer value.
    __ li(dst, Operand::Zero());

    // Preserve the value of all registers.
    PushSafepointRegistersScope scope(this);

    // NumberTagI and NumberTagD use the context from the frame, rather than
    // the environment's HContext or HInlinedContext value.
    // They only call Runtime::kAllocateHeapNumber.
    // The corresponding HChange instructions are added in a phase that does
    // not have easy access to the local context.
    __ LoadP(cp, MemOperand(fp, StandardFrameConstants::kContextOffset));
    __ CallRuntimeSaveDoubles(Runtime::kAllocateHeapNumber);
    RecordSafepointWithRegisters(instr->pointer_map(), 0,
                                 Safepoint::kNoLazyDeopt);
    __ StoreToSafepointRegisterSlot(r3, dst);
  }

  // Done. Put the value in dbl_scratch into the value of the allocated heap
  // number.
  __ bind(&done);
  __ stfd(dbl_scratch, FieldMemOperand(dst, HeapNumber::kValueOffset));
}


void LCodeGen::DoNumberTagD(LNumberTagD* instr) {
  class DeferredNumberTagD final : public LDeferredCode {
   public:
    DeferredNumberTagD(LCodeGen* codegen, LNumberTagD* instr)
        : LDeferredCode(codegen), instr_(instr) {}
    void Generate() override { codegen()->DoDeferredNumberTagD(instr_); }
    LInstruction* instr() override { return instr_; }

   private:
    LNumberTagD* instr_;
  };

  DoubleRegister input_reg = ToDoubleRegister(instr->value());
  Register scratch = scratch0();
  Register reg = ToRegister(instr->result());
  Register temp1 = ToRegister(instr->temp());
  Register temp2 = ToRegister(instr->temp2());

  DeferredNumberTagD* deferred = new (zone()) DeferredNumberTagD(this, instr);
  if (FLAG_inline_new) {
    __ LoadRoot(scratch, Heap::kHeapNumberMapRootIndex);
    __ AllocateHeapNumber(reg, temp1, temp2, scratch, deferred->entry());
  } else {
    __ b(deferred->entry());
  }
  __ bind(deferred->exit());
  __ stfd(input_reg, FieldMemOperand(reg, HeapNumber::kValueOffset));
}


void LCodeGen::DoDeferredNumberTagD(LNumberTagD* instr) {
  // TODO(3095996): Get rid of this. For now, we need to make the
  // result register contain a valid pointer because it is already
  // contained in the register pointer map.
  Register reg = ToRegister(instr->result());
  __ li(reg, Operand::Zero());

  PushSafepointRegistersScope scope(this);
  // NumberTagI and NumberTagD use the context from the frame, rather than
  // the environment's HContext or HInlinedContext value.
  // They only call Runtime::kAllocateHeapNumber.
  // The corresponding HChange instructions are added in a phase that does
  // not have easy access to the local context.
  __ LoadP(cp, MemOperand(fp, StandardFrameConstants::kContextOffset));
  __ CallRuntimeSaveDoubles(Runtime::kAllocateHeapNumber);
  RecordSafepointWithRegisters(instr->pointer_map(), 0,
                               Safepoint::kNoLazyDeopt);
  __ StoreToSafepointRegisterSlot(r3, reg);
}


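// Tags a value as a Smi, deoptimizing when an overflow check is required
// and the value does not fit.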
void LCodeGen::DoSmiTag(LSmiTag* instr) {
  HChange* hchange = instr->hydrogen();
  Register input = ToRegister(instr->value());
  Register output = ToRegister(instr->result());
  if (hchange->CheckFlag(HValue::kCanOverflow) &&
      hchange->value()->CheckFlag(HValue::kUint32)) {
    __ TestUnsignedSmiCandidate(input, r0);
    DeoptimizeIf(ne, instr, Deoptimizer::kOverflow, cr0);
  }
#if !V8_TARGET_ARCH_PPC64
  if (hchange->CheckFlag(HValue::kCanOverflow) &&
      !hchange->value()->CheckFlag(HValue::kUint32)) {
    __ SmiTagCheckOverflow(output, input, r0);
    DeoptimizeIf(lt, instr, Deoptimizer::kOverflow, cr0);
  } else {
#endif
    __ SmiTag(output, input);
#if !V8_TARGET_ARCH_PPC64
  }
#endif
}


void LCodeGen::DoSmiUntag(LSmiUntag* instr) {
  Register scratch = scratch0();
  Register input = ToRegister(instr->value());
  Register result = ToRegister(instr->result());
  if (instr->needs_check()) {
    // If the input is a HeapObject, the value of scratch won't be zero.
    __ andi(scratch, input, Operand(kHeapObjectTag));
    __ SmiUntag(result, input);
    DeoptimizeIf(ne, instr, Deoptimizer::kNotASmi, cr0);
  } else {
    __ SmiUntag(result, input);
  }
}


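// Unboxes a tagged number into a double register. Depending on the mode,
// the input may be a Smi or any tagged value (a heap number, or undefined
// when conversion to NaN is allowed); anything else deoptimizes.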
void LCodeGen::EmitNumberUntagD(LNumberUntagD* instr, Register input_reg,
                                DoubleRegister result_reg,
                                NumberUntagDMode mode) {
  bool can_convert_undefined_to_nan =
      instr->hydrogen()->can_convert_undefined_to_nan();
  bool deoptimize_on_minus_zero = instr->hydrogen()->deoptimize_on_minus_zero();

  Register scratch = scratch0();
  DCHECK(!result_reg.is(double_scratch0()));

  Label convert, load_smi, done;

  if (mode == NUMBER_CANDIDATE_IS_ANY_TAGGED) {
    // Smi check.
    __ UntagAndJumpIfSmi(scratch, input_reg, &load_smi);

    // Heap number map check.
    __ LoadP(scratch, FieldMemOperand(input_reg, HeapObject::kMapOffset));
    __ LoadRoot(ip, Heap::kHeapNumberMapRootIndex);
    __ cmp(scratch, ip);
    if (can_convert_undefined_to_nan) {
      __ bne(&convert);
    } else {
      DeoptimizeIf(ne, instr, Deoptimizer::kNotAHeapNumber);
    }
    // Load the heap number.
    __ lfd(result_reg, FieldMemOperand(input_reg, HeapNumber::kValueOffset));
    if (deoptimize_on_minus_zero) {
#if V8_TARGET_ARCH_PPC64
      __ MovDoubleToInt64(scratch, result_reg);
      // Rotate left by one for a simple compare.
      __ rldicl(scratch, scratch, 1, 0);
      __ cmpi(scratch, Operand(1));
#else
      __ MovDoubleToInt64(scratch, ip, result_reg);
      __ cmpi(ip, Operand::Zero());
      __ bne(&done);
      __ Cmpi(scratch, Operand(HeapNumber::kSignMask), r0);
#endif
      DeoptimizeIf(eq, instr, Deoptimizer::kMinusZero);
    }
    __ b(&done);
    if (can_convert_undefined_to_nan) {
      __ bind(&convert);
      // Convert undefined (and hole) to NaN.
      __ LoadRoot(ip, Heap::kUndefinedValueRootIndex);
      __ cmp(input_reg, ip);
      DeoptimizeIf(ne, instr, Deoptimizer::kNotAHeapNumberUndefined);
      __ LoadRoot(scratch, Heap::kNanValueRootIndex);
      __ lfd(result_reg, FieldMemOperand(scratch, HeapNumber::kValueOffset));
      __ b(&done);
    }
  } else {
    __ SmiUntag(scratch, input_reg);
    DCHECK(mode == NUMBER_CANDIDATE_IS_SMI);
  }
  // Smi to double register conversion.
  __ bind(&load_smi);
  // scratch: untagged value of input_reg
  __ ConvertIntToDouble(scratch, result_reg);
  __ bind(&done);
}


void LCodeGen::DoDeferredTaggedToI(LTaggedToI* instr) {
  Register input_reg = ToRegister(instr->value());
  Register scratch1 = scratch0();
  Register scratch2 = ToRegister(instr->temp());
  DoubleRegister double_scratch = double_scratch0();
  DoubleRegister double_scratch2 = ToDoubleRegister(instr->temp2());

  DCHECK(!scratch1.is(input_reg) && !scratch1.is(scratch2));
  DCHECK(!scratch2.is(input_reg) && !scratch2.is(scratch1));

  Label done;

  // Heap number map check.
  __ LoadP(scratch1, FieldMemOperand(input_reg, HeapObject::kMapOffset));
  __ LoadRoot(ip, Heap::kHeapNumberMapRootIndex);
  __ cmp(scratch1, ip);

  if (instr->truncating()) {
    // Performs a truncating conversion of a floating point number as used by
    // the JS bitwise operations.
    Label no_heap_number, check_bools, check_false;
    __ bne(&no_heap_number);
    __ mr(scratch2, input_reg);
    __ TruncateHeapNumberToI(input_reg, scratch2);
    __ b(&done);

    // Check for Oddballs. Undefined/False is converted to zero and True to one
    // for truncating conversions.
    __ bind(&no_heap_number);
    __ LoadRoot(ip, Heap::kUndefinedValueRootIndex);
    __ cmp(input_reg, ip);
    __ bne(&check_bools);
    __ li(input_reg, Operand::Zero());
    __ b(&done);

    __ bind(&check_bools);
    __ LoadRoot(ip, Heap::kTrueValueRootIndex);
    __ cmp(input_reg, ip);
    __ bne(&check_false);
    __ li(input_reg, Operand(1));
    __ b(&done);

    __ bind(&check_false);
    __ LoadRoot(ip, Heap::kFalseValueRootIndex);
    __ cmp(input_reg, ip);
    DeoptimizeIf(ne, instr, Deoptimizer::kNotAHeapNumberUndefinedBoolean);
    __ li(input_reg, Operand::Zero());
  } else {
    DeoptimizeIf(ne, instr, Deoptimizer::kNotAHeapNumber);

    __ lfd(double_scratch2,
           FieldMemOperand(input_reg, HeapNumber::kValueOffset));
    if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) {
      // Preserve the heap number pointer in scratch2 for the minus-zero
      // check below.
      __ mr(scratch2, input_reg);
    }
    __ TryDoubleToInt32Exact(input_reg, double_scratch2, scratch1,
                             double_scratch);
    DeoptimizeIf(ne, instr, Deoptimizer::kLostPrecisionOrNaN);

    if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) {
      __ cmpi(input_reg, Operand::Zero());
      __ bne(&done);
      __ lwz(scratch1,
             FieldMemOperand(scratch2, HeapNumber::kValueOffset +
                                           Register::kExponentOffset));
      __ cmpwi(scratch1, Operand::Zero());
      DeoptimizeIf(lt, instr, Deoptimizer::kMinusZero);
    }
  }
  __ bind(&done);
}


void LCodeGen::DoTaggedToI(LTaggedToI* instr) {
  class DeferredTaggedToI final : public LDeferredCode {
   public:
    DeferredTaggedToI(LCodeGen* codegen, LTaggedToI* instr)
        : LDeferredCode(codegen), instr_(instr) {}
    void Generate() override { codegen()->DoDeferredTaggedToI(instr_); }
    LInstruction* instr() override { return instr_; }

   private:
    LTaggedToI* instr_;
  };

  LOperand* input = instr->value();
  DCHECK(input->IsRegister());
  DCHECK(input->Equals(instr->result()));

  Register input_reg = ToRegister(input);

  if (instr->hydrogen()->value()->representation().IsSmi()) {
    __ SmiUntag(input_reg);
  } else {
    DeferredTaggedToI* deferred = new (zone()) DeferredTaggedToI(this, instr);

    // Branch to deferred code if the input is a HeapObject.
    __ JumpIfNotSmi(input_reg, deferred->entry());

    __ SmiUntag(input_reg);
    __ bind(deferred->exit());
  }
}


void LCodeGen::DoNumberUntagD(LNumberUntagD* instr) {
  LOperand* input = instr->value();
  DCHECK(input->IsRegister());
  LOperand* result = instr->result();
  DCHECK(result->IsDoubleRegister());

  Register input_reg = ToRegister(input);
  DoubleRegister result_reg = ToDoubleRegister(result);

  HValue* value = instr->hydrogen()->value();
  NumberUntagDMode mode = value->representation().IsSmi()
                              ? NUMBER_CANDIDATE_IS_SMI
                              : NUMBER_CANDIDATE_IS_ANY_TAGGED;

  EmitNumberUntagD(instr, input_reg, result_reg, mode);
}


void LCodeGen::DoDoubleToI(LDoubleToI* instr) {
  Register result_reg = ToRegister(instr->result());
  Register scratch1 = scratch0();
  DoubleRegister double_input = ToDoubleRegister(instr->value());
  DoubleRegister double_scratch = double_scratch0();

  if (instr->truncating()) {
    __ TruncateDoubleToI(result_reg, double_input);
  } else {
    __ TryDoubleToInt32Exact(result_reg, double_input, scratch1,
                             double_scratch);
    // Deoptimize if the input wasn't an int32 (inside a double).
    DeoptimizeIf(ne, instr, Deoptimizer::kLostPrecisionOrNaN);
    if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) {
      Label done;
      __ cmpi(result_reg, Operand::Zero());
      __ bne(&done);
#if V8_TARGET_ARCH_PPC64
      __ MovDoubleToInt64(scratch1, double_input);
#else
      __ MovDoubleHighToInt(scratch1, double_input);
#endif
      __ cmpi(scratch1, Operand::Zero());
      DeoptimizeIf(lt, instr, Deoptimizer::kMinusZero);
      __ bind(&done);
    }
  }
}


void LCodeGen::DoDoubleToSmi(LDoubleToSmi* instr) {
  Register result_reg = ToRegister(instr->result());
  Register scratch1 = scratch0();
  DoubleRegister double_input = ToDoubleRegister(instr->value());
  DoubleRegister double_scratch = double_scratch0();

  if (instr->truncating()) {
    __ TruncateDoubleToI(result_reg, double_input);
  } else {
    __ TryDoubleToInt32Exact(result_reg, double_input, scratch1,
                             double_scratch);
    // Deoptimize if the input wasn't an int32 (inside a double).
    DeoptimizeIf(ne, instr, Deoptimizer::kLostPrecisionOrNaN);
    if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) {
      Label done;
      __ cmpi(result_reg, Operand::Zero());
      __ bne(&done);
#if V8_TARGET_ARCH_PPC64
      __ MovDoubleToInt64(scratch1, double_input);
#else
      __ MovDoubleHighToInt(scratch1, double_input);
#endif
      __ cmpi(scratch1, Operand::Zero());
      DeoptimizeIf(lt, instr, Deoptimizer::kMinusZero);
      __ bind(&done);
    }
  }
#if V8_TARGET_ARCH_PPC64
  __ SmiTag(result_reg);
#else
  __ SmiTagCheckOverflow(result_reg, r0);
  DeoptimizeIf(lt, instr, Deoptimizer::kOverflow, cr0);
#endif
}


void LCodeGen::DoCheckSmi(LCheckSmi* instr) {
  LOperand* input = instr->value();
  __ TestIfSmi(ToRegister(input), r0);
  DeoptimizeIf(ne, instr, Deoptimizer::kNotASmi, cr0);
}


void LCodeGen::DoCheckNonSmi(LCheckNonSmi* instr) {
  if (!instr->hydrogen()->value()->type().IsHeapObject()) {
    LOperand* input = instr->value();
    __ TestIfSmi(ToRegister(input), r0);
    DeoptimizeIf(eq, instr, Deoptimizer::kSmi, cr0);
  }
}


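// Deoptimizes if the ArrayBuffer backing this view has been neutered.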
void LCodeGen::DoCheckArrayBufferNotNeutered(
    LCheckArrayBufferNotNeutered* instr) {
  Register view = ToRegister(instr->view());
  Register scratch = scratch0();

  __ LoadP(scratch, FieldMemOperand(view, JSArrayBufferView::kBufferOffset));
  __ lwz(scratch, FieldMemOperand(scratch, JSArrayBuffer::kBitFieldOffset));
  __ andi(r0, scratch, Operand(1 << JSArrayBuffer::WasNeutered::kShift));
  DeoptimizeIf(ne, instr, Deoptimizer::kOutOfBounds, cr0);
}


void LCodeGen::DoCheckInstanceType(LCheckInstanceType* instr) {
  Register input = ToRegister(instr->value());
  Register scratch = scratch0();

  __ LoadP(scratch, FieldMemOperand(input, HeapObject::kMapOffset));
  __ lbz(scratch, FieldMemOperand(scratch, Map::kInstanceTypeOffset));

  if (instr->hydrogen()->is_interval_check()) {
    InstanceType first;
    InstanceType last;
    instr->hydrogen()->GetCheckInterval(&first, &last);

    __ cmpli(scratch, Operand(first));

    // If there is only one type in the interval, check for equality.
    if (first == last) {
      DeoptimizeIf(ne, instr, Deoptimizer::kWrongInstanceType);
    } else {
      DeoptimizeIf(lt, instr, Deoptimizer::kWrongInstanceType);
      // Omit the check for the last type.
      if (last != LAST_TYPE) {
        __ cmpli(scratch, Operand(last));
        DeoptimizeIf(gt, instr, Deoptimizer::kWrongInstanceType);
      }
    }
  } else {
    uint8_t mask;
    uint8_t tag;
    instr->hydrogen()->GetCheckMaskAndTag(&mask, &tag);

    if (base::bits::IsPowerOfTwo32(mask)) {
      DCHECK(tag == 0 || base::bits::IsPowerOfTwo32(tag));
      __ andi(r0, scratch, Operand(mask));
      DeoptimizeIf(tag == 0 ? ne : eq, instr, Deoptimizer::kWrongInstanceType,
                   cr0);
    } else {
      __ andi(scratch, scratch, Operand(mask));
      __ cmpi(scratch, Operand(tag));
      DeoptimizeIf(ne, instr, Deoptimizer::kWrongInstanceType);
    }
  }
}


void LCodeGen::DoCheckValue(LCheckValue* instr) {
  Register reg = ToRegister(instr->value());
  Handle<HeapObject> object = instr->hydrogen()->object().handle();
  AllowDeferredHandleDereference smi_check;
  if (isolate()->heap()->InNewSpace(*object)) {
    Handle<Cell> cell = isolate()->factory()->NewCell(object);
    __ mov(ip, Operand(cell));
    __ LoadP(ip, FieldMemOperand(ip, Cell::kValueOffset));
    __ cmp(reg, ip);
  } else {
    __ Cmpi(reg, Operand(object), r0);
  }
  DeoptimizeIf(ne, instr, Deoptimizer::kValueMismatch);
}


void LCodeGen::DoDeferredInstanceMigration(LCheckMaps* instr, Register object) {
  Register temp = ToRegister(instr->temp());
  {
    PushSafepointRegistersScope scope(this);
    __ push(object);
    __ li(cp, Operand::Zero());
    __ CallRuntimeSaveDoubles(Runtime::kTryMigrateInstance);
    RecordSafepointWithRegisters(instr->pointer_map(), 1,
                                 Safepoint::kNoLazyDeopt);
    __ StoreToSafepointRegisterSlot(r3, temp);
  }
  __ TestIfSmi(temp, r0);
  DeoptimizeIf(eq, instr, Deoptimizer::kInstanceMigrationFailed, cr0);
}


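// Compares the object's map against the expected set. If a migration
// target exists, a failed check first attempts instance migration in
// deferred code before deoptimizing.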
void LCodeGen::DoCheckMaps(LCheckMaps* instr) {
  class DeferredCheckMaps final : public LDeferredCode {
   public:
    DeferredCheckMaps(LCodeGen* codegen, LCheckMaps* instr, Register object)
        : LDeferredCode(codegen), instr_(instr), object_(object) {
      SetExit(check_maps());
    }
    void Generate() override {
      codegen()->DoDeferredInstanceMigration(instr_, object_);
    }
    Label* check_maps() { return &check_maps_; }
    LInstruction* instr() override { return instr_; }

   private:
    LCheckMaps* instr_;
    Label check_maps_;
    Register object_;
  };

  if (instr->hydrogen()->IsStabilityCheck()) {
    const UniqueSet<Map>* maps = instr->hydrogen()->maps();
    for (int i = 0; i < maps->size(); ++i) {
      AddStabilityDependency(maps->at(i).handle());
    }
    return;
  }

  Register object = ToRegister(instr->value());
  Register map_reg = ToRegister(instr->temp());

  __ LoadP(map_reg, FieldMemOperand(object, HeapObject::kMapOffset));

  DeferredCheckMaps* deferred = NULL;
  if (instr->hydrogen()->HasMigrationTarget()) {
    deferred = new (zone()) DeferredCheckMaps(this, instr, object);
    __ bind(deferred->check_maps());
  }

  const UniqueSet<Map>* maps = instr->hydrogen()->maps();
  Label success;
  for (int i = 0; i < maps->size() - 1; i++) {
    Handle<Map> map = maps->at(i).handle();
    __ CompareMap(map_reg, map, &success);
    __ beq(&success);
  }

  Handle<Map> map = maps->at(maps->size() - 1).handle();
  __ CompareMap(map_reg, map, &success);
  if (instr->hydrogen()->HasMigrationTarget()) {
    __ bne(deferred->entry());
  } else {
    DeoptimizeIf(ne, instr, Deoptimizer::kWrongMap);
  }

  __ bind(&success);
}


void LCodeGen::DoClampDToUint8(LClampDToUint8* instr) {
  DoubleRegister value_reg = ToDoubleRegister(instr->unclamped());
  Register result_reg = ToRegister(instr->result());
  __ ClampDoubleToUint8(result_reg, value_reg, double_scratch0());
}


void LCodeGen::DoClampIToUint8(LClampIToUint8* instr) {
  Register unclamped_reg = ToRegister(instr->unclamped());
  Register result_reg = ToRegister(instr->result());
  __ ClampUint8(result_reg, unclamped_reg);
}


void LCodeGen::DoClampTToUint8(LClampTToUint8* instr) {
  Register scratch = scratch0();
  Register input_reg = ToRegister(instr->unclamped());
  Register result_reg = ToRegister(instr->result());
  DoubleRegister temp_reg = ToDoubleRegister(instr->temp());
  Label is_smi, done, heap_number;

  // Both smi and heap number cases are handled.
  __ UntagAndJumpIfSmi(result_reg, input_reg, &is_smi);

  // Check for heap number.
  __ LoadP(scratch, FieldMemOperand(input_reg, HeapObject::kMapOffset));
  __ Cmpi(scratch, Operand(factory()->heap_number_map()), r0);
  __ beq(&heap_number);

  // Check for undefined. Undefined is converted to zero for clamping
  // conversions.
  __ Cmpi(input_reg, Operand(factory()->undefined_value()), r0);
  DeoptimizeIf(ne, instr, Deoptimizer::kNotAHeapNumberUndefined);
  __ li(result_reg, Operand::Zero());
  __ b(&done);

  // Heap number case.
  __ bind(&heap_number);
  __ lfd(temp_reg, FieldMemOperand(input_reg, HeapNumber::kValueOffset));
  __ ClampDoubleToUint8(result_reg, temp_reg, double_scratch0());
  __ b(&done);

  // Smi case.
  __ bind(&is_smi);
  __ ClampUint8(result_reg, result_reg);

  __ bind(&done);
}


void LCodeGen::DoDoubleBits(LDoubleBits* instr) {
  DoubleRegister value_reg = ToDoubleRegister(instr->value());
  Register result_reg = ToRegister(instr->result());

  if (instr->hydrogen()->bits() == HDoubleBits::HIGH) {
    __ MovDoubleHighToInt(result_reg, value_reg);
  } else {
    __ MovDoubleLowToInt(result_reg, value_reg);
  }
}


void LCodeGen::DoConstructDouble(LConstructDouble* instr) {
  Register hi_reg = ToRegister(instr->hi());
  Register lo_reg = ToRegister(instr->lo());
  DoubleRegister result_reg = ToDoubleRegister(instr->result());
#if V8_TARGET_ARCH_PPC64
  __ MovInt64ComponentsToDouble(result_reg, hi_reg, lo_reg, r0);
#else
  __ MovInt64ToDouble(result_reg, hi_reg, lo_reg);
#endif
}


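// Allocates an object of the requested size inline, deferring to the
// runtime on failure; the allocation may be double-aligned, pretenured,
// or prefilled with one-pointer filler maps.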
void LCodeGen::DoAllocate(LAllocate* instr) {
  class DeferredAllocate final : public LDeferredCode {
   public:
    DeferredAllocate(LCodeGen* codegen, LAllocate* instr)
        : LDeferredCode(codegen), instr_(instr) {}
    void Generate() override { codegen()->DoDeferredAllocate(instr_); }
    LInstruction* instr() override { return instr_; }

   private:
    LAllocate* instr_;
  };

  DeferredAllocate* deferred = new (zone()) DeferredAllocate(this, instr);

  Register result = ToRegister(instr->result());
  Register scratch = ToRegister(instr->temp1());
  Register scratch2 = ToRegister(instr->temp2());

  // Allocate memory for the object.
  AllocationFlags flags = TAG_OBJECT;
  if (instr->hydrogen()->MustAllocateDoubleAligned()) {
    flags = static_cast<AllocationFlags>(flags | DOUBLE_ALIGNMENT);
  }
  if (instr->hydrogen()->IsOldSpaceAllocation()) {
    DCHECK(!instr->hydrogen()->IsNewSpaceAllocation());
    flags = static_cast<AllocationFlags>(flags | PRETENURE);
  }

  if (instr->size()->IsConstantOperand()) {
    int32_t size = ToInteger32(LConstantOperand::cast(instr->size()));
    CHECK(size <= Page::kMaxRegularHeapObjectSize);
    __ Allocate(size, result, scratch, scratch2, deferred->entry(), flags);
  } else {
    Register size = ToRegister(instr->size());
    __ Allocate(size, result, scratch, scratch2, deferred->entry(), flags);
  }

  __ bind(deferred->exit());

  if (instr->hydrogen()->MustPrefillWithFiller()) {
    if (instr->size()->IsConstantOperand()) {
      int32_t size = ToInteger32(LConstantOperand::cast(instr->size()));
      __ LoadIntLiteral(scratch, size - kHeapObjectTag);
    } else {
      __ subi(scratch, ToRegister(instr->size()), Operand(kHeapObjectTag));
    }
    __ mov(scratch2, Operand(isolate()->factory()->one_pointer_filler_map()));
    Label loop;
    __ bind(&loop);
    __ subi(scratch, scratch, Operand(kPointerSize));
    __ StorePX(scratch2, MemOperand(result, scratch));
    __ cmpi(scratch, Operand::Zero());
    __ bge(&loop);
  }
}


void LCodeGen::DoDeferredAllocate(LAllocate* instr) {
  Register result = ToRegister(instr->result());

  // TODO(3095996): Get rid of this. For now, we need to make the
  // result register contain a valid pointer because it is already
  // contained in the register pointer map.
  __ LoadSmiLiteral(result, Smi::FromInt(0));

  PushSafepointRegistersScope scope(this);
  if (instr->size()->IsRegister()) {
    Register size = ToRegister(instr->size());
    DCHECK(!size.is(result));
    __ SmiTag(size);
    __ push(size);
  } else {
    int32_t size = ToInteger32(LConstantOperand::cast(instr->size()));
#if !V8_TARGET_ARCH_PPC64
    if (size >= 0 && size <= Smi::kMaxValue) {
#endif
      __ Push(Smi::FromInt(size));
#if !V8_TARGET_ARCH_PPC64
    } else {
      // We should never get here at runtime => abort
      __ stop("invalid allocation size");
      return;
    }
#endif
  }

  int flags = AllocateDoubleAlignFlag::encode(
      instr->hydrogen()->MustAllocateDoubleAligned());
  if (instr->hydrogen()->IsOldSpaceAllocation()) {
    DCHECK(!instr->hydrogen()->IsNewSpaceAllocation());
    flags = AllocateTargetSpace::update(flags, OLD_SPACE);
  } else {
    flags = AllocateTargetSpace::update(flags, NEW_SPACE);
  }
  __ Push(Smi::FromInt(flags));

  CallRuntimeFromDeferred(Runtime::kAllocateInTargetSpace, 2, instr,
                          instr->context());
  __ StoreToSafepointRegisterSlot(r3, result);
}


void LCodeGen::DoToFastProperties(LToFastProperties* instr) {
  DCHECK(ToRegister(instr->value()).is(r3));
  __ push(r3);
  CallRuntime(Runtime::kToFastProperties, 1, instr);
}


void LCodeGen::DoRegExpLiteral(LRegExpLiteral* instr) {
  DCHECK(ToRegister(instr->context()).is(cp));
  Label materialized;
  // Registers will be used as follows:
  // r10 = literals array.
  // r4 = regexp literal.
  // r3 = regexp literal clone.
  // r5 and r7-r9 are used as temporaries.
  int literal_offset =
      LiteralsArray::OffsetOfLiteralAt(instr->hydrogen()->literal_index());
  __ Move(r10, instr->hydrogen()->literals());
  __ LoadP(r4, FieldMemOperand(r10, literal_offset));
  __ LoadRoot(ip, Heap::kUndefinedValueRootIndex);
  __ cmp(r4, ip);
  __ bne(&materialized);

  // Create regexp literal using runtime function
  // Result will be in r3.
  __ LoadSmiLiteral(r9, Smi::FromInt(instr->hydrogen()->literal_index()));
  __ mov(r8, Operand(instr->hydrogen()->pattern()));
  __ mov(r7, Operand(instr->hydrogen()->flags()));
  __ Push(r10, r9, r8, r7);
  CallRuntime(Runtime::kMaterializeRegExpLiteral, 4, instr);
  __ mr(r4, r3);

  __ bind(&materialized);
  int size = JSRegExp::kSize + JSRegExp::kInObjectFieldCount * kPointerSize;
  Label allocated, runtime_allocate;

  __ Allocate(size, r3, r5, r6, &runtime_allocate, TAG_OBJECT);
  __ b(&allocated);

  __ bind(&runtime_allocate);
  __ LoadSmiLiteral(r3, Smi::FromInt(size));
  __ Push(r4, r3);
  CallRuntime(Runtime::kAllocateInNewSpace, 1, instr);
  __ pop(r4);

  __ bind(&allocated);
  // Copy the content into the newly allocated memory.
  __ CopyFields(r3, r4, r5.bit(), size / kPointerSize);
}


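// Implements typeof: Smis are answered inline as "number"; everything else
// goes through the TypeofStub.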
void LCodeGen::DoTypeof(LTypeof* instr) {
  DCHECK(ToRegister(instr->value()).is(r6));
  DCHECK(ToRegister(instr->result()).is(r3));
  Label end, do_call;
  Register value_register = ToRegister(instr->value());
  __ JumpIfNotSmi(value_register, &do_call);
  __ mov(r3, Operand(isolate()->factory()->number_string()));
  __ b(&end);
  __ bind(&do_call);
  TypeofStub stub(isolate());
  CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
  __ bind(&end);
}


void LCodeGen::DoTypeofIsAndBranch(LTypeofIsAndBranch* instr) {
  Register input = ToRegister(instr->value());

  Condition final_branch_condition =
      EmitTypeofIs(instr->TrueLabel(chunk_), instr->FalseLabel(chunk_), input,
                   instr->type_literal());
  if (final_branch_condition != kNoCondition) {
    EmitBranch(instr, final_branch_condition);
  }
}


Condition LCodeGen::EmitTypeofIs(Label* true_label, Label* false_label,
                                 Register input, Handle<String> type_name) {
  Condition final_branch_condition = kNoCondition;
  Register scratch = scratch0();
  Factory* factory = isolate()->factory();
  if (String::Equals(type_name, factory->number_string())) {
    __ JumpIfSmi(input, true_label);
    __ LoadP(scratch, FieldMemOperand(input, HeapObject::kMapOffset));
    __ CompareRoot(scratch, Heap::kHeapNumberMapRootIndex);
    final_branch_condition = eq;

  } else if (String::Equals(type_name, factory->string_string())) {
    __ JumpIfSmi(input, false_label);
    __ CompareObjectType(input, scratch, no_reg, FIRST_NONSTRING_TYPE);
    final_branch_condition = lt;

  } else if (String::Equals(type_name, factory->symbol_string())) {
    __ JumpIfSmi(input, false_label);
    __ CompareObjectType(input, scratch, no_reg, SYMBOL_TYPE);
    final_branch_condition = eq;

  } else if (String::Equals(type_name, factory->boolean_string())) {
    __ CompareRoot(input, Heap::kTrueValueRootIndex);
    __ beq(true_label);
    __ CompareRoot(input, Heap::kFalseValueRootIndex);
    final_branch_condition = eq;

  } else if (String::Equals(type_name, factory->undefined_string())) {
    __ CompareRoot(input, Heap::kUndefinedValueRootIndex);
    __ beq(true_label);
    __ JumpIfSmi(input, false_label);
    // Check for undetectable objects => true.
    __ LoadP(scratch, FieldMemOperand(input, HeapObject::kMapOffset));
    __ lbz(scratch, FieldMemOperand(scratch, Map::kBitFieldOffset));
    __ ExtractBit(r0, scratch, Map::kIsUndetectable);
    __ cmpi(r0, Operand::Zero());
    final_branch_condition = ne;

  } else if (String::Equals(type_name, factory->function_string())) {
    __ JumpIfSmi(input, false_label);
    __ LoadP(scratch, FieldMemOperand(input, HeapObject::kMapOffset));
    __ lbz(scratch, FieldMemOperand(scratch, Map::kBitFieldOffset));
    __ andi(scratch, scratch,
            Operand((1 << Map::kIsCallable) | (1 << Map::kIsUndetectable)));
    __ cmpi(scratch, Operand(1 << Map::kIsCallable));
    final_branch_condition = eq;

  } else if (String::Equals(type_name, factory->object_string())) {
    __ JumpIfSmi(input, false_label);
    __ CompareRoot(input, Heap::kNullValueRootIndex);
    __ beq(true_label);
    STATIC_ASSERT(LAST_SPEC_OBJECT_TYPE == LAST_TYPE);
    __ CompareObjectType(input, scratch, ip, FIRST_SPEC_OBJECT_TYPE);
    __ blt(false_label);
    // Check for callable or undetectable objects => false.
    __ lbz(scratch, FieldMemOperand(scratch, Map::kBitFieldOffset));
    __ andi(r0, scratch,
            Operand((1 << Map::kIsCallable) | (1 << Map::kIsUndetectable)));
    __ cmpi(r0, Operand::Zero());
    final_branch_condition = eq;

// clang-format off
#define SIMD128_TYPE(TYPE, Type, type, lane_count, lane_type)          \
  } else if (String::Equals(type_name, factory->type##_string())) {    \
    __ JumpIfSmi(input, false_label);                                  \
    __ LoadP(scratch, FieldMemOperand(input, HeapObject::kMapOffset)); \
    __ CompareRoot(scratch, Heap::k##Type##MapRootIndex);              \
    final_branch_condition = eq;
  SIMD128_TYPES(SIMD128_TYPE)
#undef SIMD128_TYPE
  // clang-format on

  } else {
    __ b(false_label);
  }

  return final_branch_condition;
}


void LCodeGen::DoIsConstructCallAndBranch(LIsConstructCallAndBranch* instr) {
  Register temp1 = ToRegister(instr->temp());

  EmitIsConstructCall(temp1, scratch0());
  EmitBranch(instr, eq);
}


void LCodeGen::EmitIsConstructCall(Register temp1, Register temp2) {
  DCHECK(!temp1.is(temp2));
  // Get the frame pointer for the calling frame.
  __ LoadP(temp1, MemOperand(fp, StandardFrameConstants::kCallerFPOffset));

  // Skip the arguments adaptor frame if it exists.
  Label check_frame_marker;
  __ LoadP(temp2, MemOperand(temp1, StandardFrameConstants::kContextOffset));
  __ CmpSmiLiteral(temp2, Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR), r0);
  __ bne(&check_frame_marker);
  __ LoadP(temp1, MemOperand(temp1, StandardFrameConstants::kCallerFPOffset));

  // Check the marker in the calling frame.
  __ bind(&check_frame_marker);
  __ LoadP(temp1, MemOperand(temp1, StandardFrameConstants::kMarkerOffset));
  __ CmpSmiLiteral(temp1, Smi::FromInt(StackFrame::CONSTRUCT), r0);
}


void LCodeGen::EnsureSpaceForLazyDeopt(int space_needed) {
  if (info()->ShouldEnsureSpaceForLazyDeopt()) {
    // Ensure that we have enough space after the previous lazy-bailout
    // instruction for patching the code here.
    int current_pc = masm()->pc_offset();
    if (current_pc < last_lazy_deopt_pc_ + space_needed) {
      int padding_size = last_lazy_deopt_pc_ + space_needed - current_pc;
      DCHECK_EQ(0, padding_size % Assembler::kInstrSize);
      while (padding_size > 0) {
        __ nop();
        padding_size -= Assembler::kInstrSize;
      }
    }
  }
  last_lazy_deopt_pc_ = masm()->pc_offset();
}


5876 void LCodeGen::DoLazyBailout(LLazyBailout* instr) { | |
5877 last_lazy_deopt_pc_ = masm()->pc_offset(); | |
5878 DCHECK(instr->HasEnvironment()); | |
5879 LEnvironment* env = instr->environment(); | |
5880 RegisterEnvironmentForDeoptimization(env, Safepoint::kLazyDeopt); | |
5881 safepoints_.RecordLazyDeoptimizationIndex(env->deoptimization_index()); | |
5882 } | |
5883 | |
5884 | |
5885 void LCodeGen::DoDeoptimize(LDeoptimize* instr) { | |
5886 Deoptimizer::BailoutType type = instr->hydrogen()->type(); | |
5887 // TODO(danno): Stubs expect all deopts to be lazy for historical reasons (the | |
5888 // needed return address), even though the implementation of LAZY and EAGER is | |
5889 // now identical. When LAZY is eventually completely folded into EAGER, remove | |
5890 // the special case below. | |
5891 if (info()->IsStub() && type == Deoptimizer::EAGER) { | |
5892 type = Deoptimizer::LAZY; | |
5893 } | |
5894 | |
5895 DeoptimizeIf(al, instr, instr->hydrogen()->reason(), type); | |
5896 } | |
5897 | |
5898 | |
5899 void LCodeGen::DoDummy(LDummy* instr) { | |
5900 // Nothing to see here, move on! | |
5901 } | |
5902 | |
5903 | |
5904 void LCodeGen::DoDummyUse(LDummyUse* instr) { | |
5905 // Nothing to see here, move on! | |
5906 } | |
5907 | |
5908 | |
5909 void LCodeGen::DoDeferredStackCheck(LStackCheck* instr) { | |
5910 PushSafepointRegistersScope scope(this); | |
5911 LoadContextFromDeferred(instr->context()); | |
5912 __ CallRuntimeSaveDoubles(Runtime::kStackGuard); | |
5913 RecordSafepointWithLazyDeopt( | |
5914 instr, RECORD_SAFEPOINT_WITH_REGISTERS_AND_NO_ARGUMENTS); | |
5915 DCHECK(instr->HasEnvironment()); | |
5916 LEnvironment* env = instr->environment(); | |
5917 safepoints_.RecordLazyDeoptimizationIndex(env->deoptimization_index()); | |
5918 } | |
5919 | |
5920 | |
5921 void LCodeGen::DoStackCheck(LStackCheck* instr) { | |
5922 class DeferredStackCheck final : public LDeferredCode { | |
5923 public: | |
5924 DeferredStackCheck(LCodeGen* codegen, LStackCheck* instr) | |
5925 : LDeferredCode(codegen), instr_(instr) {} | |
5926 void Generate() override { codegen()->DoDeferredStackCheck(instr_); } | |
5927 LInstruction* instr() override { return instr_; } | |
5928 | |
5929 private: | |
5930 LStackCheck* instr_; | |
5931 }; | |
5932 | |
5933 DCHECK(instr->HasEnvironment()); | |
5934 LEnvironment* env = instr->environment(); | |
5935 // There is no LLazyBailout instruction for stack-checks. We have to | |
5936 // prepare for lazy deoptimization explicitly here. | |
5937 if (instr->hydrogen()->is_function_entry()) { | |
5938 // Perform stack overflow check. | |
5939 Label done; | |
5940 __ LoadRoot(ip, Heap::kStackLimitRootIndex); | |
5941 __ cmpl(sp, ip); | |
5942 __ bge(&done); | |
5943 DCHECK(instr->context()->IsRegister()); | |
5944 DCHECK(ToRegister(instr->context()).is(cp)); | |
5945 CallCode(isolate()->builtins()->StackCheck(), RelocInfo::CODE_TARGET, | |
5946 instr); | |
5947 __ bind(&done); | |
5948 } else { | |
5949 DCHECK(instr->hydrogen()->is_backwards_branch()); | |
5950 // Perform stack overflow check if this goto needs it before jumping. | |
5951 DeferredStackCheck* deferred_stack_check = | |
5952 new (zone()) DeferredStackCheck(this, instr); | |
5953 __ LoadRoot(ip, Heap::kStackLimitRootIndex); | |
5954 __ cmpl(sp, ip); | |
5955 __ blt(deferred_stack_check->entry()); | |
5956 EnsureSpaceForLazyDeopt(Deoptimizer::patch_size()); | |
5957 __ bind(instr->done_label()); | |
5958 deferred_stack_check->SetExit(instr->done_label()); | |
5959 RegisterEnvironmentForDeoptimization(env, Safepoint::kLazyDeopt); | |
    // Don't record a deoptimization index for the safepoint here.
    // This will be done explicitly when emitting the call and the safepoint
    // in the deferred code.
  }
}


void LCodeGen::DoOsrEntry(LOsrEntry* instr) {
  // This is a pseudo-instruction that ensures that the environment here is
  // properly registered for deoptimization and records the assembler's PC
  // offset.
  LEnvironment* environment = instr->environment();

  // If the environment were already registered, we would have no way of
  // backpatching it with the spill slot operands.
  DCHECK(!environment->HasBeenRegistered());
  RegisterEnvironmentForDeoptimization(environment, Safepoint::kNoLazyDeopt);

  GenerateOsrPrologue();
}


void LCodeGen::DoForInPrepareMap(LForInPrepareMap* instr) {
  __ TestIfSmi(r3, r0);
  DeoptimizeIf(eq, instr, Deoptimizer::kSmi, cr0);

  STATIC_ASSERT(FIRST_JS_PROXY_TYPE == FIRST_SPEC_OBJECT_TYPE);
  __ CompareObjectType(r3, r4, r4, LAST_JS_PROXY_TYPE);
  DeoptimizeIf(le, instr, Deoptimizer::kWrongInstanceType);

  Label use_cache, call_runtime;
  Register null_value = r8;
  __ LoadRoot(null_value, Heap::kNullValueRootIndex);
  __ CheckEnumCache(null_value, &call_runtime);

  __ LoadP(r3, FieldMemOperand(r3, HeapObject::kMapOffset));
  __ b(&use_cache);

  // Get the set of properties to enumerate.
  __ bind(&call_runtime);
  __ push(r3);
  CallRuntime(Runtime::kGetPropertyNamesFast, 1, instr);

  __ LoadP(r4, FieldMemOperand(r3, HeapObject::kMapOffset));
  __ LoadRoot(ip, Heap::kMetaMapRootIndex);
  __ cmp(r4, ip);
  DeoptimizeIf(ne, instr, Deoptimizer::kWrongMap);
  __ bind(&use_cache);
}


void LCodeGen::DoForInCacheArray(LForInCacheArray* instr) {
  Register map = ToRegister(instr->map());
  Register result = ToRegister(instr->result());
  Label load_cache, done;
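  // If the cached enum length is zero there is nothing to enumerate, so hand
  // back the canonical empty fixed array instead of dereferencing the enum
  // cache.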
  __ EnumLength(result, map);
  __ CmpSmiLiteral(result, Smi::FromInt(0), r0);
  __ bne(&load_cache);
  __ mov(result, Operand(isolate()->factory()->empty_fixed_array()));
  __ b(&done);

  __ bind(&load_cache);
  __ LoadInstanceDescriptors(map, result);
  __ LoadP(result, FieldMemOperand(result, DescriptorArray::kEnumCacheOffset));
  __ LoadP(result, FieldMemOperand(result, FixedArray::SizeFor(instr->idx())));
  __ cmpi(result, Operand::Zero());
  DeoptimizeIf(eq, instr, Deoptimizer::kNoCache);

  __ bind(&done);
}


void LCodeGen::DoCheckMapValue(LCheckMapValue* instr) {
  Register object = ToRegister(instr->value());
  Register map = ToRegister(instr->map());
  __ LoadP(scratch0(), FieldMemOperand(object, HeapObject::kMapOffset));
  __ cmp(map, scratch0());
  DeoptimizeIf(ne, instr, Deoptimizer::kWrongMap);
}


void LCodeGen::DoDeferredLoadMutableDouble(LLoadFieldByIndex* instr,
                                           Register result, Register object,
                                           Register index) {
  PushSafepointRegistersScope scope(this);
  __ Push(object, index);
  __ li(cp, Operand::Zero());
  __ CallRuntimeSaveDoubles(Runtime::kLoadMutableDouble);
  RecordSafepointWithRegisters(instr->pointer_map(), 2,
                               Safepoint::kNoLazyDeopt);
  __ StoreToSafepointRegisterSlot(r3, result);
}


void LCodeGen::DoLoadFieldByIndex(LLoadFieldByIndex* instr) {
  class DeferredLoadMutableDouble final : public LDeferredCode {
   public:
    DeferredLoadMutableDouble(LCodeGen* codegen, LLoadFieldByIndex* instr,
                              Register result, Register object, Register index)
        : LDeferredCode(codegen),
          instr_(instr),
          result_(result),
          object_(object),
          index_(index) {}
    void Generate() override {
      codegen()->DoDeferredLoadMutableDouble(instr_, result_, object_, index_);
    }
    LInstruction* instr() override { return instr_; }

   private:
    LLoadFieldByIndex* instr_;
    Register result_;
    Register object_;
    Register index_;
  };

  Register object = ToRegister(instr->object());
  Register index = ToRegister(instr->index());
  Register result = ToRegister(instr->result());
  Register scratch = scratch0();

  DeferredLoadMutableDouble* deferred;
  deferred = new (zone())
      DeferredLoadMutableDouble(this, instr, result, object, index);

  Label out_of_object, done;

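  // Bit 0 of the smi-encoded field index appears to flag a mutable
  // heap-number (double) field: odd encodings take the deferred runtime path,
  // and the arithmetic shift below strips the flag while halving the
  // encoding back to a plain smi index.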
  __ TestBitMask(index, reinterpret_cast<uintptr_t>(Smi::FromInt(1)), r0);
  __ bne(deferred->entry(), cr0);
  __ ShiftRightArithImm(index, index, 1);

  __ cmpi(index, Operand::Zero());
  __ blt(&out_of_object);

  __ SmiToPtrArrayOffset(r0, index);
  __ add(scratch, object, r0);
  __ LoadP(result, FieldMemOperand(scratch, JSObject::kHeaderSize));

  __ b(&done);

  __ bind(&out_of_object);
  __ LoadP(result, FieldMemOperand(object, JSObject::kPropertiesOffset));
  // The index is equal to the negated out-of-object property index plus 1.
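  // For example (values follow from the formula above): index == -1 selects
  // out-of-object slot 0 and index == -2 selects slot 1. The pointer offset
  // of a negative index is negative, so the sub below walks forward through
  // the properties array.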
  __ SmiToPtrArrayOffset(r0, index);
  __ sub(scratch, result, r0);
  __ LoadP(result,
           FieldMemOperand(scratch, FixedArray::kHeaderSize - kPointerSize));
  __ bind(deferred->exit());
  __ bind(&done);
}


void LCodeGen::DoStoreFrameContext(LStoreFrameContext* instr) {
  Register context = ToRegister(instr->context());
  __ StoreP(context, MemOperand(fp, StandardFrameConstants::kContextOffset));
}


void LCodeGen::DoAllocateBlockContext(LAllocateBlockContext* instr) {
  Handle<ScopeInfo> scope_info = instr->scope_info();
  __ Push(scope_info);
  __ push(ToRegister(instr->function()));
  CallRuntime(Runtime::kPushBlockContext, 2, instr);
  RecordSafepoint(Safepoint::kNoLazyDeopt);
}


#undef __
}  // namespace internal
}  // namespace v8